feat: bump package and add new oai models
mattzcarey committed May 15, 2024
1 parent 6448b2a commit 1e3e8ec
Showing 7 changed files with 50 additions and 33 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/pr.yml
@@ -36,7 +36,7 @@ jobs:
run: npm run build

- name: Run code review script
- run: npm run start -- --ci=github --model=gpt-3.5-turbo
+ run: npm run start -- --ci=github --model=gpt-4o

- name: Run linting test
run: npm run lint-test
@@ -45,7 +45,7 @@ jobs:
run: npm run test-unit

- name: Run prompt tests
- run: npm run test -- --ci=github --model=gpt-3.5-turbo
+ run: npm run test -- --ci=github --model=gpt-4o

deploy_core_to_dev:
runs-on: ubuntu-latest
4 changes: 2 additions & 2 deletions action.md
@@ -22,9 +22,9 @@ jobs:
fetch-depth: 0

- name: Code Review GPT
- uses: mattzcarey/code-review-gpt@v0.1.4-alpha
+ uses: mattzcarey/code-review-gpt@v0.1.8
with:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- MODEL: 'gpt-3.5-turbo'
+ MODEL: 'gpt-4o'
GITHUB_TOKEN: ${{ github.token }}
```
2 changes: 1 addition & 1 deletion action.yml
@@ -5,7 +5,7 @@ inputs:
MODEL:
description: 'The GPT model to use'
required: true
- default: 'gpt-3.5-turbo'
+ default: 'gpt-4o'
OPENAI_API_KEY:
description: 'OpenAI API Key'
required: true
4 changes: 2 additions & 2 deletions packages/code-review-gpt/package-lock.json

Some generated files are not rendered by default.

5 changes: 3 additions & 2 deletions packages/code-review-gpt/package.json
@@ -1,6 +1,6 @@
{
"name": "code-review-gpt",
"version": "0.1.6",
"version": "0.1.8",
"description": "Your AI code reviewer. Improve code quality and catch bugs before you break production",
"bin": {
"code-review-gpt": "./dist/index.js"
@@ -16,7 +16,8 @@
"test": "ts-node ./src/index.ts test",
"test-unit": "dotenv -e .env jest",
"build": "node ./utils/build.js",
"postbuild": "node ./utils/shebang.js && chmod +x ./dist/index.js"
"postbuild": "node ./utils/shebang.js && chmod +x ./dist/index.js",
"publish-package": "npm i && npm run build && npm publish --access public"
},
"keywords": [
"code-review",
12 changes: 10 additions & 2 deletions packages/code-review-gpt/src/review/constants.ts
@@ -3,8 +3,16 @@ export const signOff =

export const modelInfo = [
{
model: "gpt-4-1106-preview",
maxPromptLength: 128000, //128k tokens
model: "gpt-4o",
maxPromptLength: 300000, //128k tokens
},
{
model: "gpt-4-turbo",
maxPromptLength: 300000, //128k tokens
},
{
model: "gpt-4-turbo-preview",
maxPromptLength: 300000, //128k tokens
},
{
model: "gpt-4",
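The `modelInfo` table maps each supported OpenAI model name to a maximum prompt length in characters. A minimal sketch of how such a table might be consumed to validate the `--model` flag and look up its prompt budget; the helper name and error handling below are illustrative assumptions, not code from this commit:

```typescript
// Illustrative sketch only: one plausible consumer of the modelInfo table.
type ModelInfo = { model: string; maxPromptLength: number };

const modelInfo: ModelInfo[] = [
  { model: "gpt-4o", maxPromptLength: 300000 },
  { model: "gpt-4", maxPromptLength: 21000 },
  { model: "gpt-3.5-turbo", maxPromptLength: 9000 },
];

// Look up the character budget for the model requested via --model.
const getMaxPromptLength = (modelName: string): number => {
  const entry = modelInfo.find((info) => info.model === modelName);
  if (!entry) {
    throw new Error(`Unsupported model: ${modelName}`);
  }
  return entry.maxPromptLength;
};

// Example: getMaxPromptLength("gpt-4o") === 300000
```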
52 changes: 30 additions & 22 deletions services/core/functions/webhook/src/constants.ts
@@ -1,28 +1,36 @@
export const signOff =
"#### Powered by [Code Review GPT](https://github.com/mattzcarey/code-review-gpt)";

- export const modelInfo = [
-   {
-     model: "gpt-4-1106-preview",
-     maxPromptLength: 300000, //100k tokens
-   },
-   {
-     model: "gpt-4",
-     maxPromptLength: 21000, //8k tokens
-   },
-   {
-     model: "gpt-4-32k",
-     maxPromptLength: 90000, //32k tokens
-   },
-   {
-     model: "gpt-3.5-turbo",
-     maxPromptLength: 9000, //4k tokens
-   },
-   {
-     model: "gpt-3.5-turbo-16k",
-     maxPromptLength: 45000, //16k tokens
-   },
- ]; // Response needs about 1k tokens ~= 3k characters
+ export const modelInfo = [
+   {
+     model: "gpt-4o",
+     maxPromptLength: 300000, //128k tokens
+   },
+   {
+     model: "gpt-4-turbo",
+     maxPromptLength: 300000, //128k tokens
+   },
+   {
+     model: "gpt-4-turbo-preview",
+     maxPromptLength: 300000, //128k tokens
+   },
+   {
+     model: "gpt-4",
+     maxPromptLength: 21000, //8k tokens
+   },
+   {
+     model: "gpt-4-32k",
+     maxPromptLength: 90000, //32k tokens
+   },
+   {
+     model: "gpt-3.5-turbo",
+     maxPromptLength: 9000, //4k tokens
+   },
+   {
+     model: "gpt-3.5-turbo-16k",
+     maxPromptLength: 45000, //16k tokens
+   },
+ ]; // Response needs about 1k tokens ~= 3k characters

export const languageMap: { [key: string]: string } = {
".js": "JavaScript",
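The trailing comment on the array records the sizing heuristic behind these limits: roughly 3 characters per token, with about 1k tokens (~3k characters) reserved for the model's response. A rough sketch of that arithmetic, assuming the ~3 chars/token rule of thumb; this helper is illustrative and not part of the repository:

```typescript
// Illustrative only: derive a prompt character budget from a context window,
// using the ~3 characters-per-token heuristic noted in the comment above.
const CHARS_PER_TOKEN = 3;      // rough heuristic, not an exact tokenizer ratio
const RESPONSE_TOKENS = 1000;   // "Response needs about 1k tokens ~= 3k characters"

const promptCharBudget = (contextWindowTokens: number): number =>
  (contextWindowTokens - RESPONSE_TOKENS) * CHARS_PER_TOKEN;

// An 8k-token model: (8000 - 1000) * 3 = 21000 characters, matching the
// gpt-4 entry; the larger models are capped more conservatively than this.
console.log(promptCharBudget(8000)); // 21000
```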
