Merge pull request #2410 from janhq/chore/update-modelhub
Chore: Update model hub v0.4.10
hahuyhoang411 committed Mar 27, 2024
2 parents 784af8c + daf4dca commit a336c13
Showing 30 changed files with 245 additions and 342 deletions.
34 changes: 0 additions & 34 deletions models/capybara-34b/model.json

This file was deleted.

35 changes: 35 additions & 0 deletions models/command-r-34b/model.json
@@ -0,0 +1,35 @@
{
"sources": [
{
"filename": "c4ai-command-r-v01-Q4_K_M.gguf",
"url": "https://huggingface.co/andrewcanis/c4ai-command-r-v01-GGUF/resolve/main/c4ai-command-r-v01-Q4_K_M.gguf"
}
],
"id": "command-r-34b",
"object": "model",
"name": "Command-R v01 34B Q4",
"version": "1.0",
"description": "C4AI Command-R developed by CohereAI is optimized for a variety of use cases including reasoning, summarization, and question answering.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
"llama_model_path": "c4ai-command-r-v01-Q4_K_M.gguf"
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 4096,
"stop": [],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "CohereAI",
"tags": ["34B", "Finetuned"],
"size": 21500000000
},
"engine": "nitro"
}
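
For reference, the prompt_template above is a raw string in which {prompt} (and, for other entries, {system_message}) is substituted before the text is passed to the engine. A minimal Python sketch of that substitution (render_prompt is a hypothetical helper for illustration, not part of Jan or Nitro):

def render_prompt(template: str, prompt: str, system_message: str = "") -> str:
    """Fill the {system_message} and {prompt} placeholders used by model.json prompt templates."""
    return template.replace("{system_message}", system_message).replace("{prompt}", prompt)

# Template copied from the command-r-34b entry above.
command_r_template = (
    "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|>"
    "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
)
print(render_prompt(command_r_template, "Summarize the GGUF format in one sentence."))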

34 changes: 0 additions & 34 deletions models/dolphin-2.7-mixtral-8x7b/model.json

This file was deleted.

2 changes: 1 addition & 1 deletion models/dolphin-phi-2/model.json
@@ -9,7 +9,7 @@
"object": "model",
"name": "Dolphin Phi-2 2.7B Q8",
"version": "1.0",
"description": "Dolphin Phi-2 is a 2.7B model, fine-tuned for chat, excelling in common sense and logical reasoning benchmarks.",
"description": "Dolphin Phi-2 is a good alternative for Phi-2 in chatting",
"format": "gguf",
"settings": {
"ctx_len": 4096,
2 changes: 1 addition & 1 deletion models/gemma-2b/model.json
@@ -27,7 +27,7 @@
},
"metadata": {
"author": "Google",
"tags": ["2B", "Finetuned"],
"tags": ["2B", "Finetuned", "Tiny"],
"size": 1500000000
},
"engine": "nitro"
4 changes: 2 additions & 2 deletions models/gemma-7b/model.json
@@ -9,7 +9,7 @@
"object": "model",
"name": "Gemma 7B Q4",
"version": "1.0",
"description": "Gemma is built from the same technology with Google's Gemini.",
"description": "Google's Gemma is built for multilingual purpose",
"format": "gguf",
"settings": {
"ctx_len": 4096,
@@ -27,7 +27,7 @@
},
"metadata": {
"author": "Google",
"tags": ["7B", "Finetuned"],
"tags": ["7B", "Finetuned", "Featured"],
"size": 5330000000
},
"engine": "nitro"
35 changes: 35 additions & 0 deletions models/hermes-pro-7b/model.json
@@ -0,0 +1,35 @@
{
"sources": [
{
"filename": "Hermes-2-Pro-Mistral-7B.Q4_K_M.gguf",
"url": "https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/resolve/main/Hermes-2-Pro-Mistral-7B.Q4_K_M.gguf"
}
],
"id": "hermes-pro-7b",
"object": "model",
"name": "Hermes Pro 7B Q4",
"version": "1.0",
"description": "Hermes Pro is superior in Roleplaying, Reasoning and Explaining problem.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "Hermes-2-Pro-Mistral-7B.Q4_K_M.gguf"
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 4096,
"stop": [],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "NousResearch",
"tags": ["7B", "Finetuned", "Featured"],
"size": 4370000000
},
"engine": "nitro"
}
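
Each entry's sources array pairs a target filename with its download URL, which is enough for a client to fetch the weights before first use. A minimal sketch under that assumption (download_sources and the local paths below are hypothetical, not Jan's actual downloader):

import json
import pathlib
import urllib.request

def download_sources(model_json_path: str, dest_dir: str) -> None:
    """Download every file listed in a model.json 'sources' array into dest_dir."""
    manifest = json.loads(pathlib.Path(model_json_path).read_text())
    dest = pathlib.Path(dest_dir)
    dest.mkdir(parents=True, exist_ok=True)
    for source in manifest["sources"]:
        target = dest / source["filename"]
        if not target.exists():  # skip files that are already present
            urllib.request.urlretrieve(source["url"], str(target))

download_sources("models/hermes-pro-7b/model.json", "downloads/hermes-pro-7b")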

models/llama2-chat-70b-q4/model.json
@@ -5,11 +5,11 @@
"url": "https://huggingface.co/TheBloke/Llama-2-70B-Chat-GGUF/resolve/main/llama-2-70b-chat.Q4_K_M.gguf"
}
],
"id": "llama2-chat-70b-q4",
"id": "llama2-chat-70b",
"object": "model",
"name": "Llama 2 Chat 70B Q4",
"version": "1.0",
"description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
"description": "Llama 2 specifically designed for a comprehensive understanding the world.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
@@ -26,7 +26,7 @@
"presence_penalty": 0
},
"metadata": {
"author": "MetaAI, The Bloke",
"author": "MetaAI",
"tags": ["70B", "Foundational Model"],
"size": 43920000000
},
models/llama2-chat-7b-q4/model.json
@@ -5,11 +5,11 @@
"url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf"
}
],
"id": "llama2-chat-7b-q4",
"id": "llama2-chat-7b",
"object": "model",
"name": "Llama 2 Chat 7B Q4",
"version": "1.0",
"description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
"description": "Llama 2 specifically designed for a comprehensive understanding the world.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
@@ -26,7 +26,7 @@
"presence_penalty": 0
},
"metadata": {
"author": "MetaAI, The Bloke",
"author": "MetaAI",
"tags": ["7B", "Foundational Model"],
"size": 4080000000
},
35 changes: 0 additions & 35 deletions models/llava-1.5-13b-q5/model.json

This file was deleted.

35 changes: 0 additions & 35 deletions models/llava-1.5-7b-q5/model.json

This file was deleted.

35 changes: 35 additions & 0 deletions models/llava-13b/model.json
@@ -0,0 +1,35 @@
{
"sources": [
{
"filename": "llava-v1.6-vicuna-13b.Q4_K_M.gguf",
"url": "https://huggingface.co/cjpais/llava-v1.6-vicuna-13b-gguf/resolve/main/llava-v1.6-vicuna-13b.Q4_K_M.gguf"
},
{
"filename": "mmproj-model-f16.gguf",
"url": "https://huggingface.co/cjpais/llava-v1.6-vicuna-13b-gguf/resolve/main/mmproj-model-f16.gguf"
}
],
"id": "llava-13b",
"object": "model",
"name": "LlaVa 13B Q4",
"version": "1.1",
"description": "LlaVa can bring vision understanding to Jan",
"format": "gguf",
"settings": {
"vision_model": true,
"text_model": false,
"ctx_len": 4096,
"prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
"llama_model_path": "llava-v1.6-vicuna-13b.Q4_K_M.gguf",
"mmproj": "mmproj-model-f16.gguf"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "liuhaotian",
"tags": ["Vision"],
"size": 7870000000
},
"engine": "nitro"
}
35 changes: 35 additions & 0 deletions models/llava-7b/model.json
@@ -0,0 +1,35 @@
{
"sources": [
{
"filename": "llava-v1.6-mistral-7b.Q4_K_M.gguf",
"url": "https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q4_K_M.gguf"
},
{
"filename": "mmproj-model-f16.gguf",
"url": "https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/mmproj-model-f16.gguf"
}
],
"id": "llava-7b",
"object": "model",
"name": "LlaVa 7B",
"version": "1.1",
"description": "LlaVa can bring vision understanding to Jan",
"format": "gguf",
"settings": {
"vision_model": true,
"text_model": false,
"ctx_len": 4096,
"prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
"llama_model_path": "llava-v1.6-mistral-7b.Q4_K_M.gguf",
"mmproj": "mmproj-model-f16.gguf"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "liuhaotian",
"tags": ["Vision"],
"size": 4370000000
},
"engine": "nitro"
}
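
The two LlaVa entries differ from the text-only models: they list a second source file (the mmproj projector), set vision_model to true, and point settings.mmproj at that projector, so a loader needs both files on disk before launch. A minimal sketch of reading those fields (launch_plan is a hypothetical helper and the path below is illustrative):

import json
import pathlib

def launch_plan(model_json_path: str) -> dict:
    """Collect what a loader needs from model.json: the GGUF file plus, for vision models, the mmproj projector."""
    manifest = json.loads(pathlib.Path(model_json_path).read_text())
    settings = manifest.get("settings", {})
    plan = {
        "model_file": settings.get("llama_model_path"),
        "context_length": settings.get("ctx_len"),
    }
    if settings.get("vision_model"):
        # e.g. "mmproj-model-f16.gguf" for both llava-13b and llava-7b
        plan["projector_file"] = settings.get("mmproj")
    return plan

print(launch_plan("models/llava-7b/model.json"))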