{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"vllm-custom","owner":"unitaryai","isFork":true,"description":"A high-throughput and memory-efficient inference and serving engine for LLMs","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":2856,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-06T08:44:05.056Z"}},{"type":"Public","name":"detoxify","owner":"unitaryai","isFork":false,"description":"Trained models & code to predict toxic comments on all 3 Jigsaw Toxic Comment Challenges. Built using ⚡ Pytorch Lightning and 🤗 Transformers. For access to our API, please email us at contact@unitary.ai.","allTopics":["nlp","kaggle-competition","sentence-classification","bert","hatespeech","hate-speech","toxicity","toxic-comment-classification","toxic-comments","bert-model","hate-speech-detection","huggingface","pytorch-lightning","toxicity-classification","huggingface-transformers"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":34,"starsCount":869,"forksCount":110,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-16T09:24:43.866Z"}},{"type":"Public","name":"VTC","owner":"unitaryai","isFork":false,"description":"VTC: Improving Video-Text Retrieval with User Comments","allTopics":["comments","video-understanding","multimodal-deep-learning","video-text-retrieval","vision-language-transformer","vision-language-pretraining"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":11,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-01T21:43:10.416Z"}},{"type":"Public","name":"VTC-dataset","owner":"unitaryai","isFork":false,"description":"","allTopics":["video-understanding","video-text-retrieval","vision-language-pretraining","vision-language-dataset","dataset"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-01T12:37:01.611Z"}},{"type":"Public","name":"clip-torch2","owner":"unitaryai","isFork":true,"description":"CLIP (Contrastive Language-Image Pretraining), Predict the most relevant text snippet given an image","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":1,"issueCount":0,"starsCount":0,"forksCount":3059,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-08T12:29:23.881Z"}},{"type":"Public","name":"vtc-paper","owner":"unitaryai","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-26T13:58:04.412Z"}}],"repositoryCount":6,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}