{"payload":{"pageCount":3,"repositories":[{"type":"Public","name":"Q-LLM","owner":"dvlab-research","isFork":false,"description":"This is the official repo of \"QuickLLaMA: Query-aware Inference Acceleration for Large Language Models\"","allTopics":["fast-inference","inference-acceleration","large-language-models","long-context","kv-cache-compression"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":4,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T05:18:57.221Z"}},{"type":"Public","name":"MR-GSM8K","owner":"dvlab-research","isFork":false,"description":"Challenge LLMs to Reason About Reasoning: A Benchmark to Unveil Cognitive Depth in LLMs","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":34,"forksCount":0,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,11,2,0,8,0,0,0,0,0,0,0,0,0,0,2,0,4,1,0,0,0,0,2,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-10T03:43:41.730Z"}},{"type":"Public","name":"LongLoRA","owner":"dvlab-research","isFork":false,"description":"Code and documents of LongLoRA and LongAlpaca (ICLR 2024 Oral)","allTopics":["lora","large-language-models","llm","long-context","fine-tuning-llm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":43,"starsCount":2513,"forksCount":256,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,24,34,37,12,7,20,15,3,18,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-02T13:10:37.709Z"}},{"type":"Public","name":"MGM","owner":"dvlab-research","isFork":false,"description":"Official repo for \"Mini-Gemini: Mining the Potential of Multi-modality Vision Language Models\"","allTopics":["generation","large-language-models","vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":51,"starsCount":3051,"forksCount":274,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-04T14:36:51.258Z"}},{"type":"Public","name":"LISA","owner":"dvlab-research","isFork":false,"description":"Project Page for \"LISA: Reasoning Segmentation via Large Language Model\"","allTopics":["segmentation","multi-modal","llm","large-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":56,"starsCount":1545,"forksCount":106,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,24,64,19,9,5,0,1,0,4,0,0,0,1,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-08T01:28:13.066Z"}},{"type":"Public","name":"GroupContrast","owner":"dvlab-research","isFork":false,"description":"[CVPR 2024] GroupContrast: Semantic-aware Self-supervised Representation Learning for 3D Understanding","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":2,"starsCount":40,"forksCount":1,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-15T06:31:54.717Z"}},{"type":"Public","name":"Video-P2P","owner":"dvlab-research","isFork":false,"description":"Video-P2P: Video Editing with Cross-attention 
Control","allTopics":["image-editing","generative-model","video-editing","text-driven-editing","stable-diffusion"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":343,"forksCount":23,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-12T13:31:27.759Z"}},{"type":"Public","name":"Parametric-Contrastive-Learning","owner":"dvlab-research","isFork":false,"description":"Parametric Contrastive Learning (ICCV2021) & GPaCo (TPAMI 2023)","allTopics":["pytorch","supervised-learning","imagenet","image-classification","class-imbalance","imbalanced-data","imbalanced-learning","tpami","contrastive-learning","supervised-contrastive-learning","long-tailed-recognition","iccv2021","parametric-contrastive-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":232,"forksCount":29,"license":"MIT License","participation":[0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-29T03:37:59.960Z"}},{"type":"Public","name":"Prompt-Highlighter","owner":"dvlab-research","isFork":false,"description":"[CVPR 2024] Prompt Highlighter: Interactive Control for Multi-Modal LLMs","allTopics":["text-generation","multi-modality","llm-inference"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":104,"forksCount":2,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,6,4,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-25T04:48:15.566Z"}},{"type":"Public","name":"LLMGA","owner":"dvlab-research","isFork":false,"description":"This project is the official implementation of 'LLMGA: Multimodal Large Language Model based Generation Assistant'","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":261,"forksCount":17,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-22T16:25:33.644Z"}},{"type":"Public","name":"LLaMA-VID","owner":"dvlab-research","isFork":false,"description":"Official Implementation for LLaMA-VID: An Image is Worth 2 Tokens in Large Language Models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":29,"starsCount":610,"forksCount":38,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-10T12:34:34.429Z"}},{"type":"Public","name":"MOOD","owner":"dvlab-research","isFork":false,"description":"Official PyTorch implementation of MOOD series: (1) MOODv1: Rethinking Out-of-distributionDetection: Masked Image Modeling Is All You Need. 
(2) MOODv2: Masked Image Modeling for Out-of-Distribution Detection.","allTopics":["outlier-detection","ood-detection","masked-image-modeling","cvpr2023","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":133,"forksCount":4,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,11,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-08T04:06:11.686Z"}},{"type":"Public","name":"MoTCoder","owner":"dvlab-research","isFork":false,"description":"This is the official code repository of MoTCoder: Elevating Large Language Models with Modular of Thought for Challenging Programming Tasks.","allTopics":["natural-language-processing","programming","code","apps","code-generation","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":55,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-06T08:55:11.326Z"}},{"type":"Public","name":"RIVAL","owner":"dvlab-research","isFork":false,"description":"[NeurIPS 2023 Spotlight] Real-World Image Variation by Aligning Diffusion Inversion Chain","allTopics":["style-transfer","text-to-image","diffusion-models","image-variations"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":138,"forksCount":10,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,7,0,2,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-02T08:04:18.111Z"}},{"type":"Public","name":"TriVol","owner":"dvlab-research","isFork":false,"description":"The official code of TriVol in CVPR-2023","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":37,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-30T06:32:42.369Z"}},{"type":"Public","name":"PFENet","owner":"dvlab-research","isFork":false,"description":"PFENet: Prior Guided Feature Enrichment Network for Few-shot Segmentation (TPAMI).","allTopics":["segmentation","few-shot","pami-2020"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":34,"starsCount":304,"forksCount":54,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-15T02:37:29.703Z"}},{"type":"Public","name":"APD","owner":"dvlab-research","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":4,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-09T08:14:23.308Z"}},{"type":"Public","name":"Imbalanced-Learning","owner":"dvlab-research","isFork":false,"description":"Imbalanced learning tool for imbalanced recognition and segmentation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":78,"forksCount":8,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-07T14:07:46.139Z"}},{"type":"Public","name":"SparseTransformer","owner":"dvlab-research","isFork":false,"description":"A fast and memory-efficient libarary for sparse transformer with varying token numbers (e.g., 3D point 
cloud).","allTopics":["cuda","transformer","3d-point-cloud","sparse-transformer"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":147,"forksCount":11,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-06T07:16:14.329Z"}},{"type":"Public","name":"Mask-Attention-Free-Transformer","owner":"dvlab-research","isFork":false,"description":"Official Implementation for \"Mask-Attention-Free Transformer for 3D Instance Segmentation\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":10,"starsCount":57,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-06T01:59:11.691Z"}},{"type":"Public","name":"Context-Aware-Consistency","owner":"dvlab-research","isFork":false,"description":"Semi-supervised Semantic Segmentation with Directional Context-aware Consistency (CVPR 2021)","allTopics":["semi-supervised-learning","semantic-segmentation","cvpr2021","semi-supervised-segmentation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":155,"forksCount":19,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-24T05:52:07.799Z"}},{"type":"Public","name":"ProposeReduce","owner":"dvlab-research","isFork":false,"description":"Video Instance Segmentation with a Propose-Reduce Paradigm (ICCV 2021)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":41,"forksCount":4,"license":null,"participation":[0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-05T05:40:12.247Z"}},{"type":"Public","name":"PointGroup","owner":"dvlab-research","isFork":false,"description":"PointGroup: Dual-Set Point Grouping for 3D Instance Segmentation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":37,"starsCount":369,"forksCount":79,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-27T09:50:35.337Z"}},{"type":"Public","name":"Ref-NPR","owner":"dvlab-research","isFork":false,"description":"[CVPR 2023] Ref-NPR: Reference-Based Non-PhotoRealistic Radiance Fields","allTopics":["pytorch","image-editing","style-transfer","nerf","stylization","radiance-field"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":118,"forksCount":9,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-07T07:09:14.450Z"}},{"type":"Public","name":"LBGAT","owner":"dvlab-research","isFork":false,"description":"Learnable Boundary Guided Adversarial Training (ICCV2021)","allTopics":["image-recognition","robustness","adversarial-attacks","adversarial-training","adversarial-defense","iccv2021"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":33,"forksCount":2,"license":"MIT 
License","participation":[2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-17T04:27:54.251Z"}},{"type":"Public","name":"LargeKernel3D","owner":"dvlab-research","isFork":false,"description":"LargeKernel3D: Scaling up Kernels in 3D Sparse CNNs (CVPR 2023)","allTopics":["object-detection","semantic-segmentation","3d","scannet","nuscenes"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":186,"forksCount":8,"license":"Apache License 2.0","participation":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-16T05:09:35.554Z"}},{"type":"Public","name":"SphereFormer","owner":"dvlab-research","isFork":false,"description":"The official implementation for \"Spherical Transformer for LiDAR-based 3D Recognition\" (CVPR 2023).","allTopics":["transformer","lidar-point-cloud","3d-object-detection","3d-semantic-segmentation","nuscenes","semantickitti","waymo","cvpr2023"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":20,"starsCount":282,"forksCount":34,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-08T09:34:58.291Z"}},{"type":"Public","name":"VoxelNeXt","owner":"dvlab-research","isFork":false,"description":"VoxelNeXt: Fully Sparse VoxelNet for 3D Object Detection and Tracking (CVPR 2023)","allTopics":["lidar","autonomous-driving","3d-object-detection","nuscenes","3d-multi-object-tracking","argoverse","waymo-open-dataset"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":30,"starsCount":663,"forksCount":55,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-03T07:08:08.325Z"}},{"type":"Public","name":"FocalsConv","owner":"dvlab-research","isFork":false,"description":"Focal Sparse Convolutional Networks for 3D Object Detection (CVPR 2022, Oral)","allTopics":["autonomous-driving","kitti","sparse-convolution","3d-object-detection","nuscenes"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":363,"forksCount":35,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-01T03:23:11.855Z"}},{"type":"Public","name":"3D-Box-Segment-Anything","owner":"dvlab-research","isFork":false,"description":"We extend Segment Anything to 3D perception by combining it with VoxelNeXt.","allTopics":["autonomous-driving","3d","segment-anything"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":6,"starsCount":513,"forksCount":23,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-18T02:39:02.211Z"}}],"repositoryCount":64,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}