{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"svip-lab.github.io","owner":"svip-lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-05T01:35:54.439Z"}},{"type":"Public","name":"WeakSVR","owner":"svip-lab","isFork":false,"description":"(CVPR 2023) Official implemention of the paper \"Weakly Supervised Video Representation Learning with Unaligned Text for Sequential Videos\"","allTopics":["cvpr2023"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":25,"forksCount":4,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-02T05:39:37.733Z"}},{"type":"Public","name":"PlanarReconstruction","owner":"svip-lab","isFork":false,"description":"[CVPR'19] Single-Image Piece-wise Planar 3D Reconstruction via Associative Embedding","allTopics":["computer-vision","deep-learning","pytorch","cvpr2019"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":9,"starsCount":355,"forksCount":85,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-10T09:52:46.949Z"}},{"type":"Public","name":"PlaneDepth","owner":"svip-lab","isFork":false,"description":"[CVPR2023] This is an official implementation for \"PlaneDepth: Self-supervised Depth Estimation via Orthogonal Planes\".","allTopics":["cvpr2023","depth-estimation","self-supervised-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":99,"forksCount":5,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-06T11:55:34.699Z"}},{"type":"Public","name":"SvipLab-ChatGPT-Web-Share","owner":"svip-lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-24T09:40:38.328Z"}},{"type":"Public","name":"MLEP","owner":"svip-lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":6,"starsCount":47,"forksCount":16,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-24T23:33:53.625Z"}},{"type":"Public","name":"SVIP-Sequence-VerIfication-for-Procedures-in-Videos","owner":"svip-lab","isFork":false,"description":"[CVPR2022] SVIP: Sequence VerIfication for Procedures in Videos","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":19,"forksCount":2,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-24T07:01:14.046Z"}},{"type":"Public","name":"Indoor-SfMLearner","owner":"svip-lab","isFork":false,"description":"[ECCV'20] Patch-match and Plane-regularization for Unsupervised Indoor Depth 
Estimation","allTopics":["unsupervised-learning","indoor","pose-estimation","depth-estimation","self-supervised","scannet","eccv2020","pytorch","nyuv2","extract-superpixel"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":149,"forksCount":24,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-11-07T20:23:12.618Z"}},{"type":"Public","name":"AS-MLP","owner":"svip-lab","isFork":false,"description":"[ICLR'22] This is an official implementation for \"AS-MLP: An Axial Shifted MLP Architecture for Vision\".","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":118,"forksCount":10,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-15T13:08:10.795Z"}},{"type":"Public","name":"SphericalDNNs","owner":"svip-lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":6,"forksCount":2,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-09-29T13:21:44.018Z"}},{"type":"Public","name":"IVOS-W","owner":"svip-lab","isFork":false,"description":"[CVPR'21] Learning to Recommend Frame for Interactive Video Object Segmentation in the Wild","allTopics":["reinforcement-learning","video-object-segmentation","cvpr2021"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":47,"forksCount":3,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-09-01T16:03:32.126Z"}},{"type":"Public","name":"RGBD-Counting","owner":"svip-lab","isFork":false,"description":"RGBD crowd counting","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":37,"forksCount":3,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-06-23T10:14:31.086Z"}},{"type":"Public","name":"impersonator","owner":"svip-lab","isFork":false,"description":"PyTorch implementation of our ICCV 2019 paper: Liquid Warping GAN: A Unified Framework for Human Motion Imitation, Appearance Transfer and Novel View Synthesis","allTopics":["pytorch","gan","pose"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":33,"starsCount":1725,"forksCount":318,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-06-21T22:56:20.272Z"}},{"type":"Public","name":"TransRAC","owner":"svip-lab","isFork":true,"description":"(CVPR 2022 Oral) Official implemention: TransRAC","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":19,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-06-15T11:34:04.623Z"}},{"type":"Public","name":"Locating_Counting_with_a_Depth_Prior","owner":"svip-lab","isFork":false,"description":"[TPAMI] Locating and Counting Heads in Crowds With a Depth Prior","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":23,"forksCount":1,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-05-25T14:20:23.652Z"}},{"type":"Public","name":"AS-MLP-Object-Detection","owner":"svip-lab","isFork":true,"description":"[ICLR'22] 
This is an official implementation for \"AS-MLP: An Axial Shifted MLP Architecture for Vision\" on Object Detection and Instance Segmentation.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":6,"forksCount":9237,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-05-22T09:26:21.338Z"}},{"type":"Public","name":"AS-MLP-Semantic-Segmentation","owner":"svip-lab","isFork":true,"description":"[ICLR'22] This is an official implementation for \"AS-MLP: An Axial Shifted MLP Architecture for Vision\" on Semantic Segmentation.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":2503,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-04-08T07:55:55.516Z"}},{"type":"Public template","name":"CrowdCountingPAL","owner":"svip-lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":7,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-02-22T08:14:17.198Z"}},{"type":"Public","name":"ShanghaiTechRGBDSyn","owner":"svip-lab","isFork":false,"description":"[TPAMI] Locating and Counting Heads in Crowds With a Depth Prior","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":11,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-01-07T07:16:32.415Z"}},{"type":"Public","name":"HRNet-for-Fashion-Landmark-Estimation.PyTorch","owner":"svip-lab","isFork":false,"description":"[DeepFashion2 Challenge] Fashion Landmark Estimation with HRNet","allTopics":["fashion","keypoint","pytorch"],"primaryLanguage":{"name":"Cuda","color":"#3A4E3A"},"pullRequestCount":0,"issueCount":5,"starsCount":124,"forksCount":21,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-12-28T06:54:31.827Z"}},{"type":"Public","name":"LBYLNet","owner":"svip-lab","isFork":false,"description":"[CVPR2021] Look before you leap: learning landmark features for one-stage visual grounding.","allTopics":["pytorch","cvpr","visual-grounding","cvpr2021"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":46,"forksCount":9,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-08-31T11:52:18.283Z"}},{"type":"Public","name":"Weekly_Group_Meeting_Paper_List","owner":"svip-lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":42,"forksCount":7,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-08-08T12:14:00.331Z"}},{"type":"Public","name":"GazeFollowing","owner":"svip-lab","isFork":false,"description":"Code for ACCV2018 paper 'Believe It or Not, We Know What You Are Looking at!'","allTopics":["pytorch","accv2018","gaze-follow"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":101,"forksCount":22,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-07-09T12:08:39.696Z"}},{"type":"Public","name":"RGBD-Gaze","owner":"svip-lab","isFork":false,"description":"RGBD Based Gaze Estimation via Multi-task 
CNN","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":21,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2020-09-10T08:30:32.504Z"}},{"type":"Public","name":"FastMVSNet","owner":"svip-lab","isFork":false,"description":"[CVPR'20] Fast-MVSNet: Sparse-to-Dense Multi-View Stereo With Learned Propagation and Gauss-Newton Refinement","allTopics":["pytorch","multi-view-stereo","cvpr2020"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":14,"starsCount":246,"forksCount":34,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2020-03-31T01:36:11.037Z"}},{"type":"Public","name":"Saliency-Detection-in-360-Videos","owner":"svip-lab","isFork":false,"description":"Saliency-Detection-in-360-Videos","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":8,"forksCount":5,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2019-10-15T09:23:54.620Z"}},{"type":"Public","name":"PPGNet","owner":"svip-lab","isFork":false,"description":"Source code for our CVPR 2019 paper - PPGNet: Learning Point-Pair Graph for Line Segment Detection","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":12,"starsCount":173,"forksCount":37,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2019-07-29T04:29:30.431Z"}},{"type":"Public","name":"Medical-Image-CodeBase-SVIP-Lab","owner":"svip-lab","isFork":false,"description":"Useful and frequently used code for computer vision","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2018-12-06T14:38:33.067Z"}},{"type":"Public","name":"CIDNN","owner":"svip-lab","isFork":false,"description":"CIDNN: Encoding Crowd Interaction with Deep Neural Network ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":9,"starsCount":74,"forksCount":19,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2018-09-15T11:45:39.154Z"}},{"type":"Public","name":"svip_docker_manager","owner":"svip-lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2018-08-31T16:59:47.840Z"}}],"repositoryCount":30,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}