{"payload":{"pageCount":2,"repositories":[{"type":"Public","name":"server","owner":"triton-inference-server","isFork":false,"description":"The Triton Inference Server provides an optimized cloud and edge inferencing solution. ","allTopics":["machine-learning","cloud","deep-learning","gpu","inference","edge","datacenter"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":48,"issueCount":441,"starsCount":7555,"forksCount":1401,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[12,3,9,7,11,4,6,3,7,11,6,4,6,7,7,4,2,9,12,8,5,13,8,16,3,4,6,9,1,2,7,5,8,8,3,4,6,4,4,8,5,4,2,10,10,7,6,4,5,8,5,9],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T07:27:16.784Z"}},{"type":"Public","name":"tensorrt_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for TensorRT.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":49,"forksCount":28,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,1,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:03:26.347Z"}},{"type":"Public","name":"client","owner":"triton-inference-server","isFork":false,"description":"Triton Python, C++ and Java client libraries, and GRPC-generated client examples for go, java and scala.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":33,"issueCount":10,"starsCount":504,"forksCount":220,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[5,1,3,1,3,3,5,2,1,4,1,0,1,1,2,3,4,8,3,3,4,1,1,5,2,3,2,1,0,0,4,4,1,0,1,0,1,3,8,40,15,22,4,5,11,13,7,11,17,8,2,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T05:26:43.804Z"}},{"type":"Public","name":"core","owner":"triton-inference-server","isFork":false,"description":"The core library and APIs implementing the Triton Inference Server. 
","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":21,"issueCount":0,"starsCount":94,"forksCount":90,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[5,1,12,2,3,1,1,1,4,2,2,0,1,4,3,0,0,2,1,2,0,5,3,3,1,1,3,0,0,0,2,6,5,1,2,0,2,1,0,3,1,0,1,1,0,1,1,2,2,4,0,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T03:58:04.731Z"}},{"type":"Public","name":"python_backend","owner":"triton-inference-server","isFork":false,"description":"Triton backend that enables pre-process, post-processing and other logic to be implemented in Python.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":10,"issueCount":0,"starsCount":490,"forksCount":138,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[5,1,3,2,7,1,2,2,4,3,2,0,1,0,3,1,0,3,3,2,2,2,2,1,0,1,1,3,0,0,1,2,2,0,1,1,0,0,0,4,1,2,0,1,3,0,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T01:26:31.155Z"}},{"type":"Public","name":"onnxruntime_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for the ONNX Runtime.","allTopics":["inference","backend","triton-inference-server","onnx-runtime"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":3,"issueCount":63,"starsCount":113,"forksCount":53,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,4,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,0,0,0,0,1,0,2,2,1,0,0,1,0,0,1,1,1,0,0,2,1,0,0,0,0,0,0,0,0,0,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T23:03:21.551Z"}},{"type":"Public","name":"tutorials","owner":"triton-inference-server","isFork":false,"description":"This repository contains tutorials and examples for Triton Inference Server","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":14,"issueCount":8,"starsCount":443,"forksCount":76,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,0,0,0,0,0,0,0,0,0,4,1,2,3,2,1,1,3,1,3,0,1,0,0,1,0,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,1,1,1,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T22:18:11.958Z"}},{"type":"Public","name":"triton_cli","owner":"triton-inference-server","isFork":false,"description":"Triton CLI is an open source command line interface that enables users to create, deploy, and profile models served by the Triton Inference Server.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":1,"starsCount":26,"forksCount":1,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,1,9,0,0,2,4,7,0,2,2,0,0,5,2,2,1,0,0,1,0,0,1,5,2,0,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T21:24:52.898Z"}},{"type":"Public","name":"developer_tools","owner":"triton-inference-server","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":4,"issueCount":0,"starsCount":15,"forksCount":9,"license":null,"participation":[2,0,2,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T18:09:40.393Z"}},{"type":"Public","name":"model_analyzer","owner":"triton-inference-server","isFork":false,"description":"Triton Model Analyzer is a CLI tool to help with better understanding of the compute and memory requirements of the Triton Inference Server 
models.","allTopics":["deep-learning","gpu","inference","performance-analysis"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":12,"starsCount":386,"forksCount":73,"license":"Apache License 2.0","participation":[8,1,4,1,2,2,4,3,1,2,3,3,4,4,1,0,1,1,0,0,1,3,1,0,0,2,2,0,2,0,0,4,2,3,0,0,3,0,1,1,2,0,1,2,2,0,2,1,1,0,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T16:13:26.805Z"}},{"type":"Public","name":"dali_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend that allows running GPU-accelerated data pre-processing pipelines implemented in DALI's python API.","allTopics":["python","deep-learning","gpu","image-processing","dali","data-preprocessing","nvidia-dali","fast-data-pipeline"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":5,"issueCount":20,"starsCount":117,"forksCount":27,"license":"MIT License","participation":[0,0,3,0,0,0,0,0,0,0,0,0,0,1,1,0,3,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,1,3,1,1,0,2,0,0,0,0,4,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T16:07:50.238Z"}},{"type":"Public","name":"tensorrtllm_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton TensorRT-LLM Backend","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":16,"issueCount":199,"starsCount":543,"forksCount":76,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,4,0,4,8,2,1,1,1,1,1,1,1,0,2,1,1,1,0,2,0,1,3,1,1,1,1,1,1,1,0,2,1,1,1,1,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T12:36:33.085Z"}},{"type":"Public","name":"openvino_backend","owner":"triton-inference-server","isFork":false,"description":"OpenVINO backend for Triton.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":2,"issueCount":3,"starsCount":25,"forksCount":14,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,3,1,0,0,0,1,0,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T09:36:27.140Z"}},{"type":"Public","name":"pytorch_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for the PyTorch TorchScript models.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":3,"issueCount":0,"starsCount":107,"forksCount":41,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[2,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,3,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T21:33:01.237Z"}},{"type":"Public","name":"vllm_backend","owner":"triton-inference-server","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":0,"starsCount":127,"forksCount":13,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,26,2,2,2,1,1,0,1,0,1,0,1,0,0,1,0,0,0,2,0,0,3,0,0,0,0,0,2,1,1,1,0,0,0,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T18:57:42.971Z"}},{"type":"Public","name":"common","owner":"triton-inference-server","isFork":false,"description":"Common source, scripts and utilities shared across all Triton repositories.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":4,"issueCount":0,"starsCount":57,"forksCount":72,"license":"BSD 
3-Clause \"New\" or \"Revised\" License","participation":[3,0,2,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,3,1,0,0,0,0,0,0,2,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T22:44:20.332Z"}},{"type":"Public","name":"fil_backend","owner":"triton-inference-server","isFork":false,"description":"FIL backend for the Triton Inference Server","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":3,"issueCount":48,"starsCount":66,"forksCount":34,"license":"Apache License 2.0","participation":[0,0,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,2,0,0,0,1,0,0,0,0,0,0,0,0,5,1,0,0,0,1,0,0,0,1,0,0,2,0,0,0,0,2,0,0,1,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-29T20:50:40.097Z"}},{"type":"Public","name":"backend","owner":"triton-inference-server","isFork":false,"description":"Common source, scripts and utilities for creating Triton backends.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":2,"issueCount":0,"starsCount":264,"forksCount":80,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,1,2,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-28T20:04:39.719Z"}},{"type":"Public","name":"tensorflow_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for TensorFlow.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":2,"issueCount":0,"starsCount":40,"forksCount":18,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T22:48:21.256Z"}},{"type":"Public","name":"identity_backend","owner":"triton-inference-server","isFork":false,"description":"Example Triton backend that demonstrates most of the Triton Backend API.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":6,"forksCount":13,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T01:24:29.131Z"}},{"type":"Public","name":"third_party","owner":"triton-inference-server","isFork":false,"description":"Third-party source packages that are modified for use in Triton.","allTopics":[],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":5,"issueCount":0,"starsCount":7,"forksCount":46,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T01:23:35.812Z"}},{"type":"Public","name":"pytriton","owner":"triton-inference-server","isFork":false,"description":"PyTriton is a Flask/FastAPI-like interface that simplifies Triton's deployment in Python environments.","allTopics":["gpu","deep-learning","inference"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":9,"starsCount":677,"forksCount":45,"license":"Apache License 
2.0","participation":[0,0,5,4,3,6,7,3,4,6,3,7,18,3,1,7,2,2,1,3,7,11,4,6,4,6,6,7,0,0,12,4,1,0,2,5,1,3,4,1,4,1,2,3,3,2,2,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T07:34:36.438Z"}},{"type":"Public","name":"square_backend","owner":"triton-inference-server","isFork":false,"description":"Simple Triton backend used for testing.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":4,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:31:55.150Z"}},{"type":"Public","name":"repeat_backend","owner":"triton-inference-server","isFork":false,"description":"An example Triton backend that demonstrates sending zero, one, or multiple responses for each request. ","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":7,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:31:43.795Z"}},{"type":"Public","name":"redis_cache","owner":"triton-inference-server","isFork":false,"description":"TRITONCACHE implementation of a Redis cache","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":2,"starsCount":7,"forksCount":4,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[11,3,17,4,8,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:31:36.110Z"}},{"type":"Public","name":"local_cache","owner":"triton-inference-server","isFork":false,"description":"Implementation of a local in-memory cache for Triton Inference Server's TRITONCACHE API","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":1,"starsCount":2,"forksCount":1,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[1,0,2,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:30:58.089Z"}},{"type":"Public","name":"checksum_repository_agent","owner":"triton-inference-server","isFork":false,"description":"The Triton repository agent that verifies model checksums.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":6,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:30:14.209Z"}},{"type":"Public","name":"model_navigator","owner":"triton-inference-server","isFork":false,"description":"Triton Model Navigator is an inference toolkit designed for optimizing and deploying Deep Learning models with a focus on NVIDIA GPUs.","allTopics":["deep-learning","gpu","inference"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":161,"forksCount":24,"license":"Apache License 
2.0","participation":[1,0,7,12,6,3,9,4,5,3,3,1,5,2,2,6,4,3,1,2,3,5,1,0,3,4,2,3,3,0,2,2,2,5,2,5,0,2,2,4,4,9,1,3,2,5,3,4,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T18:17:29.722Z"}},{"type":"Public","name":"contrib","owner":"triton-inference-server","isFork":false,"description":"Community contributions to Triton that are not officially supported or maintained by the Triton project.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":8,"forksCount":7,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-16T23:07:29.062Z"}},{"type":"Public","name":"stateful_backend","owner":"triton-inference-server","isFork":false,"description":"Triton backend for managing the model state tensors automatically in sequence batcher","allTopics":["backend","triton","stateful"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":12,"forksCount":4,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-12T16:39:27.879Z"}}],"repositoryCount":34,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}