{"payload":{"pageCount":16,"repositories":[{"type":"Public","name":"spark-rapids","owner":"NVIDIA","isFork":false,"description":"Spark RAPIDS plugin - accelerate Apache Spark with GPUs","allTopics":["big-data","gpu","rapids","spark"],"primaryLanguage":{"name":"Scala","color":"#c22d40"},"pullRequestCount":28,"issueCount":1347,"starsCount":762,"forksCount":225,"license":"Apache License 2.0","participation":[16,18,27,41,43,46,12,23,21,12,20,32,31,9,22,32,22,22,45,45,39,40,36,11,14,29,14,5,22,52,16,23,14,13,23,7,10,9,11,12,7,10,7,12,16,17,25,14,24,6,9,15],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T03:17:35.771Z"}},{"type":"Public","name":"cuda-quantum","owner":"NVIDIA","isFork":false,"description":"C++ and Python support for the CUDA Quantum programming model for heterogeneous quantum-classical workflows","allTopics":["python","cpp","quantum","quantum-computing","quantum-programming-language","quantum-algorithms","quantum-machine-learning","unitaryhack"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":35,"issueCount":221,"starsCount":423,"forksCount":146,"license":"Other","participation":[20,32,30,7,21,10,21,8,14,20,8,13,24,8,21,16,16,19,30,8,9,18,11,12,3,3,9,11,14,8,21,14,13,16,25,26,22,13,11,7,11,11,13,19,21,6,7,7,22,16,12,26],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T02:29:23.384Z"}},{"type":"Public","name":"warp","owner":"NVIDIA","isFork":false,"description":"A Python framework for high performance GPU simulation and graphics","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":39,"starsCount":3617,"forksCount":202,"license":"Other","participation":[8,31,26,11,4,32,24,31,32,4,7,15,31,13,39,41,45,37,19,23,3,26,23,22,0,6,22,36,30,11,2,13,26,53,51,49,27,10,15,7,35,35,33,35,28,18,39,49,17,11,7,56],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T01:36:49.724Z"}},{"type":"Public","name":"NeMo","owner":"NVIDIA","isFork":false,"description":"A scalable generative AI framework built for researchers and developers working on Large Language Models, Multimodal, and Speech AI (Automatic Speech Recognition and Text-to-Speech)","allTopics":["machine-translation","tts","speech-synthesis","neural-networks","deeplearning","speaker-recognition","asr","multimodal","speech-translation","large-language-models","speaker-diariazation","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":89,"issueCount":53,"starsCount":10855,"forksCount":2264,"license":"Apache License 2.0","participation":[24,16,11,16,21,20,20,16,23,20,17,24,31,23,14,23,4,12,6,11,3,13,14,14,4,10,17,17,28,9,16,19,23,43,20,21,44,13,22,29,30,25,33,38,26,27,23,34,38,16,28,34],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T01:30:18.148Z"}},{"type":"Public","name":"Fuser","owner":"NVIDIA","isFork":false,"description":"A Fusion Code Generator for NVIDIA GPUs (commonly known as \"nvFuser\")","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":104,"issueCount":238,"starsCount":235,"forksCount":43,"license":"Other","participation":[23,25,17,15,17,26,29,31,15,16,35,44,21,24,27,29,30,39,33,43,37,36,45,13,3,10,26,15,13,20,21,18,35,15,22,37,30,21,22,13,31,30,22,20,19,20,17,40,28,20,35,34],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T23:05:29.481Z"}},{"type":"Public","name":"NeMo-Aligner","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for efficient model 
alignment","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":26,"issueCount":44,"starsCount":421,"forksCount":46,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,8,2,12,3,2,2,0,2,1,4,3,2,1,0,3,4,3,2,2,3,3,4,0,3,4,0,2,0,3,3,5,3,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T22:30:08.112Z"}},{"type":"Public","name":"numba-cuda","owner":"NVIDIA","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":3,"starsCount":7,"forksCount":6,"license":"BSD 2-Clause \"Simplified\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,11,6,4,1,5],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T21:11:27.828Z"}},{"type":"Public","name":"cuCollections","owner":"NVIDIA","isFork":false,"description":"","allTopics":["datastructures","cpp","gpu","cuda","hashmap","cpp17","hashset","hashtable"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":11,"issueCount":62,"starsCount":446,"forksCount":79,"license":"Apache License 2.0","participation":[2,1,0,6,2,1,3,3,3,2,1,1,3,5,1,0,1,0,2,6,0,1,3,6,1,4,1,0,4,5,1,2,0,0,0,4,3,0,3,1,5,0,1,3,8,3,6,4,3,6,6,8],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T20:58:51.793Z"}},{"type":"Public","name":"TensorRT-LLM","owner":"NVIDIA","isFork":false,"description":"TensorRT-LLM provides users with an easy-to-use Python API to define Large Language Models (LLMs) and build TensorRT engines that contain state-of-the-art optimizations to perform inference efficiently on NVIDIA GPUs. TensorRT-LLM also contains components to create Python and C++ runtimes that execute those TensorRT engines.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":55,"issueCount":586,"starsCount":7392,"forksCount":799,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,3,5,0,6,5,3,2,2,5,1,1,4,1,1,1,3,2,1,1,6,1,0,4,3,1,1,1,1,1,1,1,1,1,1,1,4,1,1,1,1,1,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T20:56:44.807Z"}},{"type":"Public","name":"spark-rapids-jni","owner":"NVIDIA","isFork":false,"description":"RAPIDS Accelerator JNI For Apache Spark","allTopics":[],"primaryLanguage":{"name":"Cuda","color":"#3A4E3A"},"pullRequestCount":5,"issueCount":72,"starsCount":32,"forksCount":58,"license":"Apache License 2.0","participation":[12,11,27,12,18,15,14,14,14,13,9,47,12,22,11,13,12,9,19,25,17,25,19,11,1,5,13,12,28,23,16,18,12,14,21,16,11,23,9,29,18,17,12,13,10,20,30,20,15,13,17,18],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T20:30:43.526Z"}},{"type":"Public","name":"cloudai","owner":"NVIDIA","isFork":false,"description":"CloudAI Benchmark Framework","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":0,"starsCount":20,"forksCount":10,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,38,42,47,88,25,34,10,16],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T19:46:41.914Z"}},{"type":"Public","name":"DALI","owner":"NVIDIA","isFork":false,"description":"A GPU-accelerated library containing highly optimized building blocks and an execution engine for data processing to accelerate deep learning training and inference 
applications.","allTopics":["python","machine-learning","deep-learning","neural-network","mxnet","gpu","image-processing","pytorch","gpu-tensorflow","data-processing","data-augmentation","audio-processing","paddle","image-augmentation","fast-data-pipeline"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":35,"issueCount":186,"starsCount":4982,"forksCount":609,"license":"Apache License 2.0","participation":[9,5,3,11,13,4,9,10,11,6,4,5,11,11,5,10,9,16,7,15,20,15,13,4,0,2,6,8,5,11,6,12,9,6,14,11,9,11,6,13,9,7,3,9,7,4,7,13,13,10,4,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T18:33:39.020Z"}},{"type":"Public","name":"GenerativeAIExamples","owner":"NVIDIA","isFork":false,"description":"Generative AI reference workflows optimized for accelerated infrastructure and microservice architecture.","allTopics":["microservice","gpu-acceleration","nemo","tensorrt","rag","triton-inference-server","large-language-models","llm","llm-inference","retrieval-augmented-generation"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":5,"issueCount":20,"starsCount":1778,"forksCount":294,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,2,0,1,2,1,3,0,2,2,0,1,2,2,0,5,1,0,0,1,2,1,2,1,1,0,0,1,0,0,0,0,1,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T18:14:06.870Z"}},{"type":"Public","name":"NeMo-Guardrails","owner":"NVIDIA","isFork":false,"description":"NeMo Guardrails is an open-source toolkit for easily adding programmable guardrails to LLM-based conversational systems.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":33,"issueCount":173,"starsCount":3730,"forksCount":329,"license":"Other","participation":[27,16,26,32,5,21,15,25,90,24,33,42,54,68,55,52,53,52,82,44,38,46,37,31,1,19,22,46,30,49,46,90,46,99,117,62,41,99,16,0,25,23,6,11,8,9,11,33,39,9,7,18],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T18:04:33.189Z"}},{"type":"Public","name":"k8s-test-infra","owner":"NVIDIA","isFork":false,"description":"K8s-test-infra","allTopics":[],"primaryLanguage":{"name":"Go","color":"#00ADD8"},"pullRequestCount":1,"issueCount":0,"starsCount":2,"forksCount":4,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,6,13,0,5,0,10,0,0,3,0,0,8,0,2,0,25,4,0,1,0,0,7,4,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T17:11:16.214Z"}},{"type":"Public","name":"cccl","owner":"NVIDIA","isFork":false,"description":"CUDA Core Compute Libraries","allTopics":["cpp","hpc","gpu","modern-cpp","parallel-computing","cuda","nvidia","gpu-acceleration","cuda-kernels","gpu-computing","parallel-algorithm","parallel-programming","nvidia-gpu","gpu-programming","cuda-library","cpp-programming","cuda-programming","accelerated-computing","cuda-cpp"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":39,"issueCount":633,"starsCount":951,"forksCount":119,"license":"Other","participation":[43,26,22,25,23,35,31,16,18,30,38,24,29,48,30,17,19,26,14,12,14,6,11,6,1,1,4,13,11,3,8,15,12,20,20,10,7,9,7,17,18,4,11,22,15,7,9,18,22,15,10,14],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T15:44:18.255Z"}},{"type":"Public","name":"JAX-Toolbox","owner":"NVIDIA","isFork":false,"description":"JAX-Toolbox","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":35,"issueCount":111,"starsCount":206,"forksCount":35,"license":"Apache License 
2.0","participation":[6,18,3,8,6,4,5,19,3,11,6,10,11,13,6,8,9,7,1,9,6,8,6,9,0,5,19,9,12,11,12,14,12,15,13,12,2,6,2,9,8,1,14,8,8,7,5,7,6,4,3,6],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T15:44:03.071Z"}},{"type":"Public","name":"k8s-dra-driver","owner":"NVIDIA","isFork":false,"description":"Dynamic Resource Allocation (DRA) for NVIDIA GPUs in Kubernetes","allTopics":[],"primaryLanguage":{"name":"Go","color":"#00ADD8"},"pullRequestCount":15,"issueCount":17,"starsCount":191,"forksCount":35,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,25,16,28,0,13,5,9,6,25,0,27,0,0,3,0,1,0,0,0,22,3,0,10,1,6,3,15,4,3,1,1,0,8,0,11,1,0,1,0,1,1,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T15:34:22.403Z"}},{"type":"Public","name":"k8s-samples","owner":"NVIDIA","isFork":false,"description":"Sample Dockerfiles for Docker Hub images","allTopics":[],"primaryLanguage":{"name":"Dockerfile","color":"#384d54"},"pullRequestCount":1,"issueCount":0,"starsCount":1,"forksCount":3,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T15:32:09.178Z"}},{"type":"Public","name":"NV-Kernels","owner":"NVIDIA","isFork":false,"description":"Ubuntu kernels which are optimized for NVIDIA server systems","allTopics":[],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":13,"issueCount":0,"starsCount":8,"forksCount":7,"license":"Other","participation":[1682,1673,1767,2334,2147,1901,1540,1526,1074,1948,2049,1696,1763,1821,1688,1775,1271,1093,1236,1578,1710,2093,1938,2014,780,854,1298,1071,1762,1948,1918,1999,2026,1780,1691,1338,1308,1666,1620,1799,1615,1605,1454,1015,581,438,354,337,228,251,178,56],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T15:00:36.685Z"}},{"type":"Public","name":"NeMo-Framework-Launcher","owner":"NVIDIA","isFork":false,"description":"Provides end-to-end model development pipelines for LLMs and Multimodal models that can be launched on-prem or cloud-native.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":13,"issueCount":23,"starsCount":420,"forksCount":123,"license":"Apache License 2.0","participation":[5,14,10,12,13,13,10,6,9,27,9,0,8,36,2,12,7,6,12,16,13,26,37,24,4,16,20,25,24,20,29,51,19,31,36,30,17,5,9,21,17,24,14,23,56,29,3,20,23,11,5,19],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T14:46:07.048Z"}},{"type":"Public","name":"nvidia-container-toolkit","owner":"NVIDIA","isFork":false,"description":"Build and run containers leveraging NVIDIA GPUs","allTopics":[],"primaryLanguage":{"name":"Go","color":"#00ADD8"},"pullRequestCount":14,"issueCount":267,"starsCount":1887,"forksCount":214,"license":"Apache License 2.0","participation":[10,22,0,0,27,12,5,25,8,0,0,5,0,1,5,4,7,4,6,41,11,15,8,5,0,2,12,2,7,24,17,23,3,12,9,11,13,4,9,18,14,5,2,9,5,7,9,12,13,19,8,8],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T13:14:41.359Z"}},{"type":"Public","name":"modulus","owner":"NVIDIA","isFork":false,"description":"Open-source deep-learning framework for building, training, and fine-tuning deep learning models using state-of-the-art Physics-ML methods","allTopics":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":99,"starsCount":792,"forksCount":169,"license":"Apache License 
2.0","participation":[1,4,9,7,13,3,1,3,1,2,6,11,5,3,3,4,3,10,5,8,8,8,4,4,0,6,3,7,5,5,5,4,7,9,6,1,5,0,4,15,4,4,5,9,1,4,4,1,4,0,10,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T13:09:15.706Z"}},{"type":"Public","name":"gpu-admin-tools","owner":"NVIDIA","isFork":false,"description":"GPU Admin Tools. Includes Confidential Computing controls for H100, and other functionality","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":11,"forksCount":4,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T10:32:03.488Z"}},{"type":"Public","name":"NeMo-speech-data-processor","owner":"NVIDIA","isFork":false,"description":"A toolkit for processing speech data and creating speech datasets","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":1,"starsCount":65,"forksCount":21,"license":"Apache License 2.0","participation":[1,17,20,0,0,0,0,0,0,0,0,0,0,0,0,1,1,20,0,3,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,5,0,0,1,0,1,2,2,1,0,1,0,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T09:45:16.084Z"}},{"type":"Public","name":"edk2","owner":"NVIDIA","isFork":false,"description":"NVIDIA fork of tianocore/edk2","allTopics":[],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":14,"issueCount":0,"starsCount":17,"forksCount":13,"license":"Other","participation":[17,19,10,39,6,4,2,1,2,2,8,3,3,4,0,0,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T09:06:53.362Z"}},{"type":"Public","name":"TransformerEngine","owner":"NVIDIA","isFork":false,"description":"A library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada GPUs, to provide better performance with lower memory utilization in both training and inference.","allTopics":["python","machine-learning","deep-learning","gpu","cuda","pytorch","jax","fp8"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":31,"issueCount":115,"starsCount":1604,"forksCount":256,"license":"Apache License 2.0","participation":[5,17,2,7,9,7,3,5,4,1,3,11,7,12,5,10,1,8,6,6,6,11,5,8,0,5,9,6,15,11,10,4,7,5,10,4,6,2,13,4,14,8,13,4,8,9,9,5,26,13,6,6],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T08:24:21.241Z"}},{"type":"Public","name":"spark-rapids-tools","owner":"NVIDIA","isFork":false,"description":"User tools for Spark RAPIDS","allTopics":[],"primaryLanguage":{"name":"Scala","color":"#c22d40"},"pullRequestCount":7,"issueCount":207,"starsCount":44,"forksCount":34,"license":"Apache License 2.0","participation":[12,4,8,3,2,9,4,7,8,11,4,6,4,4,7,2,6,5,2,4,3,2,4,2,5,4,4,3,4,12,1,7,2,5,4,4,3,13,9,11,11,6,10,8,3,6,6,9,19,4,6,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T00:26:34.200Z"}},{"type":"Public","name":"aistore","owner":"NVIDIA","isFork":false,"description":"AIStore: scalable storage for AI applications","allTopics":["object-storage","multiple-backends","distributed-shuffle","deploy-anywhere","linear-scalability","etl-offload","small-file-datasets","network-of-clusters"],"primaryLanguage":{"name":"Go","color":"#00ADD8"},"pullRequestCount":0,"issueCount":0,"starsCount":1156,"forksCount":153,"license":"MIT 
License","participation":[9,9,24,24,18,23,21,25,20,18,19,27,15,15,14,14,12,16,7,15,16,11,16,13,9,12,18,16,20,17,25,27,14,18,22,20,28,15,15,16,12,19,20,26,16,16,13,11,15,22,23,19],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T00:13:48.236Z"}},{"type":"Public","name":"nim-deploy","owner":"NVIDIA","isFork":false,"description":"A collection of YAML files, Helm Charts, Operator code, and guides to act as an example reference implementation for NVIDIA NIM deployment.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":7,"starsCount":55,"forksCount":21,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,22,0,0,6,11],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-03T23:18:25.724Z"}}],"repositoryCount":480,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"NVIDIA repositories"}