{"payload":{"pageCount":8,"repositories":[{"type":"Public","name":"diffusers","owner":"huggingface","isFork":false,"description":"🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch and FLAX.","allTopics":["deep-learning","pytorch","image-generation","flax","hacktoberfest","diffusion","text2image","image2image","jax","score-based-generative-modeling","stable-diffusion","stable-diffusion-diffusers","latent-diffusion-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":134,"issueCount":354,"starsCount":23317,"forksCount":4801,"license":"Apache License 2.0","participation":[21,12,19,44,31,45,37,43,28,32,54,37,33,48,41,35,32,24,29,34,42,51,36,25,52,23,36,49,31,40,45,26,24,36,45,33,35,39,36,47,29,32,14,23,23,30,21,34,18,26,24,25],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T08:04:23.858Z"}},{"type":"Public","name":"dataset-viewer","owner":"huggingface","isFork":false,"description":"Lightweight web API for visualizing and exploring any dataset - computer vision, speech, text, and tabular - stored on the Hugging Face Hub","allTopics":["nlp","data","machine-learning","api-rest","datasets","huggingface"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":151,"starsCount":632,"forksCount":64,"license":"Apache License 2.0","participation":[32,25,28,28,6,25,26,47,15,22,14,22,18,16,28,36,30,21,15,26,17,18,20,6,8,17,12,5,0,18,16,14,14,17,23,11,25,21,17,19,10,12,21,23,8,12,13,9,33,13,10,14],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T08:03:45.365Z"}},{"type":"Public","name":"doc-builder","owner":"huggingface","isFork":false,"description":"The package used to build the documentation of our Hugging Face repos","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":45,"starsCount":72,"forksCount":30,"license":"Apache License 2.0","participation":[0,0,0,2,0,0,0,0,2,1,2,0,0,2,16,9,1,2,2,4,5,0,10,3,4,0,0,0,0,0,0,1,2,11,0,0,0,0,2,0,1,0,5,0,1,2,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T08:02:42.517Z"}},{"type":"Public","name":"tgi-gaudi","owner":"huggingface","isFork":true,"description":"Large Language Model Text Generation Inference on Habana Gaudi","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":3,"starsCount":17,"forksCount":916,"license":"Apache License 2.0","participation":[4,5,12,5,22,15,16,10,17,7,6,6,15,1,9,18,9,2,8,1,3,1,4,10,4,11,13,7,1,4,8,11,20,10,11,15,18,25,3,9,14,9,15,19,4,4,6,3,2,1,2,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T07:56:42.973Z"}},{"type":"Public","name":"transformers","owner":"huggingface","isFork":false,"description":"🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.","allTopics":["python","seq2seq","flax","language-models","nlp-library","hacktoberfest","jax","pytorch-transformers","model-hub","nlp","machine-learning","natural-language-processing","deep-learning","tensorflow","pytorch","transformer","speech-recognition","pretrained-models","language-model","bert"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":244,"issueCount":868,"starsCount":127458,"forksCount":25265,"license":"Apache License 
2.0","participation":[68,67,50,34,61,84,48,64,54,74,75,73,38,58,37,60,52,66,59,71,67,70,59,47,45,75,61,34,5,28,64,52,52,52,35,67,42,60,60,56,57,51,56,58,78,67,70,49,61,75,58,57],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T07:56:42.875Z"}},{"type":"Public","name":"datatrove","owner":"huggingface","isFork":false,"description":"Freeing data processing from scripting madness by providing a set of platform-agnostic customizable pipeline processing blocks.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":23,"starsCount":1649,"forksCount":97,"license":"Apache License 2.0","participation":[15,18,38,22,7,7,12,3,8,3,0,5,3,2,0,0,3,10,10,2,12,1,34,10,11,8,9,4,0,1,0,11,5,16,2,8,6,11,2,3,7,1,2,8,9,2,10,2,6,12,3,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T07:41:49.411Z"}},{"type":"Public","name":"cookbook","owner":"huggingface","isFork":false,"description":"Open-source AI cookbook","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":20,"issueCount":16,"starsCount":1422,"forksCount":186,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,2,0,4,0,0,0,9,30,41,23,0,44,54,29,24,19,0,3,3,0,6,8,7,9,4,18,21,39],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T07:38:39.251Z"}},{"type":"Public","name":"text-generation-inference","owner":"huggingface","isFork":false,"description":"Large Language Model Text Generation Inference","allTopics":["nlp","bloom","deep-learning","inference","pytorch","falcon","transformer","gpt","starcoder"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":14,"issueCount":144,"starsCount":8220,"forksCount":916,"license":"Apache License 2.0","participation":[4,5,12,5,22,15,16,10,17,7,6,6,15,1,9,18,9,2,8,1,3,1,4,10,3,7,13,5,0,3,4,4,20,10,11,11,6,12,2,3,11,4,9,18,11,23,9,6,18,18,18,20],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T07:36:44.150Z"}},{"type":"Public","name":"optimum-habana","owner":"huggingface","isFork":false,"description":"Easy and lightning fast training of 🤗 Transformers on Habana Gaudi processor (HPU)","allTopics":["transformers","bert","fine-tuning","hpu","habana"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":45,"issueCount":17,"starsCount":115,"forksCount":133,"license":"Apache License 2.0","participation":[8,6,5,7,5,10,8,3,11,12,20,9,11,15,14,14,8,9,7,21,9,10,17,14,22,11,10,4,4,9,9,12,10,9,9,14,10,10,12,23,7,12,10,9,10,24,24,8,9,11,9,37],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T07:31:10.431Z"}},{"type":"Public","name":"blog","owner":"huggingface","isFork":false,"description":"Public repo for HF blog posts","allTopics":["hacktoberfest"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":65,"issueCount":135,"starsCount":2096,"forksCount":644,"license":null,"participation":[22,17,9,9,22,22,14,12,8,15,27,17,22,37,13,18,9,9,18,9,16,9,2,4,13,18,11,4,4,8,9,8,13,24,5,15,33,19,7,15,21,6,23,22,22,9,7,22,11,20,13,9],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T07:16:16.117Z"}},{"type":"Public","name":"peft","owner":"huggingface","isFork":false,"description":"🤗 PEFT: State-of-the-art Parameter-Efficient 
Fine-Tuning.","allTopics":["python","adapter","transformers","pytorch","lora","diffusion","parameter-efficient-learning","llm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":33,"starsCount":14591,"forksCount":1371,"license":"Apache License 2.0","participation":[13,16,5,8,17,11,7,7,2,11,8,6,10,6,11,10,9,5,4,5,12,20,11,13,17,22,12,6,4,7,15,6,12,11,16,18,12,9,11,4,9,2,10,7,4,17,5,5,11,9,7,9],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T07:11:11.549Z"}},{"type":"Public","name":"pytorch-image-models","owner":"huggingface","isFork":false,"description":"The largest collection of PyTorch image encoders / backbones. Including train, eval, inference, export scripts, and pretrained weights -- ResNet, ResNeXT, EfficientNet, NFNet, Vision Transformer (ViT), MobileNetV4, MobileNet-V3 & V2, RegNet, DPN, CSPNet, Swin Transformer, MaxViT, CoAtNet, ConvNeXt, and more","allTopics":["pytorch","imagenet","image-classification","resnet","pretrained-models","mixnet","pretrained-weights","distributed-training","dual-path-networks","mobilenet-v2","mobile-deep-learning","mobilenetv3","efficientnet","augmix","randaugment","nfnets","normalization-free-training","vision-transformer-models","convnext","maxvit"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":27,"issueCount":54,"starsCount":30357,"forksCount":4611,"license":"Apache License 2.0","participation":[7,1,1,1,0,1,12,18,11,24,19,14,0,3,3,9,4,7,16,7,10,1,18,11,4,5,2,1,5,6,2,2,9,5,16,8,0,1,1,9,7,2,7,11,4,7,13,20,17,15,6,20],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T06:49:39.895Z"}},{"type":"Public","name":"text-embeddings-inference","owner":"huggingface","isFork":false,"description":"A blazing fast inference solution for text embeddings models","allTopics":["ai","ml","embeddings","huggingface","llm"],"primaryLanguage":{"name":"Rust","color":"#dea584"},"pullRequestCount":8,"issueCount":66,"starsCount":2153,"forksCount":125,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,26,10,6,2,5,7,4,5,0,0,1,0,4,0,1,3,0,2,0,8,5,6,1,7,3,2,8,1,1,0,0,1,0,1,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T06:47:03.858Z"}},{"type":"Public","name":"optimum-tpu","owner":"huggingface","isFork":false,"description":"Google TPU optimizations for transformers models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":35,"forksCount":7,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,2,3,1,11,3,0,5,6,0,3,1,5,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T06:41:41.015Z"}},{"type":"Public","name":"candle","owner":"huggingface","isFork":false,"description":"Minimalist ML framework for Rust","allTopics":[],"primaryLanguage":{"name":"Rust","color":"#dea584"},"pullRequestCount":60,"issueCount":250,"starsCount":14080,"forksCount":778,"license":"Apache License 2.0","participation":[26,199,178,111,60,50,91,50,100,76,103,62,60,56,52,46,28,29,37,38,41,32,18,8,11,8,42,20,29,19,37,10,1,19,23,16,19,24,14,19,36,34,26,24,24,16,7,5,8,10,11,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T06:15:46.897Z"}},{"type":"Public","name":"nanotron","owner":"huggingface","isFork":false,"description":"Minimalistic large language model 3D-parallelism 
training","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":27,"issueCount":32,"starsCount":902,"forksCount":80,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,1,0,0,0,0,0,0,0,0,0,0,8,23,9,25,34,101,89,50,98,61,27,73,21,28,19,21,34,62,43,29,12,16,6,2,3,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T06:07:10.471Z"}},{"type":"Public","name":"autotrain-advanced","owner":"huggingface","isFork":false,"description":"🤗 AutoTrain Advanced","allTopics":["natural-language-processing","natural-language-understanding","huggingface","autotrain","python","machine-learning","deep-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":22,"starsCount":3548,"forksCount":423,"license":"Apache License 2.0","participation":[7,2,0,8,4,5,29,4,15,22,7,20,2,3,2,9,4,3,16,4,5,1,5,11,13,26,40,25,3,0,0,1,4,4,9,12,11,10,22,14,17,9,8,4,12,33,14,27,13,18,8,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T05:24:33.433Z"}},{"type":"Public","name":"lm-evaluation-harness","owner":"huggingface","isFork":true,"description":"A framework for few-shot evaluation of language models.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":0,"starsCount":2,"forksCount":1448,"license":"MIT License","participation":[101,62,101,68,128,36,35,89,160,67,69,63,43,86,38,16,33,32,32,3,53,19,47,84,71,45,35,22,8,6,18,17,9,16,10,8,20,13,15,12,8,4,5,2,2,5,19,6,3,11,8,10],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T04:53:28.057Z"}},{"type":"Public","name":"trl","owner":"huggingface","isFork":false,"description":"Train transformer language models with reinforcement learning.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":13,"issueCount":46,"starsCount":8485,"forksCount":1026,"license":"Apache License 2.0","participation":[6,14,4,7,9,12,10,8,3,9,8,15,12,19,10,4,6,12,10,12,8,11,5,3,12,12,3,21,4,20,8,12,13,10,0,10,11,5,4,16,13,8,7,13,19,3,8,5,1,12,6,14],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T23:27:35.687Z"}},{"type":"Public","name":"accelerate","owner":"huggingface","isFork":false,"description":"🚀 A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":14,"issueCount":107,"starsCount":7224,"forksCount":844,"license":"Apache License 2.0","participation":[10,14,16,13,25,15,6,14,6,11,15,11,14,11,0,7,5,9,10,7,8,18,17,3,11,10,5,6,4,14,8,11,6,12,14,9,13,16,14,5,16,8,11,15,7,12,14,10,5,8,4,12],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T23:20:52.437Z"}},{"type":"Public","name":"ratchet","owner":"huggingface","isFork":false,"description":"A cross-platform browser ML framework.","allTopics":[],"primaryLanguage":{"name":"Rust","color":"#dea584"},"pullRequestCount":5,"issueCount":25,"starsCount":397,"forksCount":20,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,44,71,71,78,74,35,67,42,29,49,43,18,50,78,47,24,13,0,25,35,22,17],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T21:59:26.661Z"}},{"type":"Public","name":"lerobot","owner":"huggingface","isFork":false,"description":"🤗 LeRobot: End-to-end Learning for Real-World Robotics in 
Pytorch","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":23,"issueCount":16,"starsCount":3813,"forksCount":290,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,2,1,7,22,47,68,37,82,34,65,79,18,19,19,15,14,7,16,7],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T21:17:17.537Z"}},{"type":"Public","name":"chat-ui","owner":"huggingface","isFork":false,"description":"Open source codebase powering the HuggingChat app","allTopics":["typescript","svelte","hacktoberfest","tailwindcss","huggingface","svelte-kit","sveltekit","llm","chatgpt"],"primaryLanguage":{"name":"TypeScript","color":"#3178c6"},"pullRequestCount":23,"issueCount":198,"starsCount":6605,"forksCount":927,"license":"Apache License 2.0","participation":[7,2,0,3,3,8,5,4,1,13,2,0,5,4,17,7,16,14,9,8,1,0,13,6,9,17,5,6,6,7,5,11,20,21,15,21,16,12,13,12,9,10,18,16,26,18,18,5,7,36,31,16],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T20:22:26.481Z"}},{"type":"Public","name":"huggingface.js","owner":"huggingface","isFork":false,"description":"Utilities to use the Hugging Face Hub API","allTopics":["machine-learning","inference","hub","api-client","huggingface"],"primaryLanguage":{"name":"TypeScript","color":"#3178c6"},"pullRequestCount":25,"issueCount":87,"starsCount":1253,"forksCount":151,"license":"MIT License","participation":[2,0,6,0,0,8,6,0,0,1,1,0,0,0,0,5,11,5,4,10,9,7,12,31,23,8,24,6,0,1,2,14,5,6,5,13,22,24,7,24,16,18,13,21,12,10,16,16,18,20,12,19],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T20:16:44.732Z"}},{"type":"Public","name":"optimum-intel","owner":"huggingface","isFork":false,"description":"🤗 Optimum Intel: Accelerate inference with Intel optimization tools","allTopics":["optimization","intel","transformers","pruning","distillation","onnx","openvino","diffusers","inference","quantization"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":20,"issueCount":10,"starsCount":341,"forksCount":95,"license":"Apache License 2.0","participation":[6,4,3,10,2,9,27,2,3,1,7,1,5,4,5,5,2,5,4,1,12,1,0,0,3,5,5,3,2,8,10,9,12,12,6,8,5,7,15,16,14,10,8,6,18,25,15,7,14,4,11,17],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T17:20:04.896Z"}},{"type":"Public","name":"hub-docs","owner":"huggingface","isFork":false,"description":"Docs of the Hugging Face Hub","allTopics":["machine-learning","hacktoberfest"],"primaryLanguage":null,"pullRequestCount":18,"issueCount":70,"starsCount":246,"forksCount":208,"license":"Apache License 2.0","participation":[20,10,11,10,7,3,5,2,2,2,8,5,13,4,10,17,15,11,19,11,20,12,10,23,16,2,6,0,1,4,8,4,7,6,6,3,2,5,2,14,2,4,5,6,2,2,1,3,3,5,6,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T17:07:42.655Z"}},{"type":"Public","name":"optimum-neuron","owner":"huggingface","isFork":false,"description":"Easy, fast and very cheap training and inference on AWS Trainium and Inferentia chips.","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":16,"issueCount":84,"starsCount":173,"forksCount":49,"license":"Apache License 2.0","participation":[3,8,4,11,8,7,0,8,2,11,8,4,10,7,5,4,3,5,5,4,8,8,9,12,2,2,4,5,0,0,6,12,6,5,7,8,2,5,3,1,5,13,12,8,4,3,5,2,6,4,8,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T16:47:07.041Z"}},{"type":"Public","name":"lighteval","owner":"huggingface","isFork":false,"description":"LightEval is a lightweight LLM evaluation suite that Hugging Face has 
been using internally with the recently released LLM data processing library datatrove and LLM training library nanotron.","allTopics":["evaluation","evaluation-metrics","evaluation-framework","huggingface"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":12,"issueCount":44,"starsCount":439,"forksCount":51,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,20,12,16,2,12,12,9,6,8,7,3,5,2,4,5,2,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T15:52:08.375Z"}},{"type":"Public","name":"optimum-nvidia","owner":"huggingface","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":40,"starsCount":826,"forksCount":79,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,2,2,10,11,10,17,37,23,10,9,2,0,2,1,0,0,0,0,1,6,5,0,2,16,1,25,11,1,0,3,0,0,2,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T15:46:36.909Z"}},{"type":"Public","name":"optimum-quanto","owner":"huggingface","isFork":false,"description":"A pytorch quantization backend for optimum","allTopics":["pytorch","quantization","optimum"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":9,"starsCount":621,"forksCount":32,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,13,29,14,18,22,0,3,8,2,39,51,33,14,4,6,3,12,6,11,6,20,31,12,21,10,13,7,3,18,6,1,6,9,4,4,17,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T14:22:21.740Z"}}],"repositoryCount":226,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}
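The listing is delivered by the page as a JSON payload in which each repository is an object with fields such as name, description, primaryLanguage, starsCount, forksCount, license, and lastUpdated. As a minimal sketch (not part of the original page), assuming that raw payload has been saved locally to repositories.json (a hypothetical filename), the entries could be summarized and sorted by star count with the standard library alone:

```python
import json

# Load the captured page payload; the top-level object has "payload" and "title" keys.
with open("repositories.json", encoding="utf-8") as f:
    payload = json.load(f)["payload"]

repos = payload["repositories"]

# Print one line per repository, sorted by star count, descending.
# "primaryLanguage" and "license" can be null, so both are handled defensively.
for repo in sorted(repos, key=lambda r: r["starsCount"], reverse=True):
    language = (repo["primaryLanguage"] or {}).get("name", "n/a")
    print(
        f'{repo["name"]:<28} {language:<18} '
        f'{repo["starsCount"]:>7} stars  {repo["forksCount"]:>6} forks  '
        f'{repo["license"] or "no license"}'
    )

print(f'{len(repos)} of {payload["repositoryCount"]} repositories shown')
```

The same loop could just as easily filter on other payload fields shown above, for example keeping only entries where isFork is false or where issueCount exceeds a threshold.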