{"payload":{"header_redesign_enabled":false,"results":[{"id":"451691984","archived":false,"color":"#DA5B0B","followers":4454,"has_funding_file":false,"hl_name":"salesforce/BLIP","hl_trunc_description":"PyTorch code for BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation ","language":"Jupyter Notebook","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":451691984,"name":"BLIP","owner_id":453694,"owner_login":"salesforce","updated_at":"2024-05-20T21:21:02.703Z","has_issues":true}},"sponsorable":false,"topics":["image-captioning","visual-reasoning","visual-question-answering","vision-language","vision-language-transformer","image-text-retrieval","vision-and-language-pre-training"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":47,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Asalesforce%252FBLIP%2B%2Blanguage%253A%2522Jupyter%2BNotebook%2522","metadata":null,"csrf_tokens":{"/salesforce/BLIP/star":{"post":"M-iAkgWX5ox1Kv_m6WVe30DRNrnyl1IyEqpnY9lQNORRlFJ3_Jf-hx-WivPppZchSEOudvmEdwLxG1RGqArwIg"},"/salesforce/BLIP/unstar":{"post":"qikc5XWzET4IwlQLwS5QC4r3dCEcO7AO58VYLGd33sjpO5HaTdN156NsO-jG17OWlQa_dK_sh2jx29ftGjJXOg"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"4i_YNwfKBXmqxLXo_oSWu8nPSLl0Bw9IX-TwqBQi0vLj00hoJk2_Es3EmPXYc2Ul6v61r66eria_IgZJgnvgJw"}}},"title":"Repository search results"}