{"payload":{"header_redesign_enabled":false,"results":[{"id":"842468267","archived":false,"color":"#f34b7d","followers":14,"has_funding_file":false,"hl_name":"Bruce-Lee-LY/decoding_attention","hl_trunc_description":"Decoding Attention is specially optimized for multi head attention (MHA) using CUDA core for the decoding stage of LLM inference.","language":"C++","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":842468267,"name":"decoding_attention","owner_id":46511649,"owner_login":"Bruce-Lee-LY","updated_at":"2024-09-14T23:57:20.237Z","has_issues":true}},"sponsorable":false,"topics":["gpu","cuda","inference","nvidia","mha","multi-head-attention","llm","large-language-model","flash-attention","cuda-core","decoding-attention","flashinfer"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":84,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253ABruce-Lee-LY%252Fdecoding_attention%2B%2Blanguage%253AC%252B%252B","metadata":null,"warn_limited_results":false,"csrf_tokens":{"/Bruce-Lee-LY/decoding_attention/star":{"post":"kQJ8NfYvyx1TMe_PMDKYJsCsfiZGPRBfyJoLC0847_FBJ5TWkCX1SBMMoxH7LaVCb1eWgTmH9umqlbgTUbzKzg"},"/Bruce-Lee-LY/decoding_attention/unstar":{"post":"ri9QTHNSl0Syq9GVbpn8AnAFc0FlrzbHRVGYSF_nWzUnKCMvDGQzjUytW9SWm2tboOI1Ondb7Ks-qHjjsAwxWg"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"Lz_2JM3edwfw1Q9fsV5WdzQ-B2txRdaREq7CK-gAv289LXDsMkfl_pr6iJQBHMX1gkXCB5_p-7roZPUnZJgReA"}}},"title":"Repository search results"}