
Commit

Do fp16 for sdpa_bench.
liuliu committed Dec 11, 2024
1 parent aafc394 commit 0adf85a
Showing 2 changed files with 7 additions and 5 deletions.
6 changes: 4 additions & 2 deletions bin/nnc/sdpa_bench.c
@@ -64,11 +64,13 @@ int main(int argc, char** argv)
 	ccv_nnc_tensor_t* const gpu_o_tensor = ccv_nnc_tensor_new(0, GPU_TENSOR_NHWC(000, 16F, B, R, Hq, D), 0);
 	ccv_nnc_cmd_exec(CMD_DATA_TRANSFER_FORWARD(), ccv_nnc_no_hint, 0, TENSOR_LIST(q_tensor_f16, k_tensor_f16, v_tensor_f16), TENSOR_LIST(gpu_q_tensor, gpu_k_tensor, gpu_v_tensor), 0);
 
+	ccv_nnc_cmd_t scaled_dot_product_attention = CMD_SCALED_DOT_PRODUCT_ATTENTION_FORWARD(scale, is_causal);
+	scaled_dot_product_attention.info.scaled_dot_product_attention.flags = CCV_NNC_GEMM_16F;
 	for (int i = 0; i < 5; i++)
-		ccv_nnc_cmd_exec(CMD_SCALED_DOT_PRODUCT_ATTENTION_FORWARD(scale, is_causal), ccv_nnc_no_hint, 0, TENSOR_LIST(gpu_q_tensor, gpu_k_tensor, gpu_v_tensor, NULL, NULL, NULL), TENSOR_LIST(gpu_o_tensor, NULL), 0);
+		ccv_nnc_cmd_exec(scaled_dot_product_attention, ccv_nnc_no_hint, 0, TENSOR_LIST(gpu_q_tensor, gpu_k_tensor, gpu_v_tensor, NULL, NULL, NULL), TENSOR_LIST(gpu_o_tensor, NULL), 0);
 	double elapsed_time = get_current_time();
 	for (int i = 0; i < 40; i++)
-		ccv_nnc_cmd_exec(CMD_SCALED_DOT_PRODUCT_ATTENTION_FORWARD(scale, is_causal), ccv_nnc_no_hint, 0, TENSOR_LIST(gpu_q_tensor, gpu_k_tensor, gpu_v_tensor, NULL, NULL, NULL), TENSOR_LIST(gpu_o_tensor, NULL), 0);
+		ccv_nnc_cmd_exec(scaled_dot_product_attention, ccv_nnc_no_hint, 0, TENSOR_LIST(gpu_q_tensor, gpu_k_tensor, gpu_v_tensor, NULL, NULL, NULL), TENSOR_LIST(gpu_o_tensor, NULL), 0);
 	elapsed_time = get_current_time() - elapsed_time;
 	printf("%d, %d, %d, %d, %d, %d, %2.3f\n", B, R, C, Hq, Hk, D, elapsed_time);
 
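For context on the hunk above: constructing the command once lets the benchmark set info.scaled_dot_product_attention.flags to CCV_NNC_GEMM_16F before both the 5-iteration warmup and the 40-iteration timed loop, so the timed loop measures the fp16 GEMM path inside the attention kernel. Below is a sketch (not part of this commit) of timing the default setting and CCV_NNC_GEMM_16F back to back; it reuses scale, is_causal, the GPU tensors, and get_current_time() from sdpa_bench.c, and the assumption that leaving flags at 0 selects the default accumulation path is mine, not something the diff states.

	/* Sketch only: reuses the setup from sdpa_bench.c (gpu_q_tensor, gpu_k_tensor,
	 * gpu_v_tensor, gpu_o_tensor, scale, is_causal, get_current_time()). */
	ccv_nnc_cmd_t attention = CMD_SCALED_DOT_PRODUCT_ATTENTION_FORWARD(scale, is_causal);
	const int flag_settings[2] = { 0 /* default path (assumption) */, CCV_NNC_GEMM_16F };
	for (int j = 0; j < 2; j++)
	{
		attention.info.scaled_dot_product_attention.flags = flag_settings[j];
		for (int i = 0; i < 5; i++) /* warmup so the timed loop sees steady state */
			ccv_nnc_cmd_exec(attention, ccv_nnc_no_hint, 0, TENSOR_LIST(gpu_q_tensor, gpu_k_tensor, gpu_v_tensor, NULL, NULL, NULL), TENSOR_LIST(gpu_o_tensor, NULL), 0);
		double elapsed_time = get_current_time();
		for (int i = 0; i < 40; i++)
			ccv_nnc_cmd_exec(attention, ccv_nnc_no_hint, 0, TENSOR_LIST(gpu_q_tensor, gpu_k_tensor, gpu_v_tensor, NULL, NULL, NULL), TENSOR_LIST(gpu_o_tensor, NULL), 0);
		elapsed_time = get_current_time() - elapsed_time;
		printf("flags=%d, 40 runs: %2.3f\n", flag_settings[j], elapsed_time);
	}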
6 changes: 3 additions & 3 deletions lib/nnc/mfa/v2/AttentionDescriptor.cpp
@@ -455,9 +455,9 @@ std::vector<AttentionParameterRow> AttentionDescriptor::forwardMixed(MTL::Device
     return {
       AttentionParameterRow(32, 16, 128, 16, { AttentionOperand::Q, AttentionOperand::O }),
       AttentionParameterRow(96, 16, 128, 32, { AttentionOperand::Q, AttentionOperand::O }),
-      AttentionParameterRow(160, 16, 128, 32, { AttentionOperand::O }),
-      AttentionParameterRow(224, 16, 128, 32, { AttentionOperand::Q }),
-      AttentionParameterRow(384, 16, 128, 32, {})
+      AttentionParameterRow(160, 32, 128, 32, { AttentionOperand::O }),
+      AttentionParameterRow(224, 32, 128, 32, { AttentionOperand::Q }),
+      AttentionParameterRow(384, 32, 128, 32, {})
     };
   } else {
     return {
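On the AttentionDescriptor.cpp hunk: each AttentionParameterRow appears to pair a maximum head dimension (first argument) with kernel block dimensions, and this commit raises the second argument from 16 to 32 for the 160/224/384 rows in the forwardMixed table. That reading of the arguments is inferred from the values themselves, not documented in the diff. A hypothetical, self-contained sketch of how such a table is typically consulted by head dimension (struct and field names invented for illustration; they are not the repo's API):

	#include <stdio.h>

	/* Hypothetical stand-in for AttentionParameterRow; the real C++ type lives in
	 * lib/nnc/mfa/v2/ and its field names and order may differ. */
	typedef struct {
		int max_head_dimension;    /* first argument in the diff: 32, 96, 160, 224, 384 */
		int parallelization_block; /* second argument: raised from 16 to 32 in this commit */
		int traversal_block;       /* third argument: 128 in every row */
		int head_block;            /* fourth argument: 16 or 32 */
	} parameter_row_t;

	/* Pick the first row whose maximum head dimension covers the requested D. */
	static const parameter_row_t* select_row(const parameter_row_t* rows, int count, int head_dimension)
	{
		for (int i = 0; i < count; i++)
			if (head_dimension <= rows[i].max_head_dimension)
				return &rows[i];
		return &rows[count - 1]; /* fall back to the largest configuration */
	}

	int main(void)
	{
		const parameter_row_t rows[] = {
			{ 32, 16, 128, 16 },
			{ 96, 16, 128, 32 },
			{ 160, 32, 128, 32 }, /* was { 160, 16, ... } before this commit */
			{ 224, 32, 128, 32 },
			{ 384, 32, 128, 32 },
		};
		const parameter_row_t* row = select_row(rows, 5, 128);
		printf("D=128 -> parallelization block %d\n", row->parallelization_block);
		return 0;
	}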
