feat: ref. cross entropy, add CUDA, fix grad test
1 parent 879dcb8, commit c5fb49b
Showing 9 changed files with 167 additions and 21 deletions.
@@ -0,0 +1,106 @@
#include "common.cuh" | ||
#include "cross-entropy-loss.cuh" | ||
#include "sumrows.cuh" | ||
|
||
#include <cmath> | ||
#include <cstdint> | ||
|
||
static __global__ void cross_entropy_loss_f32(const float * logits, const float * labels, float * dst, const int nclasses, const int k) { | ||
const int warp_id = threadIdx.x / WARP_SIZE; | ||
const int lane_id = threadIdx.x % WARP_SIZE; | ||
const int i0 = blockDim.x*blockIdx.x + warp_id*WARP_SIZE; | ||
|
||
const int ne_tmp = WARP_SIZE*nclasses; | ||
|
||
extern __shared__ float tmp_all[]; | ||
float * tmp_logits = tmp_all + (2*warp_id + 0)*ne_tmp; | ||
float * tmp_labels = tmp_all + (2*warp_id + 1)*ne_tmp; | ||
|
||
// Each warp first loads ne_tmp logits/labels into shared memory: | ||
for (int i = lane_id; i < ne_tmp; i += WARP_SIZE) { | ||
const int ig = i0*nclasses + i; // ig == i global | ||
|
||
tmp_logits[i] = ig < k*nclasses ? logits[ig] : 0.0f; | ||
tmp_labels[i] = ig < k*nclasses ? labels[ig] : 0.0f; | ||
} | ||
|
||
// Each thread in the warp then calculates the cross entropy loss for a single row. | ||
// TODO: pad in order to avoid shared memory bank conflicts. | ||
|
||
// Find maximum for softmax: | ||
float max = -INFINITY; | ||
for (int i = 0; i < nclasses; ++i) { | ||
max = fmaxf(max, tmp_logits[lane_id*nclasses + i]); | ||
} | ||
|
||
// Calculate log(softmax(logits)) which is just logits - max: | ||
float sum = 0.0f; | ||
for (int i = 0; i < nclasses; ++i) { | ||
float val = tmp_logits[lane_id*nclasses + i] - max; | ||
sum += expf(val); | ||
tmp_logits[lane_id*nclasses + i] = val; | ||
} | ||
sum = logf(sum); | ||
|
||
// log(exp(logits - max) / sum) = (logits - max) - log(sum) | ||
float loss = 0.0f; | ||
for (int i = 0; i < nclasses; ++i) { | ||
loss += (tmp_logits[lane_id*nclasses + i] - sum) * tmp_labels[lane_id*nclasses + i]; | ||
} | ||
loss = -warp_reduce_sum(loss) / (float)k; | ||
|
||
__syncthreads(); | ||
|
||
if (lane_id == 0) { | ||
tmp_all[warp_id] = loss; | ||
} | ||
|
||
__syncthreads(); | ||
|
||
if (warp_id != 0) { | ||
return; | ||
} | ||
|
||
loss = lane_id < CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE/WARP_SIZE ? tmp_all[lane_id] : 0.0f; | ||
loss = warp_reduce_sum(loss); | ||
|
||
if (lane_id != 0) { | ||
return; | ||
} | ||
|
||
dst[blockIdx.x] = loss; | ||
} | ||
|
||
void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { | ||
const ggml_tensor * src0 = dst->src[0]; | ||
const ggml_tensor * src1 = dst->src[1]; | ||
|
||
GGML_ASSERT(src0->type == GGML_TYPE_F32); | ||
GGML_ASSERT(src1->type == GGML_TYPE_F32); | ||
GGML_ASSERT( dst->type == GGML_TYPE_F32); | ||
|
||
GGML_ASSERT(ggml_is_contiguous(src0)); | ||
GGML_ASSERT(ggml_is_contiguous(src1)); | ||
GGML_ASSERT(ggml_is_contiguous(dst)); | ||
|
||
const int64_t ne00 = src0->ne[0]; | ||
const int64_t nrows = ggml_nrows(src0); | ||
|
||
const float * src0_d = (const float *) src0->data; | ||
const float * src1_d = (const float *) src1->data; | ||
float * dst_d = (float *) dst->data; | ||
|
||
ggml_cuda_pool & pool = ctx.pool(); | ||
cudaStream_t stream = ctx.stream(); | ||
|
||
const dim3 blocks_dim(CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE, 1, 1); | ||
const dim3 blocks_num((nrows + CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE - 1) / CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE, 1, 1); | ||
const int shmem = 2*CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE*ne00*sizeof(float); | ||
|
||
ggml_cuda_pool_alloc<float> dst_tmp(pool, blocks_num.x); | ||
|
||
cross_entropy_loss_f32<<<blocks_num, blocks_dim, shmem, stream>>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows); | ||
|
||
// Combine results from individual blocks: | ||
sum_rows_f32_cuda(dst_tmp.ptr, dst_d, blocks_num.x, 1, stream); | ||
} |
@@ -0,0 +1,5 @@
#include "common.cuh" | ||
|
||
#define CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE 256 | ||
|
||
void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor * dst); |
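One practical note on this block size: the kernel requests 2*CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE*ne00*sizeof(float) bytes of dynamic shared memory, i.e. 2048 bytes per class at a block size of 256, so the 48 KB default per-block limit is exceeded once ne00 > 24. The commit itself does not guard against this; below is a minimal host-side sketch of the kind of check a caller might add, using only standard CUDA runtime calls. The helper name is hypothetical, and it assumes the kernel symbol is visible in the translation unit.

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper (not part of this commit): verify that the dynamic
// shared memory the kernel needs fits on the given device, opting in to the
// larger per-block carve-out when the hardware supports it.
static bool cross_entropy_shmem_ok(int64_t ne00, int device) {
    const size_t shmem = 2*CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE*ne00*sizeof(float);

    int max_default = 0; // per-block limit without opt-in (48 KB on most GPUs)
    cudaDeviceGetAttribute(&max_default, cudaDevAttrMaxSharedMemoryPerBlock, device);

    int max_optin = 0;   // per-block limit reachable via cudaFuncSetAttribute
    cudaDeviceGetAttribute(&max_optin, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);

    if (shmem <= (size_t) max_default) {
        return true;
    }
    if (shmem <= (size_t) max_optin) {
        // Request the larger dynamic shared memory carve-out for this kernel
        // (assumes cross_entropy_loss_f32 is visible in this translation unit):
        cudaFuncSetAttribute(cross_entropy_loss_f32, cudaFuncAttributeMaxDynamicSharedMemorySize, (int) shmem);
        return true;
    }
    fprintf(stderr, "cross entropy: %zu bytes of shared memory needed, device supports at most %d\n", shmem, max_optin);
    return false;
}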
@@ -1,3 +1,5 @@
#include "common.cuh" | ||
|
||
void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream); | ||
|
||
void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); |
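Since the commit title mentions a reference cross entropy implementation, here is a small CPU sketch of the same computation, handy for cross-checking the kernel's output on small inputs. The function name and the row-major layout (nrows rows of nclasses floats) are illustrative assumptions, not code from this commit:

#include <cmath>
#include <cstdint>

// Illustrative reference: mean cross entropy between softmax(logits) and
// labels, mirroring what cross_entropy_loss_f32 + sum_rows_f32_cuda compute.
static float cross_entropy_loss_ref(const float * logits, const float * labels, int64_t nclasses, int64_t nrows) {
    double loss = 0.0;
    for (int64_t r = 0; r < nrows; ++r) {
        const float * l = logits + r*nclasses;
        const float * y = labels + r*nclasses;

        // Numerically stable log-softmax: subtract the row maximum first.
        float max = -INFINITY;
        for (int64_t i = 0; i < nclasses; ++i) {
            max = fmaxf(max, l[i]);
        }
        double sum = 0.0;
        for (int64_t i = 0; i < nclasses; ++i) {
            sum += exp(l[i] - max);
        }
        const double log_sum = log(sum);

        // loss -= labels . (log-softmax of the row)
        for (int64_t i = 0; i < nclasses; ++i) {
            loss += ((double)(l[i] - max) - log_sum) * (double) y[i];
        }
    }
    return (float)(-loss / (double) nrows);
}

On small tensors this should agree closely with the CUDA path; the kernel accumulates in float while this sketch accumulates in double, so exact bitwise equality is not expected.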