train_gpt2_fp32.cu

Source code

llm.c/train_gpt2_fp32.cu at master · karpathy/llm.c (github.com)

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <float.h>
#include <string.h>
#include <unistd.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cublasLt.h>
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
#include "utils.h"
#include "tokenizer.h"#define CEIL_DIV(M, N) (((M) + (N)-1) / (N))void cudaCheck(cudaError_t error, const char *file, int line) {if (error != cudaSuccess) {printf("[CUDA ERROR] at file %s:%d:\n%s\n", file, line,cudaGetErrorString(error));exit(EXIT_FAILURE);}
};
#define cudaCheck(err) (cudaCheck(err, __FILE__, __LINE__))void cublasCheck(cublasStatus_t status, const char *file, int line)
{if (status != CUBLAS_STATUS_SUCCESS) {printf("[cuBLAS ERROR]: %d %s %d\n", status, file, line);exit(EXIT_FAILURE);}
}
#define cublasCheck(status) { cublasCheck((status), __FILE__, __LINE__); }static size_t cublaslt_workspace_size = 32 * 1024 * 1024;
static void* cublaslt_workspace = NULL;
static cublasComputeType_t cublas_compute_type;
cublasHandle_t cublas_handle;
cublasLtHandle_t cublaslt_handle;namespace cg = cooperative_groups;__device__ inline float4 add_float4(const float4& a, const float4& b) {return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}__global__ void encoder_forward_kernel3(float4* out,const int* inp, const float4* wte, const float4* wpe,int B, int T, int C) {int C4 = C / 4;int idx = blockIdx.x * blockDim.x + threadIdx.x;int N = B * T * C4;if (idx < N) {int bt = idx / C4;int b = bt / T;int t = bt % T;int c4 = idx % C4;int ix = inp[b * T + t];out[b * T * C4 + t * C4 + c4] = add_float4(wte[ix * C4 + c4], wpe[t * C4 + c4]);}
}__global__ void encoder_backward_kernel(float* dwte, float* dwpe,const float* dout, const int* inp,int B, int T, int C) {int idx = blockIdx.x * blockDim.x + threadIdx.x;int N = B * T * C;if (idx < N) {int bt = idx / C;int b = bt / T;int t = bt % T;int c = idx % C;int ix = inp[b * T + t];const float* dout_btc = dout + b * T * C + t * C + c;float* dwte_ix = dwte + ix * C + c;float* dwpe_tc = dwpe + t * C + c;atomicAdd(dwte_ix, *dout_btc);atomicAdd(dwpe_tc, *dout_btc);}
}__global__ void layernorm_forward_kernel3(float* __restrict__ out, float* __restrict__ mean, float* __restrict__ rstd,const float*  __restrict__ inp, const float*  __restrict__ weight,const float* __restrict__ bias, int N, int C) {cg::thread_block block = cg::this_thread_block();cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);int idx = blockIdx.x * warp.meta_group_size() + warp.meta_group_rank();if(idx >= N) {return;}const float* x = inp + idx * C;float sum = 0.0f;for (int i = warp.thread_rank(); i < C; i += warp.size()) {sum += x[i];}sum = cg::reduce(warp, sum, cg::plus<float>{});float m = sum / C;if(warp.thread_rank() == 0 && mean != nullptr) {__stcs(mean + idx, m);}sum = 0.0f;for (int i = warp.thread_rank(); i < C; i += warp.size()) {float diff = x[i] - m;sum += diff * diff;}sum = cg::reduce(warp, sum, cg::plus<float>{});float s = rsqrtf(sum / C + 1e-5f);if(warp.thread_rank() == 0 && rstd != nullptr) {__stcs(rstd + idx, s);}float* o = out + idx * C;for (int c = warp.thread_rank(); c < C; c += warp.size()) {float n = s * (__ldcs(x+c) - m);__stcs(o+c, n * weight[c] + bias[c]);}
}__global__ void permute_kernel(float* q, float* k, float* v,const float* inp,int B, int N, int NH, int d) {int idx = blockIdx.x * blockDim.x + threadIdx.x;if (idx < B * NH * N * d) {int b = idx / (NH * N * d);int rest = idx % (NH * N * d);int nh_ = rest / (N * d);rest = rest % (N * d);int n = rest / d;int d_ = rest % d;int inp_idx = (b * N * 3 * NH * d) + (n * 3 * NH * d) + (0 * NH * d) + (nh_ * d) + d_;q[idx] = __ldcs(&inp[inp_idx]);k[idx] = __ldcs(&inp[inp_idx + NH * d]);v[idx] = __ldcs(&inp[inp_idx + 2 * (NH * d)]);}
}__global__ void permute_kernel_backward(float* dinp,const float* dq, const float* dk, const float* dv,int B, int N, int NH, int d) {int idx = blockIdx.x * blockDim.x + threadIdx.x;if (idx < B * NH * N * d) {int b = idx / (NH * N * d);int rest = idx % (NH * N * d);int nh_ = rest / (N * d);rest = rest % (N * d);int n = rest / d;int d_ = rest % d;int inp_idx = (b * N * 3 * NH * d) + (n * 3 * NH * d) + (0 * NH * d) + (nh_ * d) + d_;dinp[inp_idx] = dq[idx];dinp[inp_idx + NH * d] = dk[idx];dinp[inp_idx + 2 * (NH * d)] = dv[idx];}
}__global__ void unpermute_kernel(float* inp, float *out, int B, int N, int NH, int d) {int idx = blockIdx.x * blockDim.x + threadIdx.x;if (idx < B * NH * N * d) {int b = idx / (NH * N * d);int rest = idx % (NH * N * d);int nh_ = rest / (N * d);rest = rest % (N * d);int n = rest / d;int d_ = rest % d;int other_idx = (b * NH * N * d) + (n * NH * d) + (nh_ * d) + d_;out[other_idx] = __ldcs(&inp[idx]);}
}__global__ void unpermute_kernel_backward(float* dinp, const float *dout, int B, int N, int NH, int d) {int idx = blockIdx.x * blockDim.x + threadIdx.x;if (idx < B * NH * N * d) {int b = idx / (NH * N * d);int rest = idx % (NH * N * d);int nh_ = rest / (N * d);rest = rest % (N * d);int n = rest / d;int d_ = rest % d;int other_idx = (b * NH * N * d) + (n * NH * d) + (nh_ * d) + d_;dinp[idx] = dout[other_idx];}
}__device__ float& vec_at(float4& vec, int index) {return reinterpret_cast<float*>(&vec)[index];
}__device__ float vec_at(const float4& vec, int index) {return reinterpret_cast<const float*>(&vec)[index];
}__global__ void softmax_forward_kernel5(float* out, float inv_temperature, const float* inp, int N, int T) {assert(T % 4  == 0);cg::thread_block block = cg::this_thread_block();cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);int idx = (gridDim.x - blockIdx.x - 1) * warp.meta_group_size() + warp.meta_group_rank(); if(idx >= N * T) {return;}int own_pos = idx % T;int pos_by_4 = own_pos / 4;const float* x = inp + idx * T;float maxval = -FLT_MAX;float sumval = 0.0f;const float4* x_vec = reinterpret_cast<const float4*>(x);for (int i = warp.thread_rank(); i < pos_by_4; i += warp.size()) {float4 v = x_vec[i];float old_maxval = maxval;for(int k = 0; k < 4; ++k) {maxval = fmaxf(maxval, vec_at(v, k));}sumval *= expf(inv_temperature * (old_maxval - maxval));for(int k = 0; k < 4; ++k) {sumval += expf(inv_temperature * (vec_at(v, k) - maxval));}}if(4*pos_by_4 + warp.thread_rank() <= own_pos) {float old_maxval = maxval;maxval = fmaxf(maxval, x[4*pos_by_4 + warp.thread_rank()]);sumval *= expf(inv_temperature * (old_maxval - maxval));sumval += expf(inv_temperature * (x[4*pos_by_4 + warp.thread_rank()] - maxval));}float global_maxval = cg::reduce(warp, maxval, cg::greater<float>{});sumval *= expf(inv_temperature * (maxval - global_maxval));float sum = cg::reduce(warp, sumval, cg::plus<float>{});float norm = 1.f / sum;for (int i = warp.thread_rank(); i <= own_pos; i += warp.size()) {float ev = expf(inv_temperature * (__ldcs(x + i) - global_maxval));__stcs(out + idx * T + i, ev * norm);}
}__global__ void residual_forward_kernel(float* out, float* inp1, float* inp2, int N) {int idx = blockIdx.x * blockDim.x + threadIdx.x;if (idx < N) {out[idx] = __ldcs(&inp1[idx]) + __ldcs(&inp2[idx]);}
}

#define GELU_SCALING_FACTOR sqrtf(2.0f / M_PI)
__global__ void gelu_forward_kernel(float* out, const float* inp, int N) {int i = blockIdx.x * blockDim.x + threadIdx.x;if (i < N) {float xi = inp[i];float cube = 0.044715f * xi * xi * xi;out[i] = 0.5f * xi * (1.0f + tanhf(GELU_SCALING_FACTOR * (xi + cube)));}
}__global__ void gelu_backward_kernel(float* dinp, const float* inp, const float* dout, const int N) {int i = blockIdx.x * blockDim.x + threadIdx.x;if (i < N) {float x = inp[i];float cube = 0.044715f * x * x * x;float tanh_arg = GELU_SCALING_FACTOR * (x + cube);float tanh_out = tanhf(tanh_arg);float coshf_out = coshf(tanh_arg);float sech_out = 1.0f / (coshf_out * coshf_out);float local_grad = 0.5f * (1.0f + tanh_out) + x * 0.5f * sech_out * GELU_SCALING_FACTOR * (1.0f + 3.0f * 0.044715f * x * x);dinp[i] = local_grad * dout[i];}
}__global__ void matmul_backward_bias_kernel4(float* dbias, const float* dout, int B, int T, int OC) {extern __shared__ float smem[]; const int warp_id = threadIdx.x / warpSize; const int lane_id = threadIdx.x % warpSize; const int tl = blockIdx.x * warpSize; const int vstep = blockDim.x / warpSize; const float* dout_col = dout + tl + lane_id;float dout_sum = 0.0f;for (int row = warp_id; row < B * T; row += vstep) {dout_sum += dout_col[row * OC];}smem[lane_id + warp_id * warpSize] = dout_sum;__syncthreads();dout_sum = 0.0f;if (warp_id == 0) {for (int j = 0; j < vstep; j++) {dout_sum += smem[lane_id + j * warpSize];}dbias[tl + lane_id] += dout_sum;}
}

__global__ void layernorm_backward_kernel2(float* dinp, float* dweight, float* dbias,
                                           const float* dout, const float* inp, const float* weight, const float* mean, const float* rstd,
                                           int B, int T, int C) {
    extern __shared__ float shared[];
    namespace cg = cooperative_groups;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);
    int idx = blockIdx.x * warp.meta_group_size() + warp.meta_group_rank();
    int N = B * T;
    if(idx >= N) { return; } // thread guards
    int b = idx / T;
    int t = idx % T;
    const float* dout_bt = dout + b * T * C + t * C;
    const float* inp_bt = inp + b * T * C + t * C;
    float* dinp_bt = dinp + b * T * C + t * C;
    const float mean_bt = mean[b * T + t];
    const float rstd_bt = rstd[b * T + t];
    float* dbias_shared = shared;
    float* dweight_shared = shared + C;
    #pragma unroll
    for(int i = threadIdx.x; i < C; i += blockDim.x){
        dbias_shared[i] = 0.0f;
        dweight_shared[i] = 0.0f;
    }
    __syncthreads();
    float dnorm_mean = 0.0f;
    float dnorm_norm_mean = 0.0f;
    for (int i = warp.thread_rank(); i < C; i += warp.size()) {
        float norm_bti = (inp_bt[i] - mean_bt) * rstd_bt;
        float dnorm_i = weight[i] * dout_bt[i];
        dnorm_mean += dnorm_i;
        dnorm_norm_mean += dnorm_i * norm_bti;
    }
    dnorm_mean = cg::reduce(warp, dnorm_mean, cg::plus<float>{});
    dnorm_norm_mean = cg::reduce(warp, dnorm_norm_mean, cg::plus<float>{});
    dnorm_mean = dnorm_mean / C;
    dnorm_norm_mean = dnorm_norm_mean / C;
    for (int i = warp.thread_rank(); i < C; i += warp.size()) {
        float norm_bti = (inp_bt[i] - mean_bt) * rstd_bt;
        float dnorm_i = weight[i] * dout_bt[i];
        atomicAdd(&dbias_shared[i], dout_bt[i]);
        atomicAdd(&dweight_shared[i], norm_bti * dout_bt[i]);
        float dval = 0.0f;
        dval += dnorm_i;
        dval -= dnorm_mean;
        dval -= norm_bti * dnorm_norm_mean;
        dval *= rstd_bt;
        dinp_bt[i] += dval;
    }
    __syncthreads();
    for(int i = threadIdx.x; i < C; i += blockDim.x){
        atomicAdd(&dbias[i], dbias_shared[i]);
        atomicAdd(&dweight[i], dweight_shared[i]);
    }
}__global__ void softmax_autoregressive_backward_kernel(float* dpreatt, const float* datt, const float* att,int B, int T, int C, float scale) {constexpr const int BlockSize = 256;constexpr int T_per_block = 4;cg::thread_block block = cg::this_thread_block();cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);__shared__ float block_acc[32];int idx = blockIdx.y;int t0 = T - 1 - T_per_block*blockIdx.x;att += idx * T * T;datt += idx * T * T;dpreatt += idx * T * T;if (warp.meta_group_rank() == 0) {block_acc[warp.thread_rank()] = 0;}for(int to = 0; to < T_per_block; ++to) {int t = t0 - to;if(t < 0) return;const float* att_bth = att + t * T;const float* datt_bth = datt + t * T;float* dpreatt_bth = dpreatt + t * T;float local_sum = 0;for (int t2 = block.thread_rank(); t2 <= t; t2 += BlockSize) {local_sum += att_bth[t2] * datt_bth[t2];}block_acc[warp.meta_group_rank()] = cg::reduce(warp, local_sum, cg::plus<float>{});block.sync();local_sum = cg::reduce(warp, block_acc[warp.thread_rank()], cg::plus<float>{});for (int t3 = block.thread_rank(); t3 <= t; t3 += BlockSize) {float acc = __ldcs(att_bth + t3) * (__ldcs(datt_bth + t3) - local_sum);__stcs(dpreatt_bth + t3, scale * acc);}}
}__device__ inline float lerp(float start, float end, float weight) {return fma(weight, end, fma(-weight, start, start));
}

__global__ void adamw_kernel2(float* params_memory, float* grads_memory, float* m_memory, float* v_memory, long num_parameters,
                              float learning_rate, float beta1, float beta2, float beta1_correction, float beta2_correction, float eps, float weight_decay) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num_parameters) return;
    float grad = grads_memory[i];
    float m = m_memory[i];
    float v = v_memory[i];
    // update the first moment (momentum)
    m = lerp(grad, m, beta1);
    m_memory[i] = m;
    // update the second moment (RMSprop)
    v = lerp(grad * grad, v, beta2);
    v_memory[i] = v;
    m /= beta1_correction;
    v /= beta2_correction;
    params_memory[i] -= learning_rate * (m / (sqrtf(v) + eps) + weight_decay * params_memory[i]);
}struct SoftmaxParams {float Scale;float Offset;
};__device__ SoftmaxParams prepare_softmax_blockwide_nofloat4(cg::thread_block_tile<32>& warp,int idx, const float* inp, int V, int P) {const float* x = inp + idx * P;float thread_maxval = -INFINITY;float thread_sumval = 0.0f;for (int i = V + threadIdx.x - blockDim.x; i >= 0; i -= blockDim.x) {float v = x[i];float old_maxval = thread_maxval;thread_maxval = fmaxf(thread_maxval, v);thread_sumval *= expf((old_maxval - thread_maxval));thread_sumval += expf(v - thread_maxval);}__shared__ float shared_maxval[32];__shared__ float shared_sumval[32];int num_warps = blockDim.x / 32;int warp_id = threadIdx.x / 32;int lane_id = threadIdx.x % 32;float warp_maxval = cg::reduce(warp, thread_maxval, cg::greater<float>{});if (lane_id == 0) { shared_maxval[warp_id] = warp_maxval; }__syncthreads();warp_maxval = (lane_id < num_warps) ? shared_maxval[lane_id] : -FLT_MAX;float block_maxval = cg::reduce(warp, warp_maxval, cg::greater<float>{});thread_sumval *= expf(thread_maxval - block_maxval);float warp_sumval = cg::reduce(warp, thread_sumval, cg::plus<float>{});if (lane_id == 0) { shared_sumval[warp_id] = warp_sumval; }__syncthreads();warp_sumval = (lane_id < num_warps) ? shared_sumval[lane_id] : 0.0f;float block_sumval = cg::reduce(warp, warp_sumval, cg::plus<float>{});return SoftmaxParams{1.f / block_sumval, block_maxval};
}

__global__ void fused_classifier_kernel3(float* logits, float* losses, float* probs,
                                         const float* dlosses, const int* targets,
                                         int B, int T, int V, int P) {
    namespace cg = cooperative_groups;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);
    int idx = blockIdx.x;
    int ix = targets[idx];
    SoftmaxParams sp = prepare_softmax_blockwide_nofloat4(warp, idx, logits, V, P);
    if(threadIdx.x == 0) {
        float prob = expf(logits[idx * P + ix] - sp.Offset) * sp.Scale;
        losses[idx] = -logf(prob);
    }
    float dloss = dlosses != NULL ? dlosses[idx] : 1.0f / (B*T);
    const float* logits_vec = logits + idx * P;
    for (int i = threadIdx.x; i < V; i += blockDim.x) {
        // this is the 2nd read of logits after the one in prepare_softmax2
        // this data will never be needed again, so we reduce cache persistence
        float v = __ldcs(&logits_vec[i]);
        float prob = expf(v - sp.Offset) * sp.Scale;
        if (probs != NULL) {
            probs[idx * P + i] = prob;
        }
        float indicator = (i == ix) ? 1.0f : 0.0f;
        logits[idx * P + i] = (prob - indicator) * dloss;
    }
}void encoder_forward(float* out,const int* inp, const float* wte, const float* wpe,int B, int T, int C) {assert(C % 4 == 0);const int block_size = 512;const int N = B * T * C;const int grid_size = CEIL_DIV(N / 4, block_size);encoder_forward_kernel3<<<grid_size, block_size>>>((float4*) out, inp, (float4*) wte, (float4*) wpe, B, T, C);cudaCheck(cudaGetLastError());
}void encoder_backward(float* dwte, float* dwpe,const float* dout, const int* inp,int B, int T, int C) {const int N = B * T * C;const int block_size = 256;const int grid_size = CEIL_DIV(N, block_size);encoder_backward_kernel<<<grid_size, block_size>>>(dwte, dwpe, dout, inp, B, T, C);cudaCheck(cudaGetLastError());
}void layernorm_forward(float* out, float* mean, float* rstd,float* inp, float* weight, float* bias,int B, int T, int C) {const int block_size = 512;const int N = B * T;const int grid_size = CEIL_DIV(N * 32, block_size);layernorm_forward_kernel3<<<grid_size, block_size>>>(out, mean, rstd, inp, weight, bias, N, C);cudaCheck(cudaGetLastError());
}void matmul_forward_cublaslt(float* out,float* inp, float* weight, float* bias,int B, int T, int C, int OC) {int has_bias = (bias != NULL);if(((uintptr_t)bias % 16) != 0) {printf("Bias pointer is not aligned (cuBLASLt requirement)!\n");exit(EXIT_FAILURE);}int returnedResults = 0;cublasLtMatmulDesc_t operationDesc;cublasLtMatmulPreference_t preference;cublasLtMatrixLayout_t weightLayout;cublasLtMatrixLayout_t inputLayout;cublasLtMatrixLayout_t outputLayout;cublasLtMatrixLayout_t biasLayout;cublasLtMatmulHeuristicResult_t heuristic;cublasOperation_t opNoTranspose = CUBLAS_OP_N;cublasOperation_t opTranspose = CUBLAS_OP_T;cublasLtEpilogue_t epilogueBias = CUBLASLT_EPILOGUE_BIAS;cublasCheck(cublasLtMatmulDescCreate(&operationDesc, cublas_compute_type, CUDA_R_32F));cublasCheck(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &opTranspose, sizeof(opTranspose)));cublasCheck(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &opNoTranspose, sizeof(opNoTranspose)));if(has_bias) {cublasCheck(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogueBias,sizeof(epilogueBias)));}cublasCheck(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias)));cublasCheck(cublasLtMatrixLayoutCreate(&weightLayout, CUDA_R_32F, C, OC, C));cublasCheck(cublasLtMatrixLayoutCreate(&inputLayout, CUDA_R_32F, C, B*T, C));cublasCheck(cublasLtMatrixLayoutCreate(&outputLayout, CUDA_R_32F, OC, B*T, OC));cublasCheck(cublasLtMatrixLayoutCreate(&biasLayout, CUDA_R_32F, OC, 1, OC));cublasCheck(cublasLtMatmulPreferenceCreate(&preference));cublasCheck(cublasLtMatmulPreferenceSetAttribute(preference,CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,&cublaslt_workspace_size, sizeof(cublaslt_workspace_size)));cublasCheck(cublasLtMatmulAlgoGetHeuristic(cublaslt_handle, operationDesc,weightLayout, inputLayout, outputLayout, outputLayout,preference, 1, &heuristic, &returnedResults));if (returnedResults == 0) {printf("No cuBLASLt algorithm: B: %d, T: %d, C: %d, OC: %d, bias: %d\n", B, T, C, OC, has_bias);exit(EXIT_FAILURE);}const float alpha = 1.0f, beta = 0.0f;cublasCheck(cublasLtMatmul(cublaslt_handle, operationDesc,&alpha, weight, weightLayout, inp, inputLayout, &beta,out, outputLayout, out, outputLayout, &heuristic.algo,cublaslt_workspace, cublaslt_workspace_size, 0));cublasCheck(cublasLtMatmulPreferenceDestroy(preference));cublasCheck(cublasLtMatmulDescDestroy(operationDesc));cublasCheck(cublasLtMatrixLayoutDestroy(weightLayout));cublasCheck(cublasLtMatrixLayoutDestroy(inputLayout));cublasCheck(cublasLtMatrixLayoutDestroy(outputLayout));cublasCheck(cublasLtMatrixLayoutDestroy(biasLayout));
}

void attention_forward(float* out, float* qkvr, float* att,
                       float* inp,
                       int B, int T, int C, int NH) {
    const int block_size = 256;
    const int softmax_block_size = 256;
    int HS = C / NH; // head size
    float *q, *k, *v;
    q = qkvr + 0 * B * T * C;
    k = qkvr + 1 * B * T * C;
    v = qkvr + 2 * B * T * C;
    int total_threads = B * NH * T * HS;
    int num_blocks = CEIL_DIV(total_threads, block_size);
    permute_kernel<<<num_blocks, block_size>>>(q, k, v, inp, B, T, NH, HS);
    cudaCheck(cudaGetLastError());
    const float alpha = 1.0f;
    const float beta = 0.0f;
    float* preatt = inp;
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, T, T, HS, &alpha, k, HS, T * HS, q, HS, T * HS, &beta, preatt, T, T * T, B * NH));
    float scale = 1.0 / sqrtf(HS);
    int grid_size = CEIL_DIV(B * NH * T * 32, softmax_block_size);
    softmax_forward_kernel5<<<grid_size, softmax_block_size>>>(att, scale, preatt, B * NH, T);
    cudaCheck(cudaGetLastError());
    float* vaccum = inp;
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, HS, T, T, &alpha, v, HS, T * HS, att, T, T * T, &beta, vaccum, HS, T * HS, B * NH));
    num_blocks = CEIL_DIV(B * T * C, block_size);
    unpermute_kernel<<<num_blocks, block_size>>>(vaccum, out, B, T, NH, HS);
    cudaCheck(cudaGetLastError());
}void residual_forward(float* out, float* inp1, float* inp2, int N) {const int block_size = 256;const int grid_size = CEIL_DIV(N, block_size);residual_forward_kernel<<<grid_size, block_size>>>(out, inp1, inp2, N);cudaCheck(cudaGetLastError());
}void gelu_forward(float* out, const float* inp, int N) {const int block_size = 128;const int grid_size = CEIL_DIV(N, block_size);gelu_forward_kernel<<<grid_size, block_size>>>(out, inp, N);cudaCheck(cudaGetLastError());
}void gelu_backward(float* dinp, const float* inp, const float* dout, const int N) {const int block_size = 128;const int grid_size = CEIL_DIV(N, block_size);gelu_backward_kernel<<<grid_size, block_size>>>(dinp, inp, dout, N);cudaCheck(cudaGetLastError());
}void matmul_backward(float* dinp, float* dweight, float* dbias,float* dout, float* inp, float* weight,int B, int T, int C, int OC) {float one = 1.0f;float zero = 0.0f;cublasCheck(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, C, B*T, OC, &one, weight, C, dout, OC, &zero, dinp, C));cublasCheck(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, C, OC, B*T, &one, inp, C, dout, OC, &one, dweight, C));if (dbias != NULL) {const int block_size = 1024;const int grid_size = OC / 32; matmul_backward_bias_kernel4<<<grid_size, block_size, block_size * sizeof(float)>>>(dbias, dout, B, T, OC);cudaCheck(cudaGetLastError());}
}void layernorm_backward(float* dinp, float* dweight, float* dbias,const float* dout, const float* inp, const  float* weight, const float* mean, const float* rstd,int B, int T, int C) {const int block_size = 512;const int N = B * T;const int grid_size = CEIL_DIV(32*N, block_size);size_t shared_mem_size = 2 * C * sizeof(float);layernorm_backward_kernel2<<<grid_size, block_size, shared_mem_size>>>(dinp, dweight, dbias, dout, inp, weight, mean, rstd, B, T, C);cudaCheck(cudaGetLastError());
}

void attention_backward(float* dinp, float* dqkvr, float* dpreatt, float* datt, float* scratch,
                        const float* dout,
                        const float* qkvr, const float* att,
                        int B, int T, int C, int NH) {
    const int block_size = 256;
    int HS = C / NH; // head size
    const float one = 1.0f;
    const float zero = 0.0f; // note beta = 1.0f so that we accumulate gradients (+=)
    const float *q, *k, *v;
    q = qkvr + 0 * B * T * C;
    k = qkvr + 1 * B * T * C;
    v = qkvr + 2 * B * T * C;
    float *dq, *dk, *dv;
    dq = dqkvr + 0 * B * T * C;
    dk = dqkvr + 1 * B * T * C;
    dv = dqkvr + 2 * B * T * C;
    int num_blocks = CEIL_DIV(B * T * C, block_size);
    unpermute_kernel_backward<<<num_blocks, block_size>>>(scratch, dout, B, T, NH, HS);
    cudaCheck(cudaGetLastError());
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, T, T, HS, &one, v, HS, T * HS, scratch, HS, T * HS, &zero, datt, T, T * T, B * NH));
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, HS, T, T, &one, scratch, HS, T * HS, att, T, T * T, &zero, dv, HS, T * HS, B * NH));
    int hs = C / NH; // head size
    float scale = 1.0f / sqrtf(hs);
    softmax_autoregressive_backward_kernel<<<dim3(T / 4, B * NH), 256>>>(dpreatt, datt, att, B, T, C, scale);
    cudaCheck(cudaGetLastError());
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, HS, T, T, &one, k, HS, T * HS, dpreatt, T, T * T, &zero, dq, HS, T * HS, B * NH));
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, HS, T, T, &one, q, HS, T * HS, dpreatt, T, T * T, &zero, dk, HS, T * HS, B * NH));
    num_blocks = CEIL_DIV(B * NH * T * HS, block_size);
    permute_kernel_backward<<<num_blocks, block_size>>>(dinp, dq, dk, dv, B, T, NH, HS);
    cudaCheck(cudaGetLastError());
}void fused_classifier3(float* logits, float* losses,const float* dlosses, const int* targets,int B, int T, int V, int P) {const int block_size = 1024;const int N = B * T;const int grid_size = N;fused_classifier_kernel3<<<grid_size, block_size>>>(logits, losses, NULL, dlosses, targets, B, T, V, P);cudaCheck(cudaGetLastError());
}typedef struct {int max_seq_len; int vocab_size; int padded_vocab_size;int num_layers;int num_heads;int channels; 
} GPT2Config;

#define NUM_PARAMETER_TENSORS 16
typedef struct {float* wte; float* wpe; float* ln1w; float* ln1b; float* qkvw; float* qkvb; float* attprojw;float* attprojb;float* ln2w; float* ln2b;float* fcw; float* fcb; float* fcprojw;float* fcprojb; float* lnfw;float* lnfb;
} ParameterTensors;void fill_in_parameter_sizes(size_t* param_sizes, GPT2Config config) {int Vp = config.padded_vocab_size;int C = config.channels;int maxT = config.max_seq_len;int L = config.num_layers;param_sizes[0] = Vp * C; param_sizes[1] = maxT * C; param_sizes[2] = L * C; param_sizes[3] = L * C; param_sizes[4] = L * (3 * C) * C; param_sizes[5] = L * (3 * C); param_sizes[6] = L * C * C; param_sizes[7] = L * C; param_sizes[8] = L * C; param_sizes[9] = L * C; param_sizes[10] = L * (4 * C) * C; param_sizes[11] = L * (4 * C); param_sizes[12] = L * C * (4 * C); param_sizes[13] = L * C; param_sizes[14] = C; param_sizes[15] = C; 
}float* malloc_and_point_parameters(ParameterTensors* params, size_t* param_sizes, int on_device) {size_t num_parameters = 0;for (size_t i = 0; i < NUM_PARAMETER_TENSORS; i++) {num_parameters += param_sizes[i];}float* params_memory;if (on_device) {cudaCheck(cudaMalloc((void**)&params_memory, num_parameters * sizeof(float)));} else {params_memory = (float*)mallocCheck(num_parameters * sizeof(float));}float** ptrs[] = {&params->wte, &params->wpe, &params->ln1w, &params->ln1b, &params->qkvw, &params->qkvb,&params->attprojw, &params->attprojb, &params->ln2w, &params->ln2b, &params->fcw, &params->fcb,&params->fcprojw, &params->fcprojb, &params->lnfw, &params->lnfb};float* params_memory_iterator = params_memory;for (size_t i = 0; i < NUM_PARAMETER_TENSORS; i++) {*(ptrs[i]) = params_memory_iterator;params_memory_iterator += param_sizes[i];}return params_memory;
}

#define NUM_ACTIVATION_TENSORS 21
typedef struct {float* encoded; float* ln1; float* ln1_mean; float* ln1_rstd; float* atty; float* att; float* attproj; float* residual2; float* ln2; float* ln2_mean; float* ln2_rstd; float* fch; float* fch_gelu; float* fcproj; float* residual3; float* lnf; float* lnf_mean; float* lnf_rstd; float* losses; float* qkvr; float* output;
} ActivationTensors;

void fill_in_activation_sizes(size_t* act_sizes, int B, int T, GPT2Config config) {
    size_t Vp = config.padded_vocab_size;
    size_t L = config.num_layers;
    size_t NH = config.num_heads;
    size_t C = config.channels;
    act_sizes[0] = B * T * C;
    act_sizes[1] = L * B * T * C;
    act_sizes[2] = L * B * T;
    act_sizes[3] = L * B * T;
    act_sizes[4] = L * B * T * C;
    act_sizes[5] = L * B * NH * T * T;
    act_sizes[6] = L * B * T * C;
    act_sizes[7] = L * B * T * C;
    act_sizes[8] = L * B * T * C;
    act_sizes[9] = L * B * T;
    act_sizes[10] = L * B * T;
    act_sizes[11] = L * B * T * 4*C;
    act_sizes[12] = L * B * T * 4*C;
    act_sizes[13] = L * B * T * C;
    act_sizes[14] = L * B * T * C;
    act_sizes[15] = B * T * C;
    act_sizes[16] = B * T;
    act_sizes[17] = B * T;
    act_sizes[18] = B * T;
    act_sizes[19] = L * B * T * 3*C; // qkvr
    act_sizes[20] = B * T * max(3*C, max(NH*T, Vp)); // output / scratch
}

#define NUM_BACKWARD_TENSORS 3
typedef struct {float* bt4c; float* preatt; float* residual3; 
} GradActTensors;void fill_in_grad_act_sizes(size_t* act_sizes, int B, int T, GPT2Config config) {size_t NH = config.num_heads;size_t C = config.channels;act_sizes[0] = B * T * 4 * C; act_sizes[1] = B * NH * T * T; act_sizes[2] = B * T * C; 
}float* malloc_and_point(float** targets[], const size_t* act_sizes, int n) {size_t num_activations = 0;for (size_t i = 0; i < n; i++) {num_activations += act_sizes[i];}float* acts_memory;cudaCheck(cudaMalloc((void**)&acts_memory, num_activations * sizeof(float)));float* acts_memory_iterator = acts_memory;for (size_t i = 0; i < n; i++) {*(targets[i]) = acts_memory_iterator;acts_memory_iterator += act_sizes[i];}return acts_memory;
}float* malloc_and_point_activations(ActivationTensors* acts, const size_t* act_sizes) {float** ptrs[] = {&acts->encoded, &acts->ln1, &acts->ln1_mean, &acts->ln1_rstd, &acts->atty,&acts->att, &acts->attproj, &acts->residual2, &acts->ln2, &acts->ln2_mean,&acts->ln2_rstd, &acts->fch, &acts->fch_gelu, &acts->fcproj, &acts->residual3, &acts->lnf,&acts->lnf_mean, &acts->lnf_rstd, &acts->losses, &acts->qkvr, &acts->output};return malloc_and_point(ptrs, act_sizes, NUM_ACTIVATION_TENSORS);
}float* malloc_and_point_backward(GradActTensors* acts, const size_t* act_sizes) {float** ptrs[] = {&acts->bt4c, &acts->preatt, &acts->residual3};return malloc_and_point(ptrs, act_sizes, NUM_BACKWARD_TENSORS);
}typedef struct {GPT2Config config;ParameterTensors params;size_t param_sizes[NUM_PARAMETER_TENSORS];float* params_memory;size_t num_parameters;ParameterTensors grads;float* grads_memory;float* m_memory;float* v_memory;ActivationTensors acts;size_t act_sizes[NUM_ACTIVATION_TENSORS];float* acts_memory;size_t num_activations;GradActTensors grads_acts;size_t num_grad_acts;float* grads_acts_memory;int batch_size; int seq_len;int* inputs; int* targets; float mean_loss; float* cpu_losses; 
} GPT2;

void gpt2_build_from_checkpoint(GPT2 *model, const char* checkpoint_path) {
    FILE *model_file = fopenCheck(checkpoint_path, "rb");
    int model_header[256];
    freadCheck(model_header, sizeof(int), 256, model_file);
    if (model_header[0] != 20240326) { fprintf(stderr, "Bad magic model file\n"); exit(EXIT_FAILURE); }
    if (model_header[1] != 3) {
        // was bumped from 1 -> 3 to incorporate the padded vocab size
        fprintf(stderr, "Bad version in model file\n");
        fprintf(stderr, "---> HINT: try to re-run `python train_gpt2.py`\n");
        exit(EXIT_FAILURE);
    }
    model->config.max_seq_len = model_header[2];
    model->config.vocab_size = model_header[3];
    model->config.num_layers = model_header[4];
    model->config.num_heads = model_header[5];
    model->config.channels = model_header[6];
    model->config.padded_vocab_size = model_header[7];
    fill_in_parameter_sizes(model->param_sizes, model->config);
    size_t num_parameters = 0;
    for (size_t i = 0; i < NUM_PARAMETER_TENSORS; i++) {
        num_parameters += model->param_sizes[i];
    }
    model->num_parameters = num_parameters;
    model->params_memory = malloc_and_point_parameters(&model->params, model->param_sizes, 1);
    float* params_memory_cpu = (float*)mallocCheck(num_parameters * sizeof(float));
    freadCheck(params_memory_cpu, sizeof(float), num_parameters, model_file);
    cudaCheck(cudaMemcpy(model->params_memory, params_memory_cpu, num_parameters * sizeof(float), cudaMemcpyHostToDevice));
    free(params_memory_cpu);
    fcloseCheck(model_file);
    model->acts_memory = NULL;
    model->grads_memory = NULL;
    model->m_memory = NULL;
    model->v_memory = NULL;
    model->grads_acts_memory = NULL;
    model->inputs = NULL;
    model->targets = NULL;
    model->cpu_losses = NULL;
    model->batch_size = 0;
    model->seq_len = 0;
    model->mean_loss = -1.0f; // -1.0f will designate no loss
}void gpt2_forward(GPT2 *model, int* inputs, int* targets, int B, int T) {if (model->params_memory == NULL) {printf("Error: model was not initialized properly.\n");exit(EXIT_FAILURE);}int V = model->config.vocab_size;int Vp = model->config.padded_vocab_size;int L = model->config.num_layers;int NH = model->config.num_heads;int C = model->config.channels;for(int i = 0; i < B * T; i++) {assert(0 <= inputs[i] && inputs[i] < V);if (targets != NULL) {assert(0 <= targets[i] && targets[i] < V);}}if(model->acts_memory == NULL) {model->batch_size = B;model->seq_len = T;fill_in_activation_sizes(model->act_sizes, B, T, model->config);size_t num_activations = 0;for (size_t i = 0; i < NUM_ACTIVATION_TENSORS; i++) {num_activations += model->act_sizes[i];}model->num_activations = num_activations;model->acts_memory = malloc_and_point_activations(&model->acts, model->act_sizes);printf("allocated %zu MiB for activations\n", (num_activations * sizeof(float)) >> 20); cudaCheck(cudaMalloc((void**)&model->inputs, B * T * sizeof(int)));cudaCheck(cudaMalloc((void**)&model->targets, B * T * sizeof(int)));cudaCheck(cudaMallocHost((void**)&model->cpu_losses, B * T * sizeof(float)));} else {if (B != model->batch_size || T != model->seq_len) {printf("Model: B=%d T=%d, Desired: B=%d T=%d\n", model->batch_size, model->seq_len, B, T);exit(EXIT_FAILURE);}}cudaCheck(cudaMemcpy(model->inputs, inputs, B * T * sizeof(int), cudaMemcpyHostToDevice));if (targets != NULL) {cudaCheck(cudaMemcpy(model->targets, targets, B * T * sizeof(int), cudaMemcpyHostToDevice));}ParameterTensors params = model->params; ActivationTensors acts = model->acts;float* residual;encoder_forward(acts.encoded, model->inputs, params.wte, params.wpe, B, T, C); for (int l = 0; l < L; l++) {residual = l == 0 ? acts.encoded : acts.residual3 + (l-1) * B * T * C;float* l_ln1w = params.ln1w + l * C;float* l_ln1b = params.ln1b + l * C;float* l_qkvw = params.qkvw + l * 3*C * C;float* l_qkvb = params.qkvb + l * 3*C;float* l_attprojw = params.attprojw + l * C * C;float* l_attprojb = params.attprojb + l * C;float* l_ln2w = params.ln2w + l * C;float* l_ln2b = params.ln2b + l * C;float* l_fcw = params.fcw + l * 4*C * C;float* l_fcb = params.fcb + l * 4*C;float* l_fcprojw = params.fcprojw + l * C * 4*C;float* l_fcprojb = params.fcprojb + l * C;float* l_ln1 = acts.ln1 + l * B * T * C;float* l_ln1_mean = acts.ln1_mean + l * B * T;float* l_ln1_rstd = acts.ln1_rstd + l * B * T;float* l_qkvr = acts.qkvr + l * B * T * 3*C;float* l_atty = acts.atty + l * B * T * C;float* l_att = acts.att + l * B * NH * T * T;float* l_attproj = acts.attproj + l * B * T * C;float* l_residual2 = acts.residual2 + l * B * T * C;float* l_ln2 = acts.ln2 + l * B * T * C;float* l_ln2_mean = acts.ln2_mean + l * B * T;float* l_ln2_rstd = acts.ln2_rstd + l * B * T;float* l_fch = acts.fch + l * B * T * 4*C;float* l_fch_gelu = acts.fch_gelu + l * B * T * 4*C;float* l_fcproj = acts.fcproj + l * B * T * C;float* l_residual3 = acts.residual3 + l * B * T * C;float* scratch = acts.output;layernorm_forward(l_ln1, l_ln1_mean, l_ln1_rstd, residual, l_ln1w, l_ln1b, B, T, C);matmul_forward_cublaslt(scratch, l_ln1, l_qkvw, l_qkvb, B, T, C, 3*C);attention_forward(l_atty, l_qkvr, l_att, scratch, B, T, C, NH);matmul_forward_cublaslt(l_attproj, l_atty, l_attprojw, l_attprojb, B, T, C, C);residual_forward(l_residual2, residual, l_attproj, B*T*C);layernorm_forward(l_ln2, l_ln2_mean, l_ln2_rstd, l_residual2, l_ln2w, l_ln2b, B, T, C);matmul_forward_cublaslt(l_fch, l_ln2, l_fcw, l_fcb, B, T, C, 4*C);gelu_forward(l_fch_gelu, 
                     l_fch, B*T*4*C);
        matmul_forward_cublaslt(l_fcproj, l_fch_gelu, l_fcprojw, l_fcprojb, B, T, 4*C, C);
        residual_forward(l_residual3, l_residual2, l_fcproj, B*T*C);
    }
    residual = acts.residual3 + (L-1) * B * T * C; // last residual is in residual3
    layernorm_forward(acts.lnf, acts.lnf_mean, acts.lnf_rstd, residual, params.lnfw, params.lnfb, B, T, C);
    matmul_forward_cublaslt(acts.output, acts.lnf, params.wte, NULL, B, T, C, Vp);
    if (targets != NULL) {
        fused_classifier3(acts.output, acts.losses, NULL, model->targets, B, T, V, Vp);
        cudaCheck(cudaMemcpy(model->cpu_losses, acts.losses, B * T * sizeof(float), cudaMemcpyDeviceToHost));
        float mean_loss = 0.0f;
        for (int i=0; i<B*T; i++) { mean_loss += model->cpu_losses[i]; }
        mean_loss /= B*T;
        model->mean_loss = mean_loss;
    } else {
        model->mean_loss = -1.0f;
    }
}void gpt2_zero_grad(GPT2 *model) {if (model->grads_acts_memory != NULL) { cudaCheck(cudaMemset(model->grads_acts_memory, 0, model->num_grad_acts * sizeof(float))); }if (model->grads_memory != NULL) { cudaCheck(cudaMemset(model->grads_memory, 0, model->num_parameters * sizeof(float))); }
}void gpt2_backward(GPT2 *model) {if (model->mean_loss == -1.0f) {printf("Error: must forward with targets before backward\n");exit(EXIT_FAILURE);}if (model->grads_memory == NULL) {model->grads_memory = malloc_and_point_parameters(&model->grads, model->param_sizes, 1);printf("allocated %zu MiB for parameter gradients\n", (model->num_parameters * sizeof(float)) >> 20);size_t bw_act_sizes[NUM_ACTIVATION_TENSORS];GPT2Config cfg = model->config;cfg.num_layers = 1; // copy the configuration but override number of layers to 1fill_in_grad_act_sizes(bw_act_sizes, model->batch_size, model->seq_len, cfg);model->grads_acts_memory = malloc_and_point_backward(&model->grads_acts, bw_act_sizes);model->num_grad_acts = 0;for (int i = 0; i < NUM_BACKWARD_TENSORS; i++) {model->num_grad_acts += bw_act_sizes[i];}printf("allocated %zu MiB for activation gradients\n", (model->num_grad_acts * sizeof(float)) >> 20);gpt2_zero_grad(model);}int B = model->batch_size;int T = model->seq_len;int Vp = model->config.padded_vocab_size;int L = model->config.num_layers;int NH = model->config.num_heads;int C = model->config.channels;ParameterTensors params = model->params; ParameterTensors grads = model->grads;ActivationTensors acts = model->acts;GradActTensors grads_acts = model->grads_acts;matmul_backward(grads_acts.bt4c, grads.wte, NULL, acts.output, acts.lnf, params.wte, B, T, C, Vp);float* residual = acts.residual3 + (L-1) * B * T * C; float* dresidual = grads_acts.residual3; layernorm_backward(dresidual, grads.lnfw, grads.lnfb, grads_acts.bt4c, residual, params.lnfw, acts.lnf_mean, acts.lnf_rstd, B, T, C);for (int l = L-1; l >= 0; l--) {residual = l == 0 ? acts.encoded : acts.residual3 + (l-1) * B * T * C;float* l_ln1w = params.ln1w + l * C;float* l_qkvw = params.qkvw + l * 3*C * C;float* l_attprojw = params.attprojw + l * C * C;float* l_ln2w = params.ln2w + l * C;float* l_fcw = params.fcw + l * 4*C * C;float* l_fcprojw = params.fcprojw + l * C * 4*C;float* dl_ln1w = grads.ln1w + l * C;float* dl_ln1b = grads.ln1b + l * C;float* dl_qkvw = grads.qkvw + l * 3*C * C;float* dl_qkvb = grads.qkvb + l * 3*C;float* dl_attprojw = grads.attprojw + l * C * C;float* dl_attprojb = grads.attprojb + l * C;float* dl_ln2w = grads.ln2w + l * C;float* dl_ln2b = grads.ln2b + l * C;float* dl_fcw = grads.fcw + l * 4*C * C;float* dl_fcb = grads.fcb + l * 4*C;float* dl_fcprojw = grads.fcprojw + l * C * 4*C;float* dl_fcprojb = grads.fcprojb + l * C;float* l_ln1 = acts.ln1 + l * B * T * C;float* l_ln1_mean = acts.ln1_mean + l * B * T;float* l_ln1_rstd = acts.ln1_rstd + l * B * T;float* l_qkvr = acts.qkvr + l * B * T * 3*C;float* l_atty = acts.atty + l * B * T * C;float* l_att = acts.att + l * B * NH * T * T;float* l_residual2 = acts.residual2 + l * B * T * C;float* l_ln2 = acts.ln2 + l * B * T * C;float* l_ln2_mean = acts.ln2_mean + l * B * T;float* l_ln2_rstd = acts.ln2_rstd + l * B * T;float* l_fch = acts.fch + l * B * T * 4*C;float* l_fch_gelu = acts.fch_gelu + l * B * T * 4*C;float* dl_btc = acts.lnf;float* dl_bt4c = grads_acts.bt4c;float* dl_preatt = grads_acts.preatt;float* scratch = acts.output;matmul_backward(dl_bt4c, dl_fcprojw, dl_fcprojb, dresidual, l_fch_gelu, l_fcprojw, B, T, 4*C, C);gelu_backward(dl_bt4c, l_fch, dl_bt4c, B*T*4*C);matmul_backward(dl_btc, dl_fcw, dl_fcb, dl_bt4c, l_ln2, l_fcw, B, T, C, 4 * C);layernorm_backward(dresidual, dl_ln2w, dl_ln2b, dl_btc, l_residual2, l_ln2w, l_ln2_mean, l_ln2_rstd, B, T, C);matmul_backward(dl_btc, dl_attprojw, dl_attprojb, dresidual, l_atty, l_attprojw, B, T, C, C);float* buffer_a = 
l_atty;float* buffer_b = l_fch;  attention_backward(dl_bt4c, buffer_b, dl_preatt, scratch, buffer_a, dl_btc, l_qkvr, l_att, B, T, C, NH);matmul_backward(dl_btc, dl_qkvw, dl_qkvb, dl_bt4c, l_ln1, l_qkvw, B, T, C, 3 * C);layernorm_backward(dresidual, dl_ln1w, dl_ln1b, dl_btc, residual, l_ln1w, l_ln1_mean, l_ln1_rstd, B, T, C);}encoder_backward(grads.wte, grads.wpe, dresidual, model->inputs, B, T, C);
}void gpt2_update(GPT2 *model, float learning_rate, float beta1, float beta2, float eps, float weight_decay, int t) {if (model->m_memory == NULL) {cudaCheck(cudaMalloc((void**)&model->m_memory, model->num_parameters * sizeof(float)));cudaCheck(cudaMalloc((void**)&model->v_memory, model->num_parameters * sizeof(float)));cudaCheck(cudaMemset(model->m_memory, 0, model->num_parameters * sizeof(float)));cudaCheck(cudaMemset(model->v_memory, 0, model->num_parameters * sizeof(float)));printf("allocated %zu MiB for AdamW optimizer state m\n", (model->num_parameters * sizeof(float)) >> 20);printf("allocated %zu MiB for AdamW optimizer state v\n", (model->num_parameters * sizeof(float)) >> 20);}int block_size = 512;int num_blocks = CEIL_DIV(model->num_parameters, block_size);float beta1_correction = 1.0f - powf(beta1, t);float beta2_correction = 1.0f - powf(beta2, t);adamw_kernel2<<<num_blocks, block_size>>>(model->params_memory, model->grads_memory, model->m_memory, model->v_memory,model->num_parameters,learning_rate, beta1, beta2, beta1_correction, beta2_correction, eps, weight_decay);cudaCheck(cudaGetLastError());
}void gpt2_free(GPT2 *model) {cudaCheck(cudaFree(model->params_memory));cudaCheck(cudaFree(model->grads_memory));cudaCheck(cudaFree(model->m_memory));cudaCheck(cudaFree(model->v_memory));cudaCheck(cudaFree(model->acts_memory));cudaCheck(cudaFree(model->grads_acts_memory));cudaCheck(cudaFree(model->inputs));cudaCheck(cudaFree(model->targets));cudaFreeHost(model->cpu_losses);
}

#ifndef TESTING
typedef struct {
    int B;
    int T;
    FILE* tokens_file;
    long file_size;
    long current_position;
    // output memory
    int* batch;
    int* inputs;
    int* targets;
    long num_batches;
} DataLoader;void dataloader_init(DataLoader *loader, const char* filename, int B, int T) {loader->B = B;loader->T = T;loader->tokens_file = fopenCheck(filename, "rb");fseekCheck(loader->tokens_file, 0, SEEK_END);loader->file_size = ftell(loader->tokens_file);fseekCheck(loader->tokens_file, 0, SEEK_SET);if (loader->file_size < (B * T + 1) * sizeof(int)) {printf("Error: file size is too small for the batch size and sequence length\n");exit(EXIT_FAILURE);}loader->current_position = 0; cudaMallocHost((void**)&loader->batch, (B * T + 1) * sizeof(int));loader->inputs = loader->batch;loader->targets = loader->batch + 1; loader->num_batches = loader->file_size / (B * T * sizeof(int));
}void dataloader_reset(DataLoader *loader) {loader->current_position = 0;
}void dataloader_next_batch(DataLoader *loader) {int B = loader->B;int T = loader->T;if (loader->current_position + (B*T+1) * sizeof(int) > loader->file_size) {loader->current_position = 0;}fseekCheck(loader->tokens_file, loader->current_position, SEEK_SET);freadCheck(loader->batch, sizeof(int), B*T+1, loader->tokens_file);loader->current_position += B*T * sizeof(int);
}void dataloader_free(DataLoader *loader) {fcloseCheck(loader->tokens_file);cudaFreeHost(loader->batch);
}

#define GPT2_EOT 50256

unsigned int random_u32(unsigned long long *state) {
    *state ^= *state >> 12;
    *state ^= *state << 25;
    *state ^= *state >> 27;
    return (*state * 0x2545F4914F6CDD1Dull) >> 32;
}
float random_f32(unsigned long long *state) { return (random_u32(state) >> 8) / 16777216.0f;
}int sample_softmax(const float* logits, int n, float coin) {double norm = 0;for (int i = 0; i < n; i++) {norm += expf(logits[i]);}coin *= norm;float cdf = 0.0f;for (int i = 0; i < n; i++) {cdf += expf(logits[i]);if (coin < cdf) {return i;}}return n - 1; 
}typedef struct {FILE *logfile;int flush_every; // every how many steps to flush the log
} Logger;void logger_init(Logger *logger, const char *filename) {logger->flush_every = 20;logger->logfile = NULL;if (filename != NULL) { logger->logfile = fopenCheck(filename, "w"); }
}void logger_log_val(Logger *logger, int step, float val_loss) {if (logger->logfile != NULL) {fprintf(logger->logfile, "s:%d tel:%.4f\n", step, val_loss);}
}void logger_log_train(Logger *logger, int step, float train_loss) {if (logger->logfile != NULL) {fprintf(logger->logfile, "s:%d trl:%.4f\n", step, train_loss);if (step % 10 == 0) { fflush(logger->logfile); }}
}void logger_free(Logger *logger) {if (logger->logfile != NULL) { fclose(logger->logfile); }
}void error_usage() {fprintf(stderr, "Usage:   ./train_gpt2fp32cu [options]\n");fprintf(stderr, "Example: ./train_gpt2fp32cu -i data/TinyStories -v 100 -s 100 -g 144 -o stories.log\n");fprintf(stderr, "Options:\n");fprintf(stderr, "  -i <string> input dataset prefix (default = data/tiny_shakespeare)\n");fprintf(stderr, "  -o <string> output log file (default = NULL)\n");fprintf(stderr, "  -b <int>    batch size B (default = 4)\n");fprintf(stderr, "  -t <int>    sequence length T (default = 1024)\n");fprintf(stderr, "  -l <float>  learning rate (default = 3e-4f)\n");fprintf(stderr, "  -v <int>    val_loss_every, how often we evaluate val loss (default = 20)\n");fprintf(stderr, "  -m <int>    val_max_batches, up to how many val batches to estimate val loss? (default = 20)\n");fprintf(stderr, "  -s <int>    sample_every, how often we inference the model (default = 20)\n");fprintf(stderr, "  -g <int>    genT, how many steps of inference we do (default = 64)\n");exit(EXIT_FAILURE);
}int main(int argc, char *argv[]) {const char* input_dataset_prefix = "data/tiny_shakespeare"; const char* output_log_file = NULL;int B = 4; int T = 1024; float learning_rate = 3e-4f;int val_loss_every = 20; int val_max_batches = 20; int sample_every = 20; int genT = 64; for (int i = 1; i < argc; i+=2) {if (i + 1 >= argc) { error_usage(); } // must have arg after flagif (argv[i][0] != '-') { error_usage(); } // must start with dashif (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)// read in the argsif (argv[i][1] == 'i') { input_dataset_prefix = argv[i+1]; }else if (argv[i][1] == 'o') { output_log_file = argv[i+1]; }else if (argv[i][1] == 'b') { B = atoi(argv[i+1]); }else if (argv[i][1] == 't') { T = atoi(argv[i+1]); }else if (argv[i][1] == 'l') { learning_rate = atof(argv[i+1]); }else if (argv[i][1] == 'v') { val_loss_every = atoi(argv[i+1]); }else if (argv[i][1] == 'm') { val_max_batches = atoi(argv[i+1]); }else if (argv[i][1] == 's') { sample_every = atoi(argv[i+1]); }else if (argv[i][1] == 'g') { genT = atoi(argv[i+1]); }else { error_usage(); }}printf("+-----------------------+----------------------------------------------------+\n");printf("| Parameter             | Value                                              |\n");printf("+-----------------------+----------------------------------------------------+\n");printf("| input dataset prefix  | %-50s |\n", input_dataset_prefix);printf("| output log file       | %-50s |\n", output_log_file == NULL ? "NULL" : output_log_file);printf("| batch size B          | %-50d |\n", B);printf("| sequence length T     | %-50d |\n", T);printf("| learning rate         | %-50f |\n", learning_rate);printf("| val_loss_every        | %-50d |\n", val_loss_every);printf("| val_max_batches       | %-50d |\n", val_max_batches);printf("| sample_every          | %-50d |\n", sample_every);printf("| genT                  | %-50d |\n", genT);printf("+-----------------------+----------------------------------------------------+\n");int deviceIdx = 0;cudaCheck(cudaSetDevice(deviceIdx));cudaDeviceProp deviceProp;cudaGetDeviceProperties(&deviceProp, deviceIdx);cublasCheck(cublasCreate(&cublas_handle));cublasCheck(cublasLtCreate(&cublaslt_handle));int enable_tf32 = deviceProp.major >= 8 ? 1 : 0;cublas_compute_type = enable_tf32 ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;cublasMath_t cublas_math_mode = enable_tf32 ? CUBLAS_TF32_TENSOR_OP_MATH : CUBLAS_DEFAULT_MATH;cublasCheck(cublasSetMathMode(cublas_handle, cublas_math_mode));cudaCheck(cudaMalloc(&cublaslt_workspace, cublaslt_workspace_size));printf("| device                | %-50s |\n", deviceProp.name);printf("| TF32                  | %-50s |\n", enable_tf32 ? 
"enabled" : "disabled");printf("+-----------------------+----------------------------------------------------+\n");GPT2 model;gpt2_build_from_checkpoint(&model, "gpt2_124M.bin");printf("| max_sequence_length T | %-50d |\n", model.config.max_seq_len);printf("| vocab_size V          | %-50d |\n", model.config.vocab_size);printf("| padded_vocab_size Vp  | %-50d |\n", model.config.padded_vocab_size);printf("| num_layers L          | %-50d |\n", model.config.num_layers);printf("| num_heads NH          | %-50d |\n", model.config.num_heads);printf("| channels C            | %-50d |\n", model.config.channels);printf("| num_parameters        | %-50zu |\n", model.num_parameters);printf("+-----------------------+----------------------------------------------------+\n");char train_tokens_filename[128];char val_tokens_filename[128];assert(strlen(input_dataset_prefix) < 100); // being bit lazy here, make sure we don't overflowsprintf(train_tokens_filename, "%s_train.bin", input_dataset_prefix);sprintf(val_tokens_filename, "%s_val.bin", input_dataset_prefix);DataLoader train_loader;dataloader_init(&train_loader, train_tokens_filename, B, T);DataLoader val_loader;dataloader_init(&val_loader, val_tokens_filename, B, T);int train_num_batches = train_loader.num_batches; // let's do 1 epoch by default for nowint val_num_batches = train_loader.num_batches < val_max_batches ? train_loader.num_batches : val_max_batches;printf("| train_num_batches     | %-50d |\n", train_num_batches);printf("| val_num_batches       | %-50d |\n", val_num_batches);printf("+-----------------------+----------------------------------------------------+\n");printf("allocated %d MiB for model parameters\n", (int)round(model.num_parameters * sizeof(float) / (1024 * 1024)));Logger logger;logger_init(&logger, output_log_file);Tokenizer tokenizer;tokenizer_init(&tokenizer, "gpt2_tokenizer.bin");unsigned long long rng_state = 1337;int* gen_tokens = (int*)mallocCheck(B * T * sizeof(int));float* cpu_logits = (float*)mallocCheck(model.config.vocab_size * sizeof(float));struct timespec start, end;double total_sum_iteration_time_s = 0.0;for (int step = 0; step <= train_num_batches; step++) {int last_step = step == train_num_batches;if (step % val_loss_every == 0 || last_step) {float val_loss = 0.0f;dataloader_reset(&val_loader);for (int i = 0; i < val_num_batches; i++) {dataloader_next_batch(&val_loader);gpt2_forward(&model, val_loader.inputs, val_loader.targets, B, T);val_loss += model.mean_loss;}val_loss /= val_num_batches;printf("val loss %f\n", val_loss);logger_log_val(&logger, step, val_loss);}if (step > 0 && step % sample_every == 0 || last_step) {for(int i = 0; i < B * T; ++i) {gen_tokens[i] = GPT2_EOT;}printf("generating:\n---\n");for (int t = 1; t < genT; t++) {gpt2_forward(&model, gen_tokens, NULL, B, T);float* logits = model.acts.output + (t - 1) * model.config.padded_vocab_size;cudaCheck(cudaMemcpy(cpu_logits, logits, model.config.vocab_size * sizeof(float), cudaMemcpyDeviceToHost));float coin = random_f32(&rng_state);int next_token = sample_softmax(cpu_logits, model.config.vocab_size, coin);gen_tokens[t] = next_token;if (tokenizer.init_ok) {const char* token_str = tokenizer_decode(&tokenizer, next_token);safe_printf(token_str);} else {printf("%d ", next_token);}fflush(stdout);}printf("\n---\n");}if (last_step) { break; }clock_gettime(CLOCK_MONOTONIC, &start);dataloader_next_batch(&train_loader);gpt2_forward(&model, train_loader.inputs, train_loader.targets, B, T);gpt2_zero_grad(&model);gpt2_backward(&model);gpt2_update(&model, 
learning_rate, 0.9f, 0.999f, 1e-8f, 0.0f, step+1);cudaCheck(cudaDeviceSynchronize()); clock_gettime(CLOCK_MONOTONIC, &end);double time_elapsed_s = (end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) / 1e9;total_sum_iteration_time_s += time_elapsed_s;int tokens_per_second = (B * T) / time_elapsed_s;printf("step %4d/%d: train loss %f (%f ms, %d tok/s)\n", step + 1, train_num_batches, model.mean_loss, time_elapsed_s * 1000, tokens_per_second);logger_log_train(&logger, step, model.mean_loss);}printf("total average iteration time: %f ms\n", total_sum_iteration_time_s / train_num_batches * 1000);dataloader_free(&train_loader);dataloader_free(&val_loader);tokenizer_free(&tokenizer);gpt2_free(&model);free(cpu_logits);free(gen_tokens);cudaCheck(cudaFree(cublaslt_workspace));cublasCheck(cublasDestroy(cublas_handle));cublasCheck(cublasLtDestroy(cublaslt_handle));logger_free(&logger);return 0;
}
#endif

Walkthrough

This is a C/CUDA program that trains a GPT-2 model, using NVIDIA's CUDA platform for GPU acceleration. GPT-2 is a Transformer-based language model commonly used for text generation. The program covers data loading, building the model from a checkpoint, the forward and backward passes, parameter updates, and periodic validation and text sampling. The main parts of the program are analyzed below:

  1. Header files: the program starts by including a number of headers that provide access to the C standard library, math routines, timing, assertions, floating-point limits, string handling, and UNIX system calls.

  2. CUDA, cuBLAS and cooperative groups: the program uses the CUDA runtime API, the cuBLAS/cuBLASLt libraries (NVIDIA's GPU-accelerated linear-algebra libraries) and the cooperative groups library, which the kernels use for efficient warp- and block-level reductions; main creates the library handles and allocates the cuBLASLt workspace.

  3. Custom macros: CEIL_DIV computes a rounded-up (ceiling) integer division, used to size kernel grids (a minimal launch sketch follows this list), while cudaCheck and cublasCheck check the results of CUDA and cuBLAS calls and abort on error.

  4. Device functions and kernels: the program defines device functions (functions executed on the GPU, such as add_float4) and kernels (functions launched in parallel on the GPU) such as encoder_forward_kernel3 and encoder_backward_kernel; these kernels implement the model's forward- and backward-pass computations. The per-element math of the GELU kernels is written out in a sketch below.

  5. Forward- and backward-pass functions: host-side wrappers such as encoder_forward, encoder_backward, layernorm_forward and matmul_forward_cublaslt run the forward pass, while functions such as matmul_backward and layernorm_backward run the backward pass.

  6. Attention mechanism: the program implements the forward and backward passes of attention, the core of the Transformer architecture, using permute/unpermute kernels, a causal softmax kernel and batched cuBLAS GEMMs. The numerically stable streaming softmax used here is sketched further below.

  7. Optimizer: the AdamW optimizer is implemented in the adamw_kernel2 kernel, which updates the model parameters (see the AdamW sketch below).

  8. Model construction: gpt2_build_from_checkpoint restores the model parameters from a checkpoint file.

  9. Model forward pass: gpt2_forward runs the forward pass and computes the loss.

  10. Model backward pass and parameter update: gpt2_backward runs the backward pass and computes the gradients; gpt2_update applies the AdamW update to the model parameters.

  11. Data loader: the DataLoader struct and its helper functions load training and validation data from token files.

  12. Logging: the Logger struct and its helper functions record the training and validation losses during training.

  13. Main function: main is the program's entry point; it initializes the model, the data loaders and the logger, then enters the training loop, where it loads a batch, runs the forward pass, computes the loss, runs the backward pass, updates the parameters, and periodically reports the validation loss and generates text samples.

  14. Error handling and argument parsing: the program parses command-line options so the user can customize the training parameters, and prints a usage message and exits on invalid input.

  15. Random number generation: an xorshift random number generator supplies the randomness used when sampling tokens during text generation.

  16. Tokenizer: a Tokenizer turns sampled token IDs back into text when printing generated samples.

  17. Performance measurement: clock_gettime measures the wall-clock time of each training step, from which the program reports milliseconds per step and tokens processed per second.

  18. Resource cleanup: after training finishes, the program frees every allocated resource, including device memory, pinned host memory, file handles and the log file.
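To make the launch pattern from point 3 concrete, here is a minimal sketch of how the host-side launchers in the file size their grids with CEIL_DIV and how every kernel guards against the resulting overshoot. The kernel, function names and sizes below are illustrative placeholders, not part of the original file:

// Minimal sketch of the CEIL_DIV launch pattern (illustrative, not from the original file).
#define CEIL_DIV(M, N) (((M) + (N)-1) / (N))   // same definition as in the file above

__global__ void scale_kernel(float* out, const float* inp, int N) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {              // guard: the last block may contain a few extra threads
        out[i] = 2.0f * inp[i];
    }
}

void scale(float* out, const float* inp, int N) {
    const int block_size = 256;
    const int grid_size = CEIL_DIV(N, block_size);  // round up so all N elements are covered
    scale_kernel<<<grid_size, block_size>>>(out, inp, N);
    cudaCheck(cudaGetLastError());
}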

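The GELU kernels from points 4-5 use the common tanh approximation. As a plain-C sketch of what gelu_forward_kernel and gelu_backward_kernel compute per element (the helper names gelu and gelu_grad are illustrative, not functions in the file):

#include <math.h>

// Per-element GELU (tanh approximation) and its derivative, mirroring the kernels above.
// Note: the kernel computes sech^2 via 1/cosh^2; 1 - tanh^2 is the same quantity.
static float gelu(float x) {
    float k = sqrtf(2.0f / M_PI);           // GELU_SCALING_FACTOR
    float cube = 0.044715f * x * x * x;
    return 0.5f * x * (1.0f + tanhf(k * (x + cube)));
}

static float gelu_grad(float x) {
    float k = sqrtf(2.0f / M_PI);
    float cube = 0.044715f * x * x * x;
    float t = tanhf(k * (x + cube));
    float sech2 = 1.0f - t * t;
    return 0.5f * (1.0f + t) + 0.5f * x * sech2 * k * (1.0f + 3.0f * 0.044715f * x * x);
}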
Taken as a whole, the program is a complete training loop — data loading, model construction, training, validation and text generation — and demonstrates how CUDA and cuBLAS can be used to train a nontrivial machine-learning model on the GPU.
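Two numerical details are worth spelling out. First, the AdamW step from point 7: adamw_kernel2 expresses the moment updates with a lerp helper, and lerp(grad, m, beta1) is just beta1 * m + (1 - beta1) * grad. A host-side sketch of the per-parameter update it performs (adamw_step is an illustrative name, not a function in the file):

#include <math.h>

// One AdamW step for a single parameter, mirroring adamw_kernel2:
// bias-corrected first/second moments plus decoupled weight decay.
void adamw_step(float* param, float* m, float* v, float grad,
                float lr, float beta1, float beta2, float eps, float weight_decay, int t) {
    *m = beta1 * (*m) + (1.0f - beta1) * grad;          // first moment (momentum)
    *v = beta2 * (*v) + (1.0f - beta2) * grad * grad;   // second moment (RMSprop-style)
    float m_hat = *m / (1.0f - powf(beta1, (float)t));  // bias corrections, precomputed by
    float v_hat = *v / (1.0f - powf(beta2, (float)t));  //   gpt2_update and passed to the kernel
    *param -= lr * (m_hat / (sqrtf(v_hat) + eps) + weight_decay * (*param));
}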

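Second, the streaming softmax mentioned in point 6: both softmax_forward_kernel5 and prepare_softmax_blockwide_nofloat4 keep a running maximum and rescale the running sum of exponentials whenever the maximum grows, so expf never sees a large argument. A scalar sketch of the idea (online_softmax_norm is an illustrative name, not in the file):

#include <math.h>
#include <float.h>

// Online computation of max(x) and sum_j exp(x[j] - max(x)) in a single pass.
// Whenever a new maximum appears, the accumulated sum is rescaled to stay consistent.
void online_softmax_norm(const float* x, int n, float* out_max, float* out_sum) {
    float maxval = -FLT_MAX;
    float sumval = 0.0f;
    for (int i = 0; i < n; i++) {
        float old_max = maxval;
        maxval = fmaxf(maxval, x[i]);
        sumval = sumval * expf(old_max - maxval) + expf(x[i] - maxval);
    }
    *out_max = maxval;
    *out_sum = sumval;   // softmax(x)[i] = expf(x[i] - *out_max) / *out_sum
}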