./llama-perplexity -m /mnt/llm/models/Qwen2.5-Coder-7B-Instruct-128k-Q6_K.gguf -f /mnt/llm/models/datasets/wiki.train.raw.txt -ngl 99999 -fa -b 2048 -c 6114 -sm none
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 2 CUDA devices:
Device 0: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes
Device 1: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes
build: 1108 (c9c6e01d) with cc (GCC) 14.2.1 20240912 (Red Hat 14.2.1-3) for x86_64-redhat-linux
llama_load_model_from_file: using device CUDA0 (NVIDIA GeForce RTX 3090) - 24111 MiB free
llama_model_loader: loaded meta data with 27 key-value pairs and 339 tensors from /mnt/llm/models/Qwen2.5-Coder-7B-Instruct-128k-Q6_K.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = qwen2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Qwen2.5 Coder 7B Instruct
llama_model_loader: - kv 3: general.organization str = Qwen
llama_model_loader: - kv 4: general.finetune str = Instruct
llama_model_loader: - kv 5: general.basename str = Qwen2.5-Coder
llama_model_loader: - kv 6: general.size_label str = 7B
llama_model_loader: - kv 7: qwen2.block_count u32 = 28
llama_model_loader: - kv 8: qwen2.context_length u32 = 131072
llama_model_loader: - kv 9: qwen2.embedding_length u32 = 3584
llama_model_loader: - kv 10: qwen2.feed_forward_length u32 = 18944
llama_model_loader: - kv 11: qwen2.attention.head_count u32 = 28
llama_model_loader: - kv 12: qwen2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 13: qwen2.rope.freq_base f32 = 1000000.000000
llama_model_loader: - kv 14: qwen2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 15: general.file_type u32 = 18
llama_model_loader: - kv 16: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 17: tokenizer.ggml.pre str = qwen2
llama_model_loader: - kv 18: tokenizer.ggml.tokens arr[str,152064] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 19: tokenizer.ggml.token_type arr[i32,152064] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 20: tokenizer.ggml.merges arr[str,151387] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
llama_model_loader: - kv 21: tokenizer.ggml.eos_token_id u32 = 151645
llama_model_loader: - kv 22: tokenizer.ggml.padding_token_id u32 = 151665
llama_model_loader: - kv 23: tokenizer.ggml.bos_token_id u32 = 151643
llama_model_loader: - kv 24: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 25: tokenizer.chat_template str = {%- if tools %}\n {{- '<|im_start|>...
llama_model_loader: - kv 26: general.quantization_version u32 = 2
llama_model_loader: - type f32: 141 tensors
llama_model_loader: - type q6_K: 198 tensors
llm_load_vocab: special tokens cache size = 23
llm_load_vocab: token to piece cache size = 0.9310 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = qwen2
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 152064
llm_load_print_meta: n_merges = 151387
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 131072
llm_load_print_meta: n_embd = 3584
llm_load_print_meta: n_layer = 28
llm_load_print_meta: n_head = 28
llm_load_print_meta: n_head_kv = 4
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 7
llm_load_print_meta: n_embd_k_gqa = 512
llm_load_print_meta: n_embd_v_gqa = 512
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 18944
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 1000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 131072
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 7B
llm_load_print_meta: model ftype = Q6_K
llm_load_print_meta: model params = 7.62 B
llm_load_print_meta: model size = 5.82 GiB (6.56 BPW)
llm_load_print_meta: general.name = Qwen2.5 Coder 7B Instruct
llm_load_print_meta: BOS token = 151643 '<|endoftext|>'
llm_load_print_meta: EOS token = 151645 '<|im_end|>'
llm_load_print_meta: EOT token = 151645 '<|im_end|>'
llm_load_print_meta: PAD token = 151665 '<|PAD_TOKEN|>'
llm_load_print_meta: LF token = 148848 'ÄĬ'
llm_load_print_meta: FIM PRE token = 151659 '<|fim_prefix|>'
llm_load_print_meta: FIM SUF token = 151661 '<|fim_suffix|>'
llm_load_print_meta: FIM MID token = 151660 '<|fim_middle|>'
llm_load_print_meta: FIM PAD token = 151662 '<|fim_pad|>'
llm_load_print_meta: FIM REP token = 151663 '<|repo_name|>'
llm_load_print_meta: FIM SEP token = 151664 '<|file_sep|>'
llm_load_print_meta: EOG token = 151643 '<|endoftext|>'
llm_load_print_meta: EOG token = 151645 '<|im_end|>'
llm_load_print_meta: EOG token = 151662 '<|fim_pad|>'
llm_load_print_meta: EOG token = 151663 '<|repo_name|>'
llm_load_print_meta: EOG token = 151664 '<|file_sep|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: offloading 28 repeating layers to GPU
llm_load_tensors: offloading output layer to GPU
llm_load_tensors: offloaded 29/29 layers to GPU
llm_load_tensors: CUDA0 model buffer size = 5532.43 MiB
llm_load_tensors: CPU_Mapped model buffer size = 426.36 MiB
.......................................................................................
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 6144
llama_new_context_with_model: n_ctx_per_seq = 6144
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 1000000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (6144) < n_ctx_train (131072) -- the full capacity of the model will not be utilized
llama_kv_cache_init: CUDA0 KV buffer size = 336.00 MiB
llama_new_context_with_model: KV self size = 336.00 MiB, K (f16): 168.00 MiB, V (f16): 168.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.58 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 304.00 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 19.01 MiB
llama_new_context_with_model: graph nodes = 875
llama_new_context_with_model: graph splits = 2
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)
system_info: n_threads = 12 (n_threads_batch = 12) / 24 | CUDA : ARCHS = 860 | F16 = 1 | USE_GRAPHS = 1 | PEER_MAX_BATCH_SIZE = 512 | FA_ALL_QUANTS = 1 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | AVX_VNNI = 1 | AVX2 = 1 | F16C = 1 | FMA = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 |
perplexity: tokenizing the input ..
perplexity: tokenization took 3847.93 ms
perplexity: calculating perplexity over 411 chunks, n_ctx=6114, batch_size=2048, n_seq=1
perplexity: 2.45 seconds per pass - ETA 16.78 minutes
[1]9.9005,[2]10.1277,[3]8.7797,[4]8.8872,[5]8.8445,[6]8.3996,[7]8.5144,[8]8.7408,[9]8.5662,[10]8.3672,[11]8.4745,[12]8.4324,[13]8.5226,[14]8.6839,[15]8.7338,[16]8.7595,[17]8.8943,[18]8.9764,[19]9.0896,[20]8.8531,[21]8.7883,[22]8.8229,[23]8.8937,[24]8.8060,[25]8.7939,[26]8.7325,[27]8.7480,[28]8.6756,[29]8.6977,[30]8.6592,[31]8.7171,[32]8.7000,[33]8.7420,[34]8.6214,[35]8.6080,[36]8.6280,[37]8.6619,[38]8.6559,[39]8.6212,[40]8.5418,[41]8.5233,[42]8.4308,[43]8.5438,[44]8.5131,[45]8.5128,[46]8.4269,[47]8.4488,[48]8.3611,[49]8.3392,[50]8.3317,[51]8.3652,[52]8.4113,[53]8.3988,[54]8.4221,[55]8.4427,[56]8.4256,[57]8.3906,[58]8.4268,[59]8.4889,[60]8.5656,[61]8.5528,[62]8.5337,[63]8.4542,[64]8.5092,[65]8.5372,[66]8.5300,[67]8.5532,[68]8.5342,[69]8.4698,[70]8.4975,[71]8.5381,[72]8.5263,[73]8.5691,[74]8.5530,[75]8.5423,[76]8.5456,[77]8.5195,[78]8.5440,[79]8.5691,[80]8.6091,[81]8.5939,[82]8.5764,[83]8.5389,[84]8.5353,[85]8.5573,[86]8.6101,[87]8.5902,[88]8.6161,[89]8.6363,[90]8.6399,[91]8.6407,[92]8.6323,[93]8.6502,[94]8.6516,[95]8.6472,[96]8.6238,[97]8.6389,[98]8.6312,[99]8.6256,[100]8.5921,[101]8.6209,[102]8.6061,[103]8.5897,[104]8.6267,[105]8.6436,[106]8.6406,[107]8.6203,[108]8.6043,[109]8.5841,[110]8.5604,[111]8.5776,[112]8.5959,[113]8.5700,[114]8.5519,[115]8.5580,[116]8.5541,[117]8.5468,[118]8.5384,[119]8.5243,[120]8.5359,[121]8.5281,[122]8.5457,[123]8.5159,[124]8.5238,[125]8.5405,[126]8.5464,[127]8.5217,[128]8.4996,[129]8.5169,[130]8.5269,[131]8.5029,[132]8.4994,[133]8.4571,[134]8.4630,[135]8.4497,[136]8.4505,[137]8.4643,[138]8.4665,[139]8.4568,[140]8.4515,[141]8.4506,[142]8.4499,[143]8.4438,[144]8.4445,[145]8.4408,[146]8.4605,[147]8.4491,[148]8.4585,[149]8.4348,[150]8.4172,[151]8.4079,[152]8.4124,[153]8.4157,[154]8.4183,[155]8.4062,[156]8.4117,[157]8.4112,[158]8.4149,[159]8.4253,[160]8.4274,[161]8.4323,[162]8.4545,[163]8.4536,[164]8.4528,[165]8.4450,[166]8.4367,[167]8.4359,[168]8.4373,[169]8.4388,[170]8.4308,[171]8.4227,[172]8.4395,[173]8.4501,[174]8.4315,[175]8.4322,[176]8.4310,[177]8.4435,[178]8.4521,[179]8.4473,[180]8.4509,[181]8.4636,[182]8.4680,[183]8.4787,[184]8.4672,[185]8.4551,[186]8.4688,[187]8.4671,[188]8.4710,[189]8.4532,[190]8.4308,[191]8.4384,[192]8.4190,[193]8.4275,[194]8.4260,[195]8.4054,[196]8.4097,[197]8.4065,[198]8.4187,[199]8.4127,[200]8.4175,[201]8.4136,[202]8.4075,[203]8.4069,[204]8.4153,[205]8.4138,[206]8.4120,[207]8.3999,[208]8.3896,[209]8.3953,[210]8.3880,[211]8.3894,[212]8.3893,[213]8.3740,[214]8.3563,[215]8.3377,[216]8.3373,[217]8.3414,[218]8.3463,[219]8.3585,[220]8.3555,[221]8.3405,[222]8.3429,[223]8.3371,[224]8.3366,[225]8.3300,[226]8.3389,[227]8.3488,[228]8.3479,[229]8.3689,[230]8.3694,[231]8.3741,[232]8.3891,[233]8.3884,[234]8.3733,[235]8.3677,[236]8.3696,[237]8.3741,[238]8.3865,[239]8.3924,[240]8.4039,[241]8.3847,[242]8.3470,[243]8.3388,[244]8.3278,[245]8.3315,[246]8.3293,[247]8.3191,[248]8.3266,[249]8.3226,[250]8.3288,[251]8.3249,[252]8.3249,[253]8.3240,[254]8.3269,[255]8.3100,[256]8.2946,[257]8.2956,[258]8.3101,[259]8.3123,[260]8.3233,[261]8.3385,[262]8.3442,[263]8.3485,[264]8.3397,[265]8.3261,[266]8.3215,[267]8.3148,[268]8.2950,[269]8.3019,[270]8.2932,[271]8.3006,[272]8.3020,[273]8.2963,[274]8.2996,[275]8.3011,[276]8.2933,[277]8.2929,[278]8.2919,[279]8.2872,[280]8.2992,[281]8.3011,[282]8.2958,[283]8.2924,[284]8.2924,[285]8.2942,[286]8.3034,[287]8.3070,[288]8.2996,[289]8.3043,[290]8.3101,[291]8.3252,[292]8.3251,[293]8.3036,[294]8.3007,[295]8.2992,[296]8.3006,[297]8.3143,[298]8.3116,[299]8.3095,[300]8.3046,[301]8.3019,[302]8.3038,[303]8.3065,[304]8.3024,[305]8.3027,[3
06]8.3064,[307]8.3116,[308]8.3082,[309]8.3055,[310]8.3132,[311]8.3138,[312]8.3058,[313]8.2894,[314]8.2965,[315]8.2949,[316]8.2922,[317]8.2937,[318]8.2897,[319]8.2958,[320]8.3006,[321]8.3032,[322]8.2966,[323]8.2959,[324]8.3054,[325]8.3175,[326]8.3245,[327]8.3252,[328]8.3265,[329]8.3256,[330]8.3326,[331]8.3446,[332]8.3460,[333]8.3426,[334]8.3390,[335]8.3387,[336]8.3377,[337]8.3339,[338]8.3310,[339]8.3359,[340]8.3439,[341]8.3441,[342]8.3452,[343]8.3589,[344]8.3615,[345]8.3666,[346]8.3683,[347]8.3710,[348]8.3599,[349]8.3520,[350]8.3444,[351]8.3454,[352]8.3479,[353]8.3434,[354]8.3342,[355]8.3327,[356]8.3285,[357]8.3267,[358]8.3261,[359]8.3353,[360]8.3393,[361]8.3399,[362]8.3404,[363]8.3414,[364]8.3380,[365]8.3336,[366]8.3305,[367]8.3348,[368]8.3423,[369]8.3425,[370]8.3459,[371]8.3528,[372]8.3553,[373]8.3539,[374]8.3536,[375]8.3499,[376]8.3512,[377]8.3455,[378]8.3492,[379]8.3484,[380]8.3407,[381]8.3418,[382]8.3420,[383]8.3369,[384]8.3373,[385]8.3397,[386]8.3357,[387]8.3403,[388]8.3357,[389]8.3219,[390]8.3292,[391]8.3354,[392]8.3347,[393]8.3418,[394]8.3511,[395]8.3505,[396]8.3492,[397]8.3554,[398]8.3596,[399]8.3649,[400]8.3731,[401]8.3748,[402]8.3882,[403]8.3991,[404]8.4033,[405]8.3965,[406]8.3868,[407]8.3833,[408]8.3825,[409]8.3868,[410]8.3886,[411]8.3891,
Final estimate: PPL = 8.3891 +/- 0.02016
llama_perf_context_print: load time = 856.79 ms
llama_perf_context_print: prompt eval time = 838697.82 ms / 2512854 tokens ( 0.33 ms per token, 2996.14 tokens per second)
llama_perf_context_print: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_perf_context_print: total time = 915157.10 ms / 2512855 tokens
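
As a rough sanity check, the 336 MiB f16 KV buffer reported above follows from the hyperparameters printed earlier in this log. This is a back-of-the-envelope sketch (assuming 2 bytes per f16 value and the padded n_ctx = 6144 shown in the context init), not llama.cpp's actual allocation code:

n_layer   = 28      # llm_load_print_meta: n_layer
n_ctx     = 6144    # llama_new_context_with_model: n_ctx
n_embd_kv = 512     # llm_load_print_meta: n_embd_k_gqa (= n_embd_v_gqa)
bytes_f16 = 2       # assumption: 2 bytes per cached f16 value
k_mib = n_layer * n_ctx * n_embd_kv * bytes_f16 / 2**20
print(k_mib, 2 * k_mib)   # 168.0 MiB for K (same for V), 336.0 MiB total, matching the log
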
./llama-perplexity -m /mnt/llm/models/Qwen2.5-Coder-7B-Instruct-128k-Q6_K.gguf -f /mnt/llm/models/datasets/wiki.train.raw.txt -ngl 99999 -fa -b 2048 -c 6114 -sm none -ctk q8_0 -ctv q8_0
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 2 CUDA devices:
Device 0: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes
Device 1: NVIDIA GeForce RTX 3090, compute capability 8.6, VMM: yes
build: 1108 (c9c6e01d) with cc (GCC) 14.2.1 20240912 (Red Hat 14.2.1-3) for x86_64-redhat-linux
llama_load_model_from_file: using device CUDA0 (NVIDIA GeForce RTX 3090) - 24111 MiB free
llama_model_loader: loaded meta data with 27 key-value pairs and 339 tensors from /mnt/llm/models/Qwen2.5-Coder-7B-Instruct-128k-Q6_K.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = qwen2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Qwen2.5 Coder 7B Instruct
llama_model_loader: - kv 3: general.organization str = Qwen
llama_model_loader: - kv 4: general.finetune str = Instruct
llama_model_loader: - kv 5: general.basename str = Qwen2.5-Coder
llama_model_loader: - kv 6: general.size_label str = 7B
llama_model_loader: - kv 7: qwen2.block_count u32 = 28
llama_model_loader: - kv 8: qwen2.context_length u32 = 131072
llama_model_loader: - kv 9: qwen2.embedding_length u32 = 3584
llama_model_loader: - kv 10: qwen2.feed_forward_length u32 = 18944
llama_model_loader: - kv 11: qwen2.attention.head_count u32 = 28
llama_model_loader: - kv 12: qwen2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 13: qwen2.rope.freq_base f32 = 1000000.000000
llama_model_loader: - kv 14: qwen2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 15: general.file_type u32 = 18
llama_model_loader: - kv 16: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 17: tokenizer.ggml.pre str = qwen2
llama_model_loader: - kv 18: tokenizer.ggml.tokens arr[str,152064] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 19: tokenizer.ggml.token_type arr[i32,152064] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 20: tokenizer.ggml.merges arr[str,151387] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
llama_model_loader: - kv 21: tokenizer.ggml.eos_token_id u32 = 151645
llama_model_loader: - kv 22: tokenizer.ggml.padding_token_id u32 = 151665
llama_model_loader: - kv 23: tokenizer.ggml.bos_token_id u32 = 151643
llama_model_loader: - kv 24: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 25: tokenizer.chat_template str = {%- if tools %}\n {{- '<|im_start|>...
llama_model_loader: - kv 26: general.quantization_version u32 = 2
llama_model_loader: - type f32: 141 tensors
llama_model_loader: - type q6_K: 198 tensors
llm_load_vocab: special tokens cache size = 23
llm_load_vocab: token to piece cache size = 0.9310 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = qwen2
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 152064
llm_load_print_meta: n_merges = 151387
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 131072
llm_load_print_meta: n_embd = 3584
llm_load_print_meta: n_layer = 28
llm_load_print_meta: n_head = 28
llm_load_print_meta: n_head_kv = 4
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 7
llm_load_print_meta: n_embd_k_gqa = 512
llm_load_print_meta: n_embd_v_gqa = 512
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 18944
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 1000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 131072
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 7B
llm_load_print_meta: model ftype = Q6_K
llm_load_print_meta: model params = 7.62 B
llm_load_print_meta: model size = 5.82 GiB (6.56 BPW)
llm_load_print_meta: general.name = Qwen2.5 Coder 7B Instruct
llm_load_print_meta: BOS token = 151643 '<|endoftext|>'
llm_load_print_meta: EOS token = 151645 '<|im_end|>'
llm_load_print_meta: EOT token = 151645 '<|im_end|>'
llm_load_print_meta: PAD token = 151665 '<|PAD_TOKEN|>'
llm_load_print_meta: LF token = 148848 'ÄĬ'
llm_load_print_meta: FIM PRE token = 151659 '<|fim_prefix|>'
llm_load_print_meta: FIM SUF token = 151661 '<|fim_suffix|>'
llm_load_print_meta: FIM MID token = 151660 '<|fim_middle|>'
llm_load_print_meta: FIM PAD token = 151662 '<|fim_pad|>'
llm_load_print_meta: FIM REP token = 151663 '<|repo_name|>'
llm_load_print_meta: FIM SEP token = 151664 '<|file_sep|>'
llm_load_print_meta: EOG token = 151643 '<|endoftext|>'
llm_load_print_meta: EOG token = 151645 '<|im_end|>'
llm_load_print_meta: EOG token = 151662 '<|fim_pad|>'
llm_load_print_meta: EOG token = 151663 '<|repo_name|>'
llm_load_print_meta: EOG token = 151664 '<|file_sep|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: offloading 28 repeating layers to GPU
llm_load_tensors: offloading output layer to GPU
llm_load_tensors: offloaded 29/29 layers to GPU
llm_load_tensors: CUDA0 model buffer size = 5532.43 MiB
llm_load_tensors: CPU_Mapped model buffer size = 426.36 MiB
.......................................................................................
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 6144
llama_new_context_with_model: n_ctx_per_seq = 6144
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 1000000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (6144) < n_ctx_train (131072) -- the full capacity of the model will not be utilized
llama_kv_cache_init: CUDA0 KV buffer size = 178.50 MiB
llama_new_context_with_model: KV self size = 178.50 MiB, K (q8_0): 89.25 MiB, V (q8_0): 89.25 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.58 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 304.00 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 19.01 MiB
llama_new_context_with_model: graph nodes = 875
llama_new_context_with_model: graph splits = 2
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)
system_info: n_threads = 12 (n_threads_batch = 12) / 24 | CUDA : ARCHS = 860 | F16 = 1 | USE_GRAPHS = 1 | PEER_MAX_BATCH_SIZE = 512 | FA_ALL_QUANTS = 1 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | AVX_VNNI = 1 | AVX2 = 1 | F16C = 1 | FMA = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 |
perplexity: tokenizing the input ..
perplexity: tokenization took 4019.35 ms
perplexity: calculating perplexity over 411 chunks, n_ctx=6114, batch_size=2048, n_seq=1
perplexity: 2.43 seconds per pass - ETA 16.62 minutes
[1]9.9202,[2]10.1469,[3]8.7887,[4]8.8921,[5]8.8528,[6]8.4082,[7]8.5220,[8]8.7453,[9]8.5711,[10]8.3722,[11]8.4779,[12]8.4358,[13]8.5259,[14]8.6891,[15]8.7391,[16]8.7653,[17]8.8992,[18]8.9813,[19]9.0935,[20]8.8566,[21]8.7917,[22]8.8263,[23]8.8961,[24]8.8084,[25]8.7959,[26]8.7343,[27]8.7492,[28]8.6769,[29]8.6995,[30]8.6610,[31]8.7187,[32]8.7014,[33]8.7433,[34]8.6228,[35]8.6100,[36]8.6303,[37]8.6643,[38]8.6586,[39]8.6243,[40]8.5450,[41]8.5263,[42]8.4335,[43]8.5469,[44]8.5164,[45]8.5155,[46]8.4293,[47]8.4509,[48]8.3634,[49]8.3417,[50]8.3340,[51]8.3675,[52]8.4134,[53]8.4011,[54]8.4242,[55]8.4447,[56]8.4277,[57]8.3928,[58]8.4291,[59]8.4912,[60]8.5676,[61]8.5549,[62]8.5359,[63]8.4564,[64]8.5113,[65]8.5394,[66]8.5322,[67]8.5554,[68]8.5360,[69]8.4717,[70]8.4994,[71]8.5400,[72]8.5282,[73]8.5712,[74]8.5550,[75]8.5442,[76]8.5475,[77]8.5214,[78]8.5460,[79]8.5711,[80]8.6111,[81]8.5959,[82]8.5783,[83]8.5407,[84]8.5373,[85]8.5593,[86]8.6122,[87]8.5921,[88]8.6182,[89]8.6386,[90]8.6422,[91]8.6431,[92]8.6350,[93]8.6531,[94]8.6545,[95]8.6500,[96]8.6266,[97]8.6416,[98]8.6338,[99]8.6283,[100]8.5948,[101]8.6236,[102]8.6087,[103]8.5923,[104]8.6293,[105]8.6463,[106]8.6433,[107]8.6229,[108]8.6069,[109]8.5871,[110]8.5633,[111]8.5806,[112]8.5990,[113]8.5729,[114]8.5547,[115]8.5609,[116]8.5570,[117]8.5496,[118]8.5414,[119]8.5272,[120]8.5388,[121]8.5312,[122]8.5488,[123]8.5188,[124]8.5267,[125]8.5435,[126]8.5494,[127]8.5247,[128]8.5026,[129]8.5198,[130]8.5299,[131]8.5060,[132]8.5024,[133]8.4601,[134]8.4661,[135]8.4528,[136]8.4536,[137]8.4674,[138]8.4696,[139]8.4600,[140]8.4546,[141]8.4538,[142]8.4533,[143]8.4473,[144]8.4480,[145]8.4443,[146]8.4641,[147]8.4528,[148]8.4622,[149]8.4386,[150]8.4210,[151]8.4118,[152]8.4163,[153]8.4196,[154]8.4222,[155]8.4101,[156]8.4155,[157]8.4150,[158]8.4187,[159]8.4290,[160]8.4312,[161]8.4360,[162]8.4582,[163]8.4574,[164]8.4566,[165]8.4487,[166]8.4405,[167]8.4397,[168]8.4411,[169]8.4427,[170]8.4346,[171]8.4267,[172]8.4435,[173]8.4541,[174]8.4356,[175]8.4363,[176]8.4350,[177]8.4474,[178]8.4562,[179]8.4514,[180]8.4551,[181]8.4678,[182]8.4721,[183]8.4829,[184]8.4714,[185]8.4593,[186]8.4731,[187]8.4714,[188]8.4751,[189]8.4573,[190]8.4350,[191]8.4427,[192]8.4233,[193]8.4320,[194]8.4305,[195]8.4098,[196]8.4141,[197]8.4108,[198]8.4230,[199]8.4172,[200]8.4220,[201]8.4182,[202]8.4121,[203]8.4115,[204]8.4198,[205]8.4183,[206]8.4165,[207]8.4044,[208]8.3941,[209]8.3998,[210]8.3925,[211]8.3940,[212]8.3939,[213]8.3787,[214]8.3609,[215]8.3422,[216]8.3419,[217]8.3460,[218]8.3510,[219]8.3631,[220]8.3601,[221]8.3450,[222]8.3475,[223]8.3417,[224]8.3412,[225]8.3346,[226]8.3434,[227]8.3534,[228]8.3525,[229]8.3735,[230]8.3741,[231]8.3788,[232]8.3938,[233]8.3931,[234]8.3778,[235]8.3723,[236]8.3742,[237]8.3787,[238]8.3910,[239]8.3970,[240]8.4086,[241]8.3893,[242]8.3516,[243]8.3434,[244]8.3324,[245]8.3361,[246]8.3339,[247]8.3236,[248]8.3311,[249]8.3272,[250]8.3334,[251]8.3296,[252]8.3295,[253]8.3287,[254]8.3315,[255]8.3146,[256]8.2992,[257]8.3001,[258]8.3147,[259]8.3169,[260]8.3278,[261]8.3430,[262]8.3487,[263]8.3529,[264]8.3442,[265]8.3306,[266]8.3259,[267]8.3192,[268]8.2996,[269]8.3064,[270]8.2978,[271]8.3052,[272]8.3066,[273]8.3008,[274]8.3041,[275]8.3056,[276]8.2978,[277]8.2973,[278]8.2964,[279]8.2916,[280]8.3036,[281]8.3056,[282]8.3001,[283]8.2968,[284]8.2968,[285]8.2986,[286]8.3078,[287]8.3114,[288]8.3040,[289]8.3087,[290]8.3145,[291]8.3297,[292]8.3296,[293]8.3081,[294]8.3051,[295]8.3036,[296]8.3050,[297]8.3186,[298]8.3160,[299]8.3139,[300]8.3091,[301]8.3063,[302]8.3083,[303]8.3108,[304]8.3068,[305]8.3072,[3
06]8.3108,[307]8.3161,[308]8.3126,[309]8.3100,[310]8.3176,[311]8.3183,[312]8.3102,[313]8.2938,[314]8.3008,[315]8.2992,[316]8.2965,[317]8.2980,[318]8.2940,[319]8.3002,[320]8.3049,[321]8.3075,[322]8.3009,[323]8.3002,[324]8.3097,[325]8.3217,[326]8.3288,[327]8.3295,[328]8.3308,[329]8.3299,[330]8.3369,[331]8.3490,[332]8.3503,[333]8.3470,[334]8.3433,[335]8.3430,[336]8.3420,[337]8.3382,[338]8.3353,[339]8.3402,[340]8.3482,[341]8.3484,[342]8.3495,[343]8.3631,[344]8.3658,[345]8.3709,[346]8.3726,[347]8.3752,[348]8.3642,[349]8.3562,[350]8.3486,[351]8.3496,[352]8.3521,[353]8.3476,[354]8.3384,[355]8.3368,[356]8.3325,[357]8.3308,[358]8.3301,[359]8.3394,[360]8.3434,[361]8.3440,[362]8.3446,[363]8.3456,[364]8.3422,[365]8.3377,[366]8.3345,[367]8.3388,[368]8.3463,[369]8.3465,[370]8.3500,[371]8.3568,[372]8.3593,[373]8.3579,[374]8.3576,[375]8.3539,[376]8.3553,[377]8.3495,[378]8.3532,[379]8.3524,[380]8.3447,[381]8.3458,[382]8.3459,[383]8.3409,[384]8.3414,[385]8.3438,[386]8.3397,[387]8.3444,[388]8.3398,[389]8.3260,[390]8.3332,[391]8.3393,[392]8.3387,[393]8.3458,[394]8.3551,[395]8.3545,[396]8.3534,[397]8.3597,[398]8.3639,[399]8.3692,[400]8.3774,[401]8.3792,[402]8.3925,[403]8.4034,[404]8.4076,[405]8.4006,[406]8.3911,[407]8.3876,[408]8.3868,[409]8.3910,[410]8.3928,[411]8.3934,
Final estimate: PPL = 8.3934 +/- 0.02017
llama_perf_context_print: load time = 879.24 ms
llama_perf_context_print: prompt eval time = 763739.58 ms / 2512854 tokens ( 0.30 ms per token, 3290.20 tokens per second)
llama_perf_context_print: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_perf_context_print: total time = 840693.22 ms / 2512855 tokens
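
Putting the two runs side by side (all figures taken from the logs above), quantizing the KV cache to q8_0 roughly halves the KV buffer and speeds up prompt processing a bit, while perplexity moves only in the third decimal place. A small sketch of the arithmetic; the 8.5 bits per value for q8_0 (32 int8 values plus one f16 scale per 34-byte block) is background on the ggml format rather than something printed in these logs:

ppl_f16, ppl_q8 = 8.3891, 8.3934       # Final estimate lines of the two runs
kv_f16,  kv_q8  = 336.00, 178.50       # KV self size, MiB
tps_f16, tps_q8 = 2996.14, 3290.20     # prompt eval tokens per second

print(f"PPL:        {100 * (ppl_q8 / ppl_f16 - 1):+.3f} %")       # about +0.051 %
print(f"KV cache:   {100 * (1 - kv_q8 / kv_f16):.1f} % smaller")  # about 46.9 %
print(f"Throughput: {100 * (tps_q8 / tps_f16 - 1):+.1f} %")       # about +9.8 %
print(336.00 * 8.5 / 16)   # 178.5 MiB, matching the reported q8_0 KV size
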