# llama-cpp-bench / parse_results.py
# bobchenyx's picture
# Upload folder using huggingface_hub
# de561c0 verified
#!/usr/bin/env python3
"""Parse all benchmark result files and print both summary tables + write CSV."""
import json
import os
import re
import csv
# Directory containing this script; all benchmark result files are
# expected to live alongside it.
RESULTS_DIR = os.path.dirname(os.path.abspath(__file__))

# One entry per (model, quantization) benchmark run:
#   (file_prefix_base, model_display_label, quant_file_suffix, quant_display_label)
# The on-disk result-file prefix is f"{file_prefix_base}-{quant_file_suffix}"
# (see main()), e.g. "llama-3.1-8b-instruct-Q4_K_M_bench.json".
KEYS = [
    ("llama-3.1-8b-instruct", "Llama-3.1-8B", "f16", "F16"),
    ("llama-3.1-8b-instruct", "Llama-3.1-8B", "Q8_0", "Q8_0"),
    ("llama-3.1-8b-instruct", "Llama-3.1-8B", "Q4_K_M", "Q4_K_M"),
    ("llama-3.1-8b-instruct", "Llama-3.1-8B", "Q2_K", "Q2_K"),
    ("qwen2.5-7b-instruct", "Qwen2.5-7B", "f16", "F16"),
    ("qwen2.5-7b-instruct", "Qwen2.5-7B", "Q8_0", "Q8_0"),
    ("qwen2.5-7b-instruct", "Qwen2.5-7B", "Q4_K_M", "Q4_K_M"),
    ("qwen2.5-7b-instruct", "Qwen2.5-7B", "Q2_K", "Q2_K"),
    ("gemma-2-9b-it", "Gemma-2-9B", "f16", "F16"),
    ("gemma-2-9b-it", "Gemma-2-9B", "Q8_0", "Q8_0"),
    ("gemma-2-9b-it", "Gemma-2-9B", "Q4_K_M", "Q4_K_M"),
    ("gemma-2-9b-it", "Gemma-2-9B", "Q2_K", "Q2_K"),
]
def path(prefix, suffix):
    """Return the absolute path of result file '<prefix>_<suffix>' in RESULTS_DIR."""
    filename = f"{prefix}_{suffix}"
    return os.path.join(RESULTS_DIR, filename)
def parse_bench(prefix):
    """Return (prefill_ts, prefill_std, decode_ts, decode_std, weight_gib) or Nones.

    Reads <prefix>_bench.json, a list of benchmark records.  A record with
    n_prompt > 0 and n_gen == 0 is treated as the prefill (prompt-processing)
    run; one with n_gen > 0 and n_prompt == 0 as the decode (generation) run.
    Throughputs are rounded to 1 decimal, the weight size to 2 (GiB).
    Any parse failure is reported as a warning and yields all-None.
    """
    p = path(prefix, "bench.json")
    if not os.path.exists(p):
        return None, None, None, None, None
    try:
        # Fix: use a context manager instead of json.load(open(p)) so the
        # file handle is closed deterministically rather than leaked.
        with open(p) as f:
            data = json.load(f)
        prefill_ts = prefill_std = decode_ts = decode_std = weight_gib = None
        for r in data:
            if r.get("n_prompt", 0) > 0 and r.get("n_gen", 0) == 0:
                prefill_ts = round(r["avg_ts"], 1)
                prefill_std = round(r.get("stddev_ts", 0), 1)
                # model_size is in bytes; report GiB.
                weight_gib = round(r["model_size"] / (1024**3), 2)
            elif r.get("n_gen", 0) > 0 and r.get("n_prompt", 0) == 0:
                decode_ts = round(r["avg_ts"], 1)
                decode_std = round(r.get("stddev_ts", 0), 1)
        return prefill_ts, prefill_std, decode_ts, decode_std, weight_gib
    except Exception as e:
        print(f" WARN bench parse error for {prefix}: {e}")
        return None, None, None, None, None
def parse_vram(prefix):
    """Return peak VRAM (GiB) from nvidia-smi dmon log.

    When monitoring a single GPU (-i N), this is simply the max fb value seen.
    When monitoring all GPUs (older logs), we fall back to sum of
    (peak βˆ’ min) per GPU as an estimate of our job's incremental VRAM.
    """
    p = path(prefix, "vram.log")
    if not os.path.exists(p):
        return None
    try:
        from collections import defaultdict
        lo = defaultdict(lambda: float("inf"))  # smallest fb seen per GPU
        hi = defaultdict(lambda: 0)             # largest fb seen per GPU
        with open(p) as fh:
            # Sniff the comment header: a "#" line mentioning "Time"
            # signals a leading timestamp column in the data rows.
            timestamped = False
            for raw in fh:
                stripped = raw.strip()
                if stripped.startswith("#") and "Time" in raw:
                    timestamped = True
                if not stripped.startswith("#"):
                    break
            fh.seek(0)  # rescan from the top now that the layout is known
            for raw in fh:
                row = raw.strip()
                if not row or row.startswith("#"):
                    continue
                cols = row.split()
                if len(cols) < 3:
                    continue
                # Timestamped rows: HH:MM:SS gpu fb ...; plain rows: gpu fb ...
                gpu_col, fb_col = (1, 2) if timestamped else (0, 1)
                try:
                    gpu = int(cols[gpu_col])
                    fb = int(cols[fb_col])
                except (ValueError, IndexError):
                    continue
                lo[gpu] = min(lo[gpu], fb)
                hi[gpu] = max(hi[gpu], fb)
        if not hi:
            return None
        if len(hi) == 1:
            # Single-GPU log: peak fb is exactly our job's peak VRAM
            peak_mib = max(hi.values())
        else:
            # Multi-GPU log: use delta (peak - min) per GPU to strip baseline
            peak_mib = sum(hi[g] - lo[g] for g in hi)
        return round(peak_mib / 1024, 2)
    except Exception as e:
        print(f" WARN vram parse error for {prefix}: {e}")
        return None
def parse_ttft(prefix):
    """Return (ttft_ms, tpot_ms, latency_ms) or (None, None, None).

    Reads <prefix>_ttft.json and pulls the three latency fields; each may
    be absent (returned as None).  Parse failures warn and return all-None.
    """
    p = path(prefix, "ttft.json")
    if not os.path.exists(p):
        return None, None, None
    try:
        # Fix: context manager instead of json.load(open(p)) — avoids a
        # leaked file handle.
        with open(p) as f:
            d = json.load(f)
        return d.get("ttft_ms"), d.get("tpot_ms"), d.get("latency_ms")
    except Exception as e:
        print(f" WARN ttft parse error for {prefix}: {e}")
        return None, None, None
def parse_ppl(prefix):
    """Return (perplexity, ppl_std) or (None, None).

    Scans <prefix>_ppl.txt for "PPL = X +/- Y"; falls back to a bare
    "PPL = X" (std then None).  Parse failures warn and return (None, None).
    """
    p = path(prefix, "ppl.txt")
    if not os.path.exists(p):
        return None, None
    try:
        # Fix: context manager instead of open(p).read() — the original
        # leaked the file handle.
        with open(p) as f:
            text = f.read()
        m = re.search(r"PPL\s*=\s*([\d.]+)\s*\+/-\s*([\d.]+)", text)
        if m:
            return float(m.group(1)), float(m.group(2))
        # No uncertainty reported: accept a bare "PPL = X".
        m = re.search(r"PPL\s*=\s*([\d.]+)", text)
        return (float(m.group(1)), None) if m else (None, None)
    except Exception as e:
        print(f" WARN ppl parse error for {prefix}: {e}")
        return None, None
def parse_hellaswag(prefix):
    """Return accuracy % float or None.

    Reads <prefix>_hellaswag.txt and extracts the first "<number>%" token.
    Parse failures warn and return None.
    """
    p = path(prefix, "hellaswag.txt")
    if not os.path.exists(p):
        return None
    try:
        # Fix: context manager instead of open(p).read() — avoids a leaked
        # file handle.
        with open(p) as f:
            text = f.read().strip()
        # format: "400\t78.50000000%\t[74.2%, 82.2%]"
        m = re.search(r"([\d.]+)%", text)
        return round(float(m.group(1)), 2) if m else None
    except Exception as e:
        print(f" WARN hellaswag parse error for {prefix}: {e}")
        return None
def parse_winogrande(prefix):
    """Return accuracy % float or None.

    Reads <prefix>_winogrande.txt; the second whitespace-separated column
    is the percentage correct.  Parse failures warn and return None.
    """
    p = path(prefix, "winogrande.txt")
    if not os.path.exists(p):
        return None
    try:
        # Fix: context manager instead of open(p).read() — avoids a leaked
        # file handle.
        with open(p) as f:
            text = f.read().strip()
        # format: "1267\t73.4807\t..." β€” second col is % correct
        parts = text.split()
        return round(float(parts[1]), 2) if len(parts) >= 2 else None
    except Exception as e:
        print(f" WARN winogrande parse error for {prefix}: {e}")
        return None
def fmt(val, fmt_str, missing="β€”"):
    """Format *val* with format-spec *fmt_str*; return *missing* for None."""
    if val is None:
        return missing
    return format(val, fmt_str)
def _collect_row(file_prefix_base, model_label, quant_file, quant_label):
    """Parse every result file for one (model, quant) run into a row dict."""
    prefix = f"{file_prefix_base}-{quant_file}"
    prefill, prefill_std, decode, decode_std, weight = parse_bench(prefix)
    peak_vram = parse_vram(prefix)
    ttft, tpot, latency = parse_ttft(prefix)
    ppl, ppl_std = parse_ppl(prefix)
    hellaswag = parse_hellaswag(prefix)
    winogrande = parse_winogrande(prefix)
    return {
        "model": model_label,
        "quant": quant_label,
        "prefill_ts": prefill,
        "prefill_std": prefill_std,
        "decode_ts": decode,
        "decode_std": decode_std,
        "weight_gib": weight,
        "peak_vram": peak_vram,
        "ttft_ms": ttft,
        "tpot_ms": tpot,
        "latency_ms": latency,
        "ppl": ppl,
        "ppl_std": ppl_std,
        "hellaswag": hellaswag,
        "winogrande": winogrande,
    }


def _write_csv(rows):
    """Write all parsed rows to benchmark_results.csv in RESULTS_DIR."""
    csv_path = os.path.join(RESULTS_DIR, "benchmark_results.csv")
    fieldnames = ["model", "quant", "prefill_ts", "prefill_std",
                  "decode_ts", "decode_std",
                  "ttft_ms", "tpot_ms", "latency_ms",
                  "weight_gib", "peak_vram",
                  "ppl", "ppl_std", "hellaswag", "winogrande"]
    with open(csv_path, "w", newline="") as f:
        w = csv.DictWriter(f, fieldnames=fieldnames)
        w.writeheader()
        w.writerows(rows)
    print(f"CSV written β†’ {csv_path}\n")


def _flag_group_starts(rows):
    """Yield (is_first_row_of_its_model_group, row) for each row.

    Fix: replaces the original per-row rows.index(r) lookups, which were
    O(n^2) overall and would mis-flag duplicate row dicts, with a single
    linear pass tracking the previous model label.
    """
    prev_model = None
    for r in rows:
        yield r["model"] != prev_model, r
        prev_model = r["model"]


def _mean_std(mean, std):
    """Format 'meanΒ±std' to one decimal each, or the em-dash when missing."""
    if mean is None or std is None:
        return "β€”"
    return f"{mean:.1f}Β±{std:.1f}"


def _latex_mean_std(mean, std, spec):
    """Format a LaTeX '$mean\\pm std$' cell with format-spec *spec*;
    degrade to a bare value (no std) or '---' (no value) when data is missing."""
    if mean is not None and std is not None:
        return f"${mean:{spec}}\\pm{std:{spec}}$"
    if mean is not None:
        return f"{mean:{spec}}"
    return "---"


def _print_speed_table(rows):
    """Print plain-text Table 1: throughput, latency, and memory."""
    print("TABLE 1 β€” Speed & Memory")
    print("─" * 120)
    hdr = (f"{'Model':<22} {'Quant':<8} {'Prefill (t/s)':>18} {'Decode (t/s)':>18}"
           f" {'TTFT':>8} {'TPOT':>8} {'Latency':>9} {'Wt(GiB)':>8} {'PkVRAM':>8}")
    print(hdr)
    print(f"{'':22} {'':8} {'mean Β± std':>18} {'mean Β± std':>18}"
          f" {'(ms)':>8} {'(ms)':>8} {'(ms)':>9} {'':>8} {'(GiB)':>8}")
    print("─" * 120)
    printed_any = False
    for is_first, r in _flag_group_starts(rows):
        if is_first and printed_any:
            print()  # blank separator between model groups
        printed_any = True
        print(
            f"{r['model'] if is_first else '':22}"
            f" {r['quant']:<8}"
            f" {_mean_std(r['prefill_ts'], r['prefill_std']):>18}"
            f" {_mean_std(r['decode_ts'], r['decode_std']):>18}"
            f" {fmt(r['ttft_ms'], '8.1f')}"
            f" {fmt(r['tpot_ms'], '8.1f')}"
            f" {fmt(r['latency_ms'], '9.1f')}"
            f" {fmt(r['weight_gib'], '8.2f')}"
            f" {fmt(r['peak_vram'], '8.2f')}"
        )
    print("─" * 120)


def _print_quality_table(rows):
    """Print plain-text Table 2: perplexity and accuracy benchmarks."""
    print("\nTABLE 2 β€” Quality")
    print("─" * 70)
    print(f"{'Model':<22} {'Quant':<8} {'PPL↓':>8} {'HellaSwag↑':>12} {'Winogrande↑':>13}")
    print(f"{'':22} {'':8} {'':>8} {'(%)':>12} {'(%)':>13}")
    print("─" * 70)
    printed_any = False
    for is_first, r in _flag_group_starts(rows):
        if is_first and printed_any:
            print()  # blank separator between model groups
        printed_any = True
        print(
            f"{r['model'] if is_first else '':22}"
            f" {r['quant']:<8}"
            f" {fmt(r['ppl'], '8.2f')}"
            f" {fmt(r['hellaswag'], '12.2f')}"
            f" {fmt(r['winogrande'], '13.2f')}"
        )
    print("─" * 70)


def _print_latex_quality(rows):
    """Print the LaTeX source for the quality table."""
    print()
    print(r"""\begin{table}[t]
\centering
\caption{%
Model quality metrics at multiple quantization levels.
\textbf{PPL} = perplexity on the Wikitext-2 test set
(lower is better).
\textbf{HellaSwag} = accuracy on 400 commonsense-NLI tasks from the
HellaSwag validation set (higher is better).
\textbf{Winogrande} = accuracy on 1{,}267 debiased pronoun-resolution
tasks (higher is better).
Both accuracy benchmarks are evaluated via log-likelihood ranking.%
}
\label{tab:llamacpp_quality}
\begin{tabular}{@{} l l r rr @{}}
\toprule
\textbf{Model} & \textbf{Quant} &
\textbf{PPL\,$\downarrow$} &
\textbf{HellaSwag (\%)\,$\uparrow$} &
\textbf{Winogrande (\%)\,$\uparrow$} \\""")
    for is_first, r in _flag_group_starts(rows):
        if is_first:
            print(r" \midrule")
        quant = r["quant"].replace("_", r"\_")
        # Pad continuation rows so the model column stays visually aligned.
        name_col = r["model"] if is_first else " " * len(r["model"])
        ppl_str = _latex_mean_std(r["ppl"], r["ppl_std"], ".2f")
        hs = f"{r['hellaswag']:.2f}" if r["hellaswag"] is not None else "---"
        wg = f"{r['winogrande']:.2f}" if r["winogrande"] is not None else "---"
        print(f" {name_col} & {quant} & {ppl_str} & {hs} & {wg} \\\\")
    print(r""" \bottomrule
\end{tabular}
\end{table}""")


def _print_latex_speed(rows):
    """Print the LaTeX source for the speed & memory table."""
    print()
    print(r"""\begin{table*}[t]
\centering
\caption{%
Inference speed and memory usage for three open-weight LLMs at multiple
quantization levels, measured on NVIDIA~L40S GPU
using llama.cpp.
\textbf{Prefill} = prompt-processing throughput at 512 input tokens
(PP512, tokens/s);
\textbf{Decode} = text-generation throughput at 128 output tokens
(TG128, tokens/s);
\textbf{TTFT} = time-to-first-token;
\textbf{TPOT} = time-per-output-token;
\textbf{Latency} = TTFT\,+\,TPOT (mean of 3 streaming runs,
512-token prompt, 128 output tokens, temperature~0);
\textbf{Weight} = model-weight VRAM footprint;
\textbf{Peak} = maximum VRAM during benchmark.%
}
\label{tab:llamacpp_speed_memory}
\resizebox{\linewidth}{!}{%
\begin{tabular}{@{} l l rr rrr rr @{}}
\toprule
\multirow{2}{*}{\textbf{Model}} &
\multirow{2}{*}{\textbf{Quant}} &
\multicolumn{2}{c}{\textbf{Throughput (t/s)}} &
\multicolumn{3}{c}{\textbf{Latency (ms)}} &
\multicolumn{2}{c}{\textbf{VRAM (GiB)}} \\
\cmidrule(lr){3-4}\cmidrule(lr){5-7}\cmidrule(lr){8-9}
& &
\textbf{Prefill} & \textbf{Decode} &
\textbf{TTFT} & \textbf{TPOT} & \textbf{Total} &
\textbf{Weight} & \textbf{Peak} \\""")
    for is_first, r in _flag_group_starts(rows):
        if is_first:
            print(r" \midrule")
        quant = r["quant"].replace("_", r"\_")
        # Pad continuation rows so the model column stays visually aligned.
        name_col = r["model"] if is_first else " " * len(r["model"])
        prefill_str = _latex_mean_std(r["prefill_ts"], r["prefill_std"], ".0f")
        decode_str = _latex_mean_std(r["decode_ts"], r["decode_std"], ".1f")
        ttft = f"{r['ttft_ms']:.1f}" if r["ttft_ms"] is not None else "---"
        tpot = f"{r['tpot_ms']:.2f}" if r["tpot_ms"] is not None else "---"
        lat = f"{r['latency_ms']:.1f}" if r["latency_ms"] is not None else "---"
        wt = f"{r['weight_gib']:.2f}" if r["weight_gib"] is not None else "---"
        pk = f"{r['peak_vram']:.2f}" if r["peak_vram"] is not None else "---"
        print(
            f" {name_col} & {quant} & "
            f"{prefill_str} & {decode_str} & "
            f"{ttft} & {tpot} & {lat} & "
            f"{wt} & {pk} \\\\"
        )
    print(r""" \bottomrule
\end{tabular}%
}
\end{table*}""")


def main():
    """Parse every benchmark listed in KEYS, write the CSV, print all tables."""
    rows = [_collect_row(*key) for key in KEYS]
    _write_csv(rows)
    _print_speed_table(rows)
    _print_quality_table(rows)
    _print_latex_quality(rows)
    _print_latex_speed(rows)
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()