ariG23498 (HF Staff) committed
Commit f33dbc4 · verified · 1 Parent(s): d478cd6

Create benchmark-kernels-with-without.py

Files changed (1):
  1. benchmark-kernels-with-without.py +90 -0
benchmark-kernels-with-without.py ADDED
@@ -0,0 +1,90 @@
+ import os; os.environ["CUDA_VISIBLE_DEVICES"]="0"
+
+ import torch
+ from torch.utils import benchmark
+ from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config
+
+ def load_model(use_kernels):
+     model_id = "openai/gpt-oss-20b"
+     quantization_config = Mxfp4Config(dequantize=True)
+     model = AutoModelForCausalLM.from_pretrained(
+         model_id,
+         dtype="auto",
+         device_map="cuda:0",
+         use_kernels=use_kernels,
+         quantization_config=quantization_config,
+     ).eval()
+     return model
+
+ def generate(model, model_inputs, max_new_tokens):
+     with torch.inference_mode():
+         model.generate(
+             **model_inputs,
+             do_sample=False,
+             temperature=None,
+             max_new_tokens=max_new_tokens,
+             eos_token_id=-1,
+             disable_compile=True,
+         )
+
+ if __name__ == "__main__":
+     results = []
+     max_new_tokens = 256
+     batch_size = 256
+     base_prompts = [
+         "What is Tensor Parallelism?",
+         "Explain machine learning fundamentals.",
+         "How do neural networks work?",
+         "What are the benefits of distributed computing?",
+         "Describe the attention mechanism in transformers.",
+         "What is gradient descent?",
+         "How does backpropagation work?",
+         "Explain the concept of overfitting.",
+     ]
+
+     for use_kernels in [True, False]:
+         model = load_model(use_kernels)
+         for batch_size in [32, 64, 128, 256]:
+             messages = [
+                 [{"role": "system", "content": base_prompts[i % len(base_prompts)]}] for i in range(batch_size)
+             ]
+             tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")
+             texts = [tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False, reasoning_effort="low") for m in messages]
+             inputs = tokenizer(
+                 texts,
+                 return_tensors="pt",
+                 padding=True,
+                 padding_side="left",
+             ).to("cuda:0")
+
+             label = "time taken to generate"
+             results.append(
+                 benchmark.Timer(
+                     stmt="generate(model, model_inputs, max_new_tokens)",
+                     setup='from __main__ import generate',
+                     globals={"model": model, "model_inputs": inputs, "max_new_tokens": max_new_tokens},
+                     num_threads=torch.get_num_threads(),
+                     label=label,
+                     sub_label=f"num tokens: {max_new_tokens} batch size: {batch_size}",
+                     description=f"use kernels: {use_kernels}"
+                 ).timeit(5)
+             )
+             inputs.to("cpu")
+             del inputs
+
+         model.to("cpu")
+         del model
+
+     compare = benchmark.Compare(results)
+     compare.print()
+
+
+ # [---------------------------- time taken to generate ----------------------------]
+ #                                         |  use kernels: True  |  use kernels: False
+ # 12 threads: ----------------------------------------------------------------------
+ #       num tokens: 256 batch size: 32    |        12.7         |         9.1
+ #       num tokens: 256 batch size: 64    |        12.7         |        10.0
+ #       num tokens: 256 batch size: 128   |        12.8         |        13.9
+ #       num tokens: 256 batch size: 256   |        15.0         |        21.2
+
+ # Times are in seconds (s).
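
The seconds above can also be read as approximate decode throughput. Because `eos_token_id=-1` prevents early stopping, every run generates the full `batch_size * 256` tokens, so dividing that count by the measured time gives tokens per second; this is how the kernels path pulls ahead at the larger batch sizes (128 and 256) even though it is slower at the small ones. A minimal sketch, not part of the commit, that derives those figures from the table:

# Rough tokens/sec derived from the benchmark table above (illustrative sketch).
# Each run generates batch_size * max_new_tokens tokens, since eos_token_id=-1
# disables early stopping.
max_new_tokens = 256
timings = [  # (batch_size, seconds with kernels, seconds without kernels)
    (32, 12.7, 9.1),
    (64, 12.7, 10.0),
    (128, 12.8, 13.9),
    (256, 15.0, 21.2),
]
for batch_size, with_kernels_s, without_kernels_s in timings:
    total_tokens = batch_size * max_new_tokens
    print(
        f"batch {batch_size:>3}: "
        f"{total_tokens / with_kernels_s:7.0f} tok/s with kernels | "
        f"{total_tokens / without_kernels_s:7.0f} tok/s without"
    )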