Commit 76f9d51 (verified) by ariG23498 (HF Staff) · Parent(s): f33dbc4

Create benchmark-mxfp4-kernels.py

Files changed (1): benchmark-mxfp4-kernels.py (+87, -0)

benchmark-mxfp4-kernels.py ADDED
@@ -0,0 +1,87 @@
+ import os; os.environ["CUDA_VISIBLE_DEVICES"]="0"
+
+ import torch
+ from torch.utils import benchmark
+ from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config
+
+ def load_model(in_mxfp4):
+     model_id = "openai/gpt-oss-20b"
+     if not in_mxfp4:
+         quantization_config = Mxfp4Config(dequantize=True)
+         model = AutoModelForCausalLM.from_pretrained(
+             model_id,
+             dtype="auto",
+             device_map="cuda:0",
+             use_kernels=True,
+             quantization_config=quantization_config,
+         ).eval()
+
+     else:
+         model = AutoModelForCausalLM.from_pretrained(
+             model_id,
+             dtype="auto",
+             device_map="cuda:0",
+         ).eval()
+
+     return model
+
+ def generate(model, model_inputs, max_new_tokens):
+     with torch.inference_mode():
+         model.generate(
+             **model_inputs,
+             do_sample=False,
+             temperature=None,
+             max_new_tokens=max_new_tokens,
+             eos_token_id=-1,
+             disable_compile=True,
+         )
+
+ if __name__ == "__main__":
+     results = []
+     max_new_tokens = 256
+     batch_size = 256
+     base_prompts = [
+         "What is Tensor Parallelism?",
+         "Explain machine learning fundamentals.",
+         "How do neural networks work?",
+         "What are the benefits of distributed computing?",
+         "Describe the attention mechanism in transformers.",
+         "What is gradient descent?",
+         "How does backpropagation work?",
+         "Explain the concept of overfitting.",
+     ]
+     for in_mxfp4 in [True, False]:
+         model = load_model(in_mxfp4)
+         for batch_size in [32, 64, 128, 256]:
+             messages = [
+                 [{"role": "system", "content": base_prompts[i % len(base_prompts)]}] for i in range(batch_size)
+             ]
+             tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")
+             texts = [tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False, reasoning_effort="low") for m in messages]
+             inputs = tokenizer(
+                 texts,
+                 return_tensors="pt",
+                 padding=True,
+                 padding_side="left",
+             ).to("cuda:0")
+
+             label = "time taken to generate"
+             results.append(
+                 benchmark.Timer(
+                     stmt="generate(model, model_inputs, max_new_tokens)",
+                     setup='from __main__ import generate',
+                     globals={"model": model, "model_inputs": inputs, "max_new_tokens": max_new_tokens},
+                     num_threads=torch.get_num_threads(),
+                     label=label,
+                     sub_label=f"num tokens: {max_new_tokens} batch size: {batch_size}",
+                     description=f"in mxfp4: {in_mxfp4}"
+                 ).timeit(5)
+             )
+             inputs.to("cpu")
+             del inputs
+
+         model.to("cpu")
+         del model
+
+     compare = benchmark.Compare(results)
+     compare.print()
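
Usage note (not part of the commit): running the script requires a CUDA GPU, a recent transformers release that ships Mxfp4Config and gpt-oss support, and the extra kernel dependencies pulled in by `use_kernels=True`. Before launching the full sweep, one might sanity-check that the two load paths in `load_model` really land in different formats. The sketch below is an illustrative assumption, not the author's method: it mirrors the two branches (with `use_kernels` omitted, since only memory is inspected) and compares the reported model memory footprint.

```python
# Hypothetical sanity check (not part of the commit): compare the footprint of
# gpt-oss-20b kept in MXFP4 vs. dequantized to bf16, mirroring load_model().
import torch
from transformers import AutoModelForCausalLM, Mxfp4Config

model_id = "openai/gpt-oss-20b"

for in_mxfp4 in (True, False):
    # in_mxfp4=True  -> default load (MXFP4 weights on supported hardware)
    # in_mxfp4=False -> dequantize the MXFP4 checkpoint to bf16 on load
    kwargs = {} if in_mxfp4 else {"quantization_config": Mxfp4Config(dequantize=True)}
    model = AutoModelForCausalLM.from_pretrained(
        model_id, dtype="auto", device_map="cuda:0", **kwargs
    ).eval()
    print(f"in_mxfp4={in_mxfp4}: ~{model.get_memory_footprint() / 1e9:.1f} GB of weights")
    del model
    torch.cuda.empty_cache()
```

The benchmark itself deliberately tears down `inputs` and `model` between runs so that each (format, batch size) pair is timed from a comparable memory state before `benchmark.Compare` prints the summary table.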