ariG23498 (HF Staff) committed
Commit 58b5e52 · verified · 1 Parent(s): 939d4af

Create tp_gpt_oss.py

Files changed (1):
  1. tp_gpt_oss.py +56 -0
tp_gpt_oss.py ADDED
@@ -0,0 +1,56 @@
import os
import torch
from torch import distributed as dist
from transformers import GptOssForCausalLM, PreTrainedTokenizerFast

def initialize_process():
    # torchrun exports LOCAL_RANK for every process it spawns
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")

def run_inference():
    model_id = "openai/gpt-oss-120b"
    tok = PreTrainedTokenizerFast.from_pretrained(model_id)

    # built-in tensor parallelism (TP): shard the model across the ranks
    model = GptOssForCausalLM.from_pretrained(
        model_id,
        tp_plan="auto",
        torch_dtype="auto",
    ).eval()

    messages = [
        {"role": "system", "content": "Be concise."},
        {"role": "user", "content": "Explain KV caching briefly."},
    ]
    inputs = tok.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
        return_dict=True,
        reasoning_effort="low",
    )

    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}

    with torch.inference_mode():
        out = model.generate(**inputs, max_new_tokens=128)
    torch.cuda.synchronize(device)

    # keep output from rank 0 only
    dist.barrier()
    if dist.get_rank() == 0:
        print(tok.decode(out[0][inputs["input_ids"].shape[-1]:]))

def main():
    initialize_process()
    try:
        run_inference()
    finally:
        dist.destroy_process_group()

if __name__ == "__main__":
    main()
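
The script reads LOCAL_RANK from the environment, so it is meant to be launched with torchrun rather than plain python. A minimal launch sketch, assuming a single node with 4 GPUs (the --nproc_per_node value is an assumption; set it to however many GPUs the checkpoint should be sharded across):

    # assumed single-node, 4-GPU launch; adjust --nproc_per_node to your GPU count
    torchrun --nproc_per_node=4 tp_gpt_oss.py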