push
Browse files — model_manager.py (+2 −2)
model_manager.py
CHANGED
|
@@ -138,7 +138,7 @@ def generate_chat_response(prompt: str, max_length: int = 512, temperature: floa
|
|
| 138 |
raise
|
| 139 |
|
| 140 |
generated_ids_trimmed = [
|
| 141 |
-
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs
|
| 142 |
]
|
| 143 |
generated_text = style_processor.batch_decode(
|
| 144 |
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
|
@@ -228,7 +228,7 @@ async def generate_chat_response_streaming(prompt: str, max_length: int = 512, t
|
|
| 228 |
raise
|
| 229 |
|
| 230 |
generated_ids_trimmed = [
|
| 231 |
-
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs
|
| 232 |
]
|
| 233 |
generated_text = style_processor.batch_decode(
|
| 234 |
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
|
|
|
| 138 |
raise
|
| 139 |
|
| 140 |
generated_ids_trimmed = [
|
| 141 |
+
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs['input_ids'], outputs)
|
| 142 |
]
|
| 143 |
generated_text = style_processor.batch_decode(
|
| 144 |
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
|
|
|
| 228 |
raise
|
| 229 |
|
| 230 |
generated_ids_trimmed = [
|
| 231 |
+
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs['input_ids'], outputs)
|
| 232 |
]
|
| 233 |
generated_text = style_processor.batch_decode(
|
| 234 |
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
|