You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session. You switched accounts on another tab or window. Reload to refresh your session. Dismiss alert
使用chatglm-6b-int4-slim报错如下
我就问了一句你好
--------------------Collect environment info--------------------
sys.platform: win32
Python: 3.10.9 (tags/v3.10.9:1dd9be6, Dec 6 2022, 20:01:21) [MSC v.1934 64 bit (AMD64)]
Python executable: D:\Python\Python310\python.exe
PyTorch: 2.0.0+cu118
Gradio: 3.22.1
Transformers: 4.26.1
GPU 0: NVIDIA GeForce RTX 3060 Laptop GPU
------------------------------Done------------------------------
Traceback (most recent call last):
File "D:\Python\Python310\lib\site-packages\gradio\routes.py", line 393, in run_predict
output = await app.get_blocks().process_api(
File "D:\Python\Python310\lib\site-packages\gradio\blocks.py", line 1069, in process_api
result = await self.call_function(
File "D:\Python\Python310\lib\site-packages\gradio\blocks.py", line 892, in call_function
prediction = await anyio.to_thread.run_sync(
File "D:\Python\Python310\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "D:\Python\Python310\lib\site-packages\anyio_backends_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "D:\Python\Python310\lib\site-packages\anyio_backends_asyncio.py", line 867, in run
result = context.run(func, *args)
File "D:\Python\Python310\lib\site-packages\gradio\utils.py", line 549, in async_iteration
return next(iterator)
File "D:\Desktop\CreativeChatGLM-master\web_demo.py", line 52, in predict
for response in inference(query, max_length, top_p, temperature, allow_generate, history):
File "D:\Desktop\CreativeChatGLM-master\web_demo.py", line 40, in inference
for response, history in model.stream_chat_continue(tokenizer, input, history, max_length=max_length,
File "D:\Python\Python310\lib\site-packages\torch\utils_contextlib.py", line 35, in generator_context
response = gen.send(None)
File "D:\Desktop\CreativeChatGLM-master\chatglm\modeling_chatglm.py", line 1195, in stream_chat_continue
for outputs in self.stream_generate(**batch_input, **gen_kwargs):
File "D:\Python\Python310\lib\site-packages\torch\utils_contextlib.py", line 35, in generator_context
response = gen.send(None)
File "D:\Desktop\CreativeChatGLM-master\chatglm\modeling_chatglm.py", line 1270, in stream_generate
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
File "D:\Desktop\CreativeChatGLM-master\chatglm\modeling_chatglm.py", line 999, in prepare_inputs_for_generation
attention_mask, position_ids = self.get_masks_and_position_ids(
File "D:\Desktop\CreativeChatGLM-master\chatglm\modeling_chatglm.py", line 945, in get_masks_and_position_ids
seq_length = seq.index(self.config.bos_token_id)
ValueError: 130004 is not in list
The text was updated successfully, but these errors were encountered:
使用chatglm-6b-int4-slim报错如下
我就问了一句你好
--------------------Collect environment info--------------------
sys.platform: win32
Python: 3.10.9 (tags/v3.10.9:1dd9be6, Dec 6 2022, 20:01:21) [MSC v.1934 64 bit (AMD64)]
Python executable: D:\Python\Python310\python.exe
PyTorch: 2.0.0+cu118
Gradio: 3.22.1
Transformers: 4.26.1
GPU 0: NVIDIA GeForce RTX 3060 Laptop GPU
------------------------------Done------------------------------
Traceback (most recent call last):
File "D:\Python\Python310\lib\site-packages\gradio\routes.py", line 393, in run_predict
output = await app.get_blocks().process_api(
File "D:\Python\Python310\lib\site-packages\gradio\blocks.py", line 1069, in process_api
result = await self.call_function(
File "D:\Python\Python310\lib\site-packages\gradio\blocks.py", line 892, in call_function
prediction = await anyio.to_thread.run_sync(
File "D:\Python\Python310\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "D:\Python\Python310\lib\site-packages\anyio_backends_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "D:\Python\Python310\lib\site-packages\anyio_backends_asyncio.py", line 867, in run
result = context.run(func, *args)
File "D:\Python\Python310\lib\site-packages\gradio\utils.py", line 549, in async_iteration
return next(iterator)
File "D:\Desktop\CreativeChatGLM-master\web_demo.py", line 52, in predict
for response in inference(query, max_length, top_p, temperature, allow_generate, history):
File "D:\Desktop\CreativeChatGLM-master\web_demo.py", line 40, in inference
for response, history in model.stream_chat_continue(tokenizer, input, history, max_length=max_length,
File "D:\Python\Python310\lib\site-packages\torch\utils_contextlib.py", line 35, in generator_context
response = gen.send(None)
File "D:\Desktop\CreativeChatGLM-master\chatglm\modeling_chatglm.py", line 1195, in stream_chat_continue
for outputs in self.stream_generate(**batch_input, **gen_kwargs):
File "D:\Python\Python310\lib\site-packages\torch\utils_contextlib.py", line 35, in generator_context
response = gen.send(None)
File "D:\Desktop\CreativeChatGLM-master\chatglm\modeling_chatglm.py", line 1270, in stream_generate
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
File "D:\Desktop\CreativeChatGLM-master\chatglm\modeling_chatglm.py", line 999, in prepare_inputs_for_generation
attention_mask, position_ids = self.get_masks_and_position_ids(
File "D:\Desktop\CreativeChatGLM-master\chatglm\modeling_chatglm.py", line 945, in get_masks_and_position_ids
seq_length = seq.index(self.config.bos_token_id)
ValueError: 130004 is not in list
The text was updated successfully, but these errors were encountered: