ValueError: not enough values to unpack (expected 2, got 1) #61

Open
Chirag-Mphasis opened this issue Dec 24, 2024 · 1 comment

@Chirag-Mphasis

I am trying to run the demo, and it fails with the following error:

Traceback (most recent call last):
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\gradio\queueing.py", line 714, in process_events
response = await route_utils.call_process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\gradio\route_utils.py", line 322, in call_process_api
output = await app.get_blocks().process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\gradio\blocks.py", line 2047, in process_api
result = await self.call_function(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\gradio\blocks.py", line 1606, in call_function
prediction = await utils.async_iteration(iterator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\gradio\utils.py", line 714, in async_iteration
return await anext(iterator)
^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\gradio\utils.py", line 708, in anext
return await anyio.to_thread.run_sync(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\anyio_backends_asyncio.py", line 2505, in run_sync_in_worker_thread
return await future
^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\anyio_backends_asyncio.py", line 1005, in run
result = context.run(func, *args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\gradio\utils.py", line 691, in run_sync_iterator_async
return next(iterator)
^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\anaconda3\envs\ootb\Lib\site-packages\gradio\utils.py", line 852, in gen_wrapper
response = next(iterator)
^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\Documents\computer_use_ootb-main\app.py", line 273, in process_input
for loop_msg in sampling_loop_sync(
File "C:\Users\chirag_tubakad\Documents\computer_use_ootb-main\computer_use_demo\loop.py", line 175, in sampling_loop_sync
vlm_response = planner(messages=messages)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\chirag_tubakad\Documents\computer_use_ootb-main\computer_use_demo\gui_agent\planner\api_vlm_planner.py", line 141, in call
vlm_response, token_usage = run_oai_interleaved(
^^^^^^^^^^^^^^^^^^^^^^^^^
ValueError: not enough values to unpack (expected 2, got 1)
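
From the traceback, the unpacking fails where api_vlm_planner.py expects two return values (vlm_response and token_usage) from run_oai_interleaved, which suggests that run_oai_interleaved returns a single value on some code path, most likely when the API response does not contain the expected fields. A minimal illustration of the pattern (the names below are placeholders, not the project's actual code):

# Hypothetical sketch of the failing pattern, not the actual project code.
def call_api(ok: bool):
    if ok:
        return "response text", 42        # normal path: (text, token_usage) tuple
    return {"error": "request failed"}    # error path: a single dict

# Unpacking the error-path result raises:
# ValueError: not enough values to unpack (expected 2, got 1)
vlm_response, token_usage = call_api(ok=False)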

Any help would be appreciated!

@reproduce-bot

The following script was generated by an AI agent to help reproduce the issue:

# computer_use_ootb/reproduce.py
import os
import requests
from unittest.mock import patch

def run_oai_interleaved(messages: list, system: str, llm: str, api_key: str, max_tokens=256, temperature=0):
    api_key = api_key or os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("OPENAI_API_KEY is not set")
    
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
    final_messages = [{"role": "system", "content": system}]
    if isinstance(messages, list):
        for item in messages:
            contents = []
            if isinstance(item, dict):
                for cnt in item["content"]:
                    if isinstance(cnt, str):
                        # Placeholder function for is_image_path
                        def is_image_path(path):
                            return False  # Assuming no image paths for simplicity

                        if is_image_path(cnt):
                            # NOTE: encode_image is not defined in this script; this branch is
                            # unreachable because the placeholder is_image_path always returns False.
                            base64_image = encode_image(cnt)
                            content = {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
                        else:
                            content = {"type": "text", "text": cnt}
                    contents.append(content)
                message = {"role": item["role"], "content": contents}
            else:
                contents.append({"type": "text", "text": item})
                message = {"role": "user", "content": contents}
            final_messages.append(message)
    elif isinstance(messages, str):
        final_messages = [{"role": "user", "content": messages}]
    
    payload = {
        "model": llm,
        "messages": final_messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    # Mocking requests.post to simulate API response
    with patch("requests.post") as mock_post:
        mock_response = mock_post.return_value
        # Simulating invalid response to reproduce the issue
        mock_response.json.side_effect = Exception("not enough values to unpack (expected 2, got 1)")
        response = mock_response
    try:
        text = response.json()['choices'][0]['message']['content']
        token_usage = int(response.json()['usage']['total_tokens'])
        return text, token_usage
    except Exception as e:
        # Error path returns a single value instead of a (text, token_usage) tuple,
        # which is what triggers the unpacking error in the caller.
        return response.json()

def test_run_oai_interleaved():
    try:
        # These are dummy values for testing purposes
        messages = [{"role": "user", "content": ["Hello, how are you?"]}]
        system = "You are a helpful assistant."
        llm = "gpt-3.5-turbo"
        api_key = "test_api_key"  # Replace with a valid API key for actual testing
        result = run_oai_interleaved(messages, system, llm, api_key)
        
        # The test should fail before the issue is resolved
        assert isinstance(result, tuple), "The result should be a tuple"
        assert len(result) == 2, "The tuple should have 2 elements"
        print("Test passed successfully with no errors!")
    except Exception as e:
        raise AssertionError(e)

if __name__ == "__main__":
    test_run_oai_interleaved()

How to run:

python3 computer_use_ootb/reproduce.py

Expected Result:

Traceback (most recent call last):
  File "computer_use_ootb/reproduce.py", line 62, in test_run_oai_interleaved
    result = run_oai_interleaved(messages, system, llm, api_key)
  File "computer_use_ootb/reproduce.py", line 53, in run_oai_interleaved
    return response.json()
  File "/usr/local/lib/python3.10/unittest/mock.py", line 1114, in __call__
    return self._mock_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/unittest/mock.py", line 1118, in _mock_call
    return self._execute_mock_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/unittest/mock.py", line 1173, in _execute_mock_call
    raise effect
  File "computer_use_ootb/reproduce.py", line 49, in run_oai_interleaved
    text = response.json()['choices'][0]['message']['content']
  File "/usr/local/lib/python3.10/unittest/mock.py", line 1114, in __call__
    return self._mock_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/unittest/mock.py", line 1118, in _mock_call
    return self._execute_mock_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/unittest/mock.py", line 1173, in _execute_mock_call
    raise effect
Exception: not enough values to unpack (expected 2, got 1)

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "computer_use_ootb/reproduce.py", line 72, in <module>
    test_run_oai_interleaved()
  File "computer_use_ootb/reproduce.py", line 69, in test_run_oai_interleaved
    raise AssertionError(e)
AssertionError: not enough values to unpack (expected 2, got 1)
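
A possible direction for a fix, assuming the failure indeed comes from run_oai_interleaved returning the raw JSON payload on its error path while the caller in api_vlm_planner.py unpacks two values, is to keep the return shape consistent (this is a sketch under that assumption, not a verified patch for this repository):

# Hypothetical helper: always return a (text, token_usage) pair, even on the error path.
def parse_oai_response(response):
    data = response.json()
    try:
        text = data['choices'][0]['message']['content']
        token_usage = int(data['usage']['total_tokens'])
    except (KeyError, IndexError, TypeError, ValueError):
        # Surface the raw error payload as text and report zero token usage
        # instead of returning a bare dict that the caller cannot unpack.
        text = str(data)
        token_usage = 0
    return text, token_usage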

Thank you for your valuable contribution to this project and we appreciate your feedback! Please respond with an emoji if you find this script helpful. Feel free to comment below if any improvements are needed.

Best regards from an AI Agent!