Quantcast
Channel: Active questions tagged python - Stack Overflow
Viewing all articles
Browse latest Browse all 23131

I keep getting a circular reference when using ChatGPT with LangChain

$
0
0

I took a couple of code samples, added a little Streamlit markup, and blew up my project. Everything processes just fine until you ask this app a question - then I get a circular reference error. Can somebody please take a look and let me know what I am doing wrong?

Error:

An error occurred: Circular reference detected on line <traceback object at 0x0000027FDC728440>Error in tracing queueTraceback (most recent call last):  File "\venv\Lib\site-packages\langsmith\client.py", line 4112, in     _tracing_thread_handle_batchclient.batch_ingest_runs(create=create, update=update, pre_sampled=True)  File "\venv\Lib\site-packages\langsmith\client.py", line 1220, in batch_ingest_runs"post": [_dumps_json(run) for run in raw_body["post"]],             ^^^^^^^^^^^^^^^^  File "\venv\Lib\site-packages\langsmith\client.py", line 236, in _dumps_json    return _dumps_json_single(obj, functools.partial(_serialize_json, depth=depth))           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^  File "\venv\Lib\site-packages\langsmith\client.py", line 211, in _dumps_json_single    return orjson.dumps(           ^^^^^^^^^^^^^

Code:

import osimport streamlit as st  # used to create our UI frontendfrom langchain_openai import ChatOpenAI # used for GPT3.5/4 modelfrom langchain_community.document_loaders import TextLoaderfrom langchain.text_splitter import RecursiveCharacterTextSplitterfrom langchain_openai import OpenAIEmbeddingsfrom langchain_community.vectorstores import Chromafrom langchain.chains import ConversationalRetrievalChainfrom langchain_community.document_loaders import YoutubeLoaderfrom langchain_community.callbacks import get_openai_callbackdef init_messages():    clear_button = st.sidebar.button("Clear Conversation", key="clear")    if clear_button or "messages" not in st.session_state:        st.session_state.messages = []        st.session_state.costs = []def clear_history():    if'history' in st.session_state:        del st.session_state['history']def main():    try:        # We first setup our initial form        st.session_state["FailedLogins"] = 0        st.title(":football: :green[The Moss Masher] :dog:")        st.header("Mash your long documents and videos into easy answers!")        st.sidebar.image("./FullLogo.png", width=300, use_column_width="always")        st.sidebar.title("Options and Costs")        aiTemp = st.sidebar.slider("How Strict (0) to Creative(10) do you want your conversation:", min_value=0.0,                                   max_value=2.0, value=0.0, step=0.01)        container = st.container()        with container:            with st.form(key="frmOptions", clear_on_submit=True):                st.markdown("To use this app, please provided an OpenAI key")                st.markdown("First, create an OpenAI account or sign in: ""https://platform.openai.com/signup and then go to the API key page, ""https://platform.openai.com/account/api-keys, and create new secret key.")                userEnteredCode = st.text_input("Please enter your API Key:")                submit_button = st.form_submit_button(label='Authenticate My Key')                st.session_state["holdMe"] = os.environ["OPENAI_API_KEY"]        if submit_button:            # We need to set our environmental variable            container.markdown("Key entered, please select a file or enter a Youtube video URL")            # if os.environ["MOSS_MASHER"] != userEnteredCode:            #     os.environ["OPENAI_API_KEY"] = userEnteredCode            # else:            #     os.environ["OPENAI_API_KEY"] = st.session_state["holdMe"]        with container:            uploaded_file = st.file_uploader('Select your file and click Add File:', type=['pdf', 'docx', 'txt'])            add_file = st.button('Add File', on_click=clear_history)            youtube_url = st.text_input('Or enter your Youtube URL')        if uploaded_file and add_file:            with st.spinner('Processing your file, this may take a while...'):                st.session_state['haveData'] = "No"                bytes_data = uploaded_file.read()                file_name = os.path.join('./', uploaded_file.name)                with open(file_name, 'wb') as f:                    f.write(bytes_data)                name, extension = os.path.splitext(file_name)                if extension == '.pdf':                    from langchain_community.document_loaders import PyPDFLoader                    loader = PyPDFLoader(file_name)                elif extension == '.docx':                    from langchain_community.document_loaders import Docx2txtLoader                    loader = Docx2txtLoader(file_name)                elif extension == '.txt':                    from langchain_community.document_loaders import TextLoader                    loader = TextLoader(file_name)                documents = loader.load()                text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)                chunks = text_splitter.split_documents(documents)                embeddings = OpenAIEmbeddings()                vector_store = Chroma.from_documents(chunks, embeddings)                os.remove(file_name)                # initialize OpenAI instance                retriever = vector_store.as_retriever()                llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=aiTemp)                crc = ConversationalRetrievalChain.from_llm(llm, retriever)                st.session_state.crc = crc                # success message when file is chunked and embedded        if youtube_url:            with st.spinner('Processing video...'):                st.session_state['haveData'] = "No"                loader = YoutubeLoader.from_youtube_url(youtube_url)                documents = loader.load()                text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)                chunks = text_splitter.split_documents(documents)                embeddings = OpenAIEmbeddings()                vector_store = Chroma.from_documents(chunks, embeddings)                # initialize OpenAI instance                retriever = vector_store.as_retriever()                llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=aiTemp)                crc = ConversationalRetrievalChain.from_llm(llm, retriever)                st.session_state.crc = crc                st.write("____________________________________________________________")        question = st.text_input('Enter your question here!')        if question:            with st.spinner('Looking for your answer now....'):                if "crc" in st.session_state:                    crc = st.session_state.crc                if 'history' not in st.session_state:                    st.session_state['history'] = []                if 'costs' not in st.session_state:                    st.session_state['costs'] = []                with get_openai_callback() as cb:                    response = crc.invoke({'question': question, 'chat_history': st.session_state['history']})                    st.session_state['costs'].append(cb.total_cost)                st.session_state['history'].append((question, response))                st.write(response)                for prompts in st.session_state['history']:                    st.write("Question: "+ prompts[0])                    st.write("Answer: "+ prompts[1])                costs = st.session_state.get('costs', [])                st.sidebar.markdown("## Costs")                st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")                for cost in costs:                    st.sidebar.markdown(f"- ${cost:.5f}")            question = None    except Exception as error:        print(f"An error occurred: {error} on line {error.__traceback__}")        st.error("Your API key was not valid, please close the site and then try again.")    finally:        print("Done")if __name__ == "__main__":    main()

Viewing all articles
Browse latest Browse all 23131

Trending Articles



<script src="https://jsc.adskeeper.com/r/s/rssing.com.1596347.js" async> </script>