from langchain.chat_models import ChatOpenAI
#from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import messages_from_dict, messages_to_dict
from time import sleep
import sys
import os

def slowprint(s):
    # Print one character at a time, flushing after each write,
    # for a retro teletype effect.
    for c in s+'\n':
        sys.stdout.write(c)
        sys.stdout.flush()
        sleep(0.01)
MODEL_NAME = "gpt-3.5-turbo-16k-0613"

# Read prompt template
template = open("prompt-template.txt").read().strip()

PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)

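# For reference, a minimal sketch of what prompt-template.txt could contain
# (illustrative only; the real file is not shown in this listing). Whatever
# it says, it must use the two placeholders declared above:
#
#   The following is a conversation between JJ and DoctorK.
#   Current conversation:
#   {history}
#   JJ: {input}
#   DoctorK:
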
memory = ConversationBufferWindowMemory(ai_prefix="DoctorK", human_prefix="JJ", k=64)
# Seed the conversation window from book-chats.txt. Lines look like
# "doc: ..." (added as AI messages) or anything else (added as user
# messages). Split only on the first colon so messages that themselves
# contain colons are kept intact rather than skipped.
for line in open("book-chats.txt").readlines():
    v = [s.strip() for s in line.split(":", 1)]
    if len(v) != 2:
        continue
    if v[0].lower() == "doc":
        memory.chat_memory.add_ai_message(v[1])
    else:
        memory.chat_memory.add_user_message(v[1])

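# messages_to_dict / messages_from_dict are imported above but unused; a
# minimal sketch of how the seeded history could be persisted with them
# (the "memory.json" filename is an assumption, not from the original):
#
#   import json
#   with open("memory.json", "w") as f:
#       json.dump(messages_to_dict(memory.chat_memory.messages), f)
#   # ...and restored later:
#   with open("memory.json") as f:
#       memory.chat_memory.messages = messages_from_dict(json.load(f))
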
conversation = ConversationChain(
    prompt=PROMPT,
    llm=ChatOpenAI(model_name=MODEL_NAME),
    verbose=False,
    memory=memory
)

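# Note: on each conversation.predict() call, the chain renders PROMPT by
# filling {history} from the window memory (the last k=64 exchanges,
# labelled "DoctorK:" / "JJ:") and {input} with the new user message.
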
sys.stderr.write(chr(27) + "[2J") # Clear screen ;)
sys.stderr.flush()
slowprint("""=====================================================================
Interactive Talk-Program loaded and started.
Users:
* [Doctor Kernel]
* [JJ] <- you
=====================================================================""")
slowprint("Doctor Kernel: {}".format(conversation.predict(input="Hello again, doctor. Please remind me what we were talking about last time.")))
while True:
    # Keep prompting until the user types a non-empty line; Ctrl-D
    # (EOF) ends the session cleanly.
    prompt = ""
    while not prompt:
        try:
            prompt = input("> ").strip()
        except EOFError:
            slowprint("""
=====================================================================
End of Talk
=====================================================================""")
            sys.exit(0)
    slowprint("\nJJ: {}".format(prompt))
    slowprint("Doctor Kernel: {}".format(conversation.predict(input=prompt)))