Skip to content

Commit 6220e48

Browse files
author
Dinesh Sajwan
committed
feat(visualqa): updated documentation
1 parent 18756a5 commit 6220e48

File tree

3 files changed

+5
-7
lines changed

3 files changed

+5
-7
lines changed

lambda/aws-qa-appsync-opensearch/question_answering/src/qa_agent/doc_qa.py

+2-4
Original file line numberDiff line numberDiff line change
@@ -124,9 +124,8 @@ def run_qa_agent_rag_no_memory(input_params):
124124
template = """\n\nHuman: {context}
125125
Answer from this text: {question}
126126
\n\nAssistant:"""
127-
verbose = input_params.get(input_params['verbose'],False)
128127
prompt = PromptTemplate(template=template, input_variables=["context", "question"])
129-
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=verbose)
128+
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=input_params['verbose'])
130129

131130
try:
132131
tmp = chain.predict(context=source_documents, question=decoded_question)
@@ -220,9 +219,8 @@ def run_qa_agent_from_single_document_no_memory(input_params):
220219
template = """\n\nHuman: {context}
221220
Answer from this text: {question}
222221
\n\nAssistant:"""
223-
verbose = input_params.get(input_params['verbose'],False)
224222
prompt = PromptTemplate(template=template, input_variables=["context", "question"])
225-
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=verbose)
223+
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=input_params['verbose'])
226224

227225
try:
228226
logger.info(f'file content is: {_file_content}')

lambda/aws-qa-appsync-opensearch/question_answering/src/qa_agent/image_qa.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -228,9 +228,8 @@ def generate_vision_answer_sagemaker(_qa_llm,input_params,decoded_question,statu
228228

229229
template = """\n\nUser: {question}![]({image})<end_of_utterance>
230230
\n\nAssistant:"""
231-
verbose = input_params.get(input_params['verbose'],False)
232231
prompt = PromptTemplate(template=template, input_variables=["image", "question"])
233-
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=verbose)
232+
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=input_params['verbose'])
234233

235234
try:
236235
logger.info(f'decoded_question is: {decoded_question}')

src/patterns/gen-ai/aws-qa-appsync-opensearch/README.md

+2-1
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,8 @@ Mutation call to trigger the question:
198198
,
199199
retrieval:{
200200
max_docs:10
201-
}
201+
},
202+
verbose:false
202203
203204
) {
204205
jobid

0 commit comments

Comments (0)