diff --git a/backend/score.py b/backend/score.py index 75181e096..6d3ffd0e6 100644 --- a/backend/score.py +++ b/backend/score.py @@ -1,3 +1,4 @@ +from src.entities.user import user_info from fastapi import FastAPI, File, UploadFile, Form, Request, HTTPException from fastapi_health import health from fastapi.middleware.cors import CORSMiddleware @@ -218,7 +219,8 @@ async def extract_knowledge_graph_from_file( access_token=Form(None), retry_condition=Form(None), additional_instructions=Form(None), - email=Form(None) + email=Form(None), + user_obj: user_info=Form(None) ): """ Calls 'extract_graph_from_file' in a new thread to create Neo4jGraph from a @@ -241,22 +243,22 @@ async def extract_knowledge_graph_from_file( if source_type == 'local file': file_name = sanitize_filename(file_name) merged_file_path = validate_file_path(MERGED_DIR, file_name) - uri_latency, result = await extract_graph_from_file_local_file(uri, userName, password, database, model, merged_file_path, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions) + uri_latency, result = await extract_graph_from_file_local_file(uri, userName, password, database, model, merged_file_path, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj) elif source_type == 's3 bucket' and source_url: - uri_latency, result = await extract_graph_from_file_s3(uri, userName, password, database, model, source_url, aws_access_key_id, aws_secret_access_key, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions) + uri_latency, result = await extract_graph_from_file_s3(uri, userName, password, database, model, source_url, aws_access_key_id, aws_secret_access_key, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj) elif source_type == 'web-url': - uri_latency, result = await extract_graph_from_web_page(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions) + uri_latency, result = await extract_graph_from_web_page(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj) elif source_type == 'youtube' and source_url: - uri_latency, result = await extract_graph_from_file_youtube(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions) + uri_latency, result = await extract_graph_from_file_youtube(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj) elif source_type == 'Wikipedia' and wiki_query: - uri_latency, result = await extract_graph_from_file_Wikipedia(uri, userName, password, database, model, wiki_query, language, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions) + uri_latency, result = await extract_graph_from_file_Wikipedia(uri, userName, 
password, database, model, wiki_query, language, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj) elif source_type == 'gcs bucket' and gcs_bucket_name: - uri_latency, result = await extract_graph_from_file_gcs(uri, userName, password, database, model, gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions) + uri_latency, result = await extract_graph_from_file_gcs(uri, userName, password, database, model, gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj) else: return create_api_response('Failed',message='source_type is other than accepted source') extract_api_time = time.time() - start_time @@ -1096,5 +1098,61 @@ async def get_schema_visualization(uri=Form(None), userName=Form(None), password finally: gc.collect() +@app.post("/set_user_info") +async def set_user_info(uri=Form(None), + userName=Form(None), + password=Form(None), + database=Form(None), + email=Form(None)): + try: + start = time.time() + graph = create_graph_database_connection(uri, userName, password, database) + graphDb_data_Access = graphDBdataAccess(graph) + result = graphDb_data_Access.save_user_info(email, database) + end = time.time() + elapsed_time = end - start + return create_api_response('Success', data=result,message=f"Total elapsed API time {elapsed_time:.2f}") + except Exception as e: + message="Unable to save the user details in the DB" + error_message = str(e) + logging.info(message) + logging.exception(f'Exception:{error_message}') + return create_api_response("Failed", message=message, error=error_message) + finally: + gc.collect() + +@app.post("/get_user_info") +async def get_user_info(uri=Form(None), + userName=Form(None), + password=Form(None), + database=Form(None), + email=Form(None), + token_chunk_size:int=Form(None)): + try: + start = time.time() + graph = create_graph_database_connection(uri, userName, password, database) + graphDb_data_Access = graphDBdataAccess(graph) + # result = graphDb_data_Access.get_user_detail(email) + MAX_TOKEN_CHUNK_SIZE = int(os.getenv('MAX_TOKEN_CHUNK_SIZE', 10000)) + chunk_to_be_created = int(MAX_TOKEN_CHUNK_SIZE / int(token_chunk_size)) + userInfo = user_info() + userInfo.chunk_limits= chunk_to_be_created + userInfo.readonly= False + userInfo.rate_limit= 60000 + userInfo.remaining_limit = 40000 + userInfo.is_chunk_limit_applicable = True + end = time.time() + elapsed_time = end - start + logger.log_struct(userInfo.model_dump(), "INFO") + return create_api_response('Success', data=userInfo.model_dump(),message=f"Total elapsed API time {elapsed_time:.2f}") + except Exception as e: + message="Unable to get the user details from the DB" + error_message = str(e) + logging.info(message) + logging.exception(f'Exception:{error_message}') + return create_api_response("Failed", message=message, error=error_message) + finally: + gc.collect() + if __name__ == "__main__": uvicorn.run(app) \ No newline at end of file diff --git a/backend/src/create_chunks.py b/backend/src/create_chunks.py index 523d2b77c..b058b1121 100644 --- a/backend/src/create_chunks.py +++ b/backend/src/create_chunks.py @@ -1,5 +1,6 @@ from langchain_text_splitters import TokenTextSplitter from
langchain.docstore.document import Document +from src.entities.user import user_info from langchain_neo4j import Neo4jGraph import logging from src.document_sources.youtube import get_chunks_with_timestamps, get_calculated_timestamps @@ -14,7 +15,7 @@ def __init__(self, pages: list[Document], graph: Neo4jGraph): self.pages = pages self.graph = graph - def split_file_into_chunks(self,token_chunk_size, chunk_overlap): + def split_file_into_chunks(self,token_chunk_size, chunk_overlap, user_obj: user_info): """ Split a list of documents(file pages) into chunks of fixed size. @@ -33,7 +34,7 @@ def split_file_into_chunks(self,token_chunk_size, chunk_overlap): chunks = [] for i, document in enumerate(self.pages): page_number = i + 1 - if len(chunks) >= chunk_to_be_created: + if user_obj.is_chunk_limit_applicable and (len(chunks) >= user_obj.chunk_limits or user_obj.remaining_limit <= 0) or len(chunks) >= chunk_to_be_created: break else: for chunk in text_splitter.split_documents([document]): diff --git a/backend/src/entities/user.py b/backend/src/entities/user.py new file mode 100644 index 000000000..26d176de8 --- /dev/null +++ b/backend/src/entities/user.py @@ -0,0 +1,9 @@ +from pydantic import BaseModel +from typing import Optional + +class user_info(BaseModel): + chunk_limits:Optional[int] = 50 + readonly:Optional[bool] = False + rate_limit:Optional[int] = 100000 + remaining_limit:Optional[int] = 100000 + is_chunk_limit_applicable:Optional[bool] = True \ No newline at end of file diff --git a/backend/src/graphDB_dataAccess.py b/backend/src/graphDB_dataAccess.py index 397227a9a..4c9e4b35f 100644 --- a/backend/src/graphDB_dataAccess.py +++ b/backend/src/graphDB_dataAccess.py @@ -583,4 +583,10 @@ def get_websource_url(self,file_name): RETURN d.url AS url """ param = {"file_name" : file_name} - return self.execute_query(query, param) \ No newline at end of file + return self.execute_query(query, param) + + def save_user_info(self,email, database): + domain = "@neo4j.com" + is_neo4j_user = domain in email + write_access = self.check_account_access(database=database) + return {"is_neo4j_user": is_neo4j_user, "write_access": write_access} \ No newline at end of file diff --git a/backend/src/main.py b/backend/src/main.py index 41e69e6f4..af0f3d31c 100644 --- a/backend/src/main.py +++ b/backend/src/main.py @@ -1,3 +1,4 @@ +from src.entities.user import user_info from langchain_neo4j import Neo4jGraph from src.shared.constants import (BUCKET_UPLOAD,BUCKET_FAILED_FILE, PROJECT_ID, QUERY_TO_GET_CHUNKS, QUERY_TO_DELETE_EXISTING_ENTITIES, @@ -226,7 +227,7 @@ def create_source_node_graph_url_wikipedia(graph, model, wiki_query, source_type lst_file_name.append({'fileName':obj_source_node.file_name,'fileSize':obj_source_node.file_size,'url':obj_source_node.url, 'language':obj_source_node.language, 'status':'Success'}) return lst_file_name,success_count,failed_count -async def extract_graph_from_file_local_file(uri, userName, password, database, model, merged_file_path, fileName, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions): +async def extract_graph_from_file_local_file(uri, userName, password, database, model, merged_file_path, fileName, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj:user_info): logging.info(f'Process file name :{fileName}') if not retry_condition: @@ -238,11 +239,11 @@ async def extract_graph_from_file_local_file(uri,
userName, password, database, file_name, pages, file_extension = get_documents_from_file_by_path(merged_file_path,fileName) if pages==None or len(pages)==0: raise LLMGraphBuilderException(f'File content is not available for file : {file_name}') - return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, True, merged_file_path, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj, is_uploaded_from_local= True, merged_file_path= merged_file_path) else: - return await processing_source(uri, userName, password, database, model, fileName, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, True, merged_file_path, retry_condition, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, fileName, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj, retry_condition, True, merged_file_path) -async def extract_graph_from_file_s3(uri, userName, password, database, model, source_url, aws_access_key_id, aws_secret_access_key, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions): +async def extract_graph_from_file_s3(uri, userName, password, database, model, source_url, aws_access_key_id, aws_secret_access_key, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj:user_info): if not retry_condition: if(aws_access_key_id==None or aws_secret_access_key==None): raise LLMGraphBuilderException('Please provide AWS access and secret keys') @@ -252,48 +253,48 @@ async def extract_graph_from_file_s3(uri, userName, password, database, model, s if pages==None or len(pages)==0: raise LLMGraphBuilderException(f'File content is not available for file : {file_name}') - return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj) else: - return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition=retry_condition, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj, retry_condition) -async def extract_graph_from_web_page(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions): +async def extract_graph_from_web_page(uri, userName, password, database, model, source_url, file_name, allowedNodes, 
allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj:user_info): if not retry_condition: pages = get_documents_from_web_page(source_url) if pages==None or len(pages)==0: raise LLMGraphBuilderException(f'Content is not available for given URL : {file_name}') - return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj) else: - return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition=retry_condition, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj, retry_condition) -async def extract_graph_from_file_youtube(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions): +async def extract_graph_from_file_youtube(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj:user_info): if not retry_condition: file_name, pages = get_documents_from_youtube(source_url) if pages==None or len(pages)==0: raise LLMGraphBuilderException(f'Youtube transcript is not available for file : {file_name}') - return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj) else: - return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition=retry_condition, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj, retry_condition) -async def extract_graph_from_file_Wikipedia(uri, userName, password, database, model, wiki_query, language, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions): +async def extract_graph_from_file_Wikipedia(uri, userName, password, database, model, wiki_query, language, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj:user_info): if not retry_condition: file_name, pages = get_documents_from_Wikipedia(wiki_query, language) if pages==None or 
len(pages)==0: raise LLMGraphBuilderException(f'Wikipedia page is not available for file : {file_name}') - return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj) else: - return await processing_source(uri, userName, password, database, model, file_name,[], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition=retry_condition, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name,[], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj, retry_condition) -async def extract_graph_from_file_gcs(uri, userName, password, database, model, gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions): +async def extract_graph_from_file_gcs(uri, userName, password, database, model, gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token, file_name, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition, additional_instructions, user_obj:user_info): if not retry_condition: file_name, pages = get_documents_from_gcs(gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token) if pages==None or len(pages)==0: raise LLMGraphBuilderException(f'File content is not available for file : {file_name}') - return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj) else: - return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, retry_condition=retry_condition, additional_instructions=additional_instructions) + return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj, retry_condition) -async def processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, is_uploaded_from_local=None, merged_file_path=None, retry_condition=None, additional_instructions=None): +async def processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, token_chunk_size, chunk_overlap, chunks_to_combine, additional_instructions, user_obj:user_info, retry_condition = None, is_uploaded_from_local=False, merged_file_path=None): """ Extracts a Neo4jGraph from a PDF file based on the model. 
@@ -322,7 +323,7 @@ async def processing_source(uri, userName, password, database, model, file_name, graphDb_data_Access = graphDBdataAccess(graph) create_chunk_vector_index(graph) start_get_chunkId_chunkDoc_list = time.time() - total_chunks, chunkId_chunkDoc_list = get_chunkId_chunkDoc_list(graph, file_name, pages, token_chunk_size, chunk_overlap, retry_condition) + total_chunks, chunkId_chunkDoc_list = get_chunkId_chunkDoc_list(graph, file_name, pages, token_chunk_size, chunk_overlap, retry_condition, user_obj) end_get_chunkId_chunkDoc_list = time.time() elapsed_get_chunkId_chunkDoc_list = end_get_chunkId_chunkDoc_list - start_get_chunkId_chunkDoc_list logging.info(f'Time taken to create list chunkids with chunk document: {elapsed_get_chunkId_chunkDoc_list:.2f} seconds') @@ -509,7 +510,7 @@ async def processing_chunks(chunkId_chunkDoc_list,graph,uri, userName, password, rel_count = count_response[file_name].get('relationshipCount',"0") return node_count,rel_count,latency_processing_chunk -def get_chunkId_chunkDoc_list(graph, file_name, pages, token_chunk_size, chunk_overlap, retry_condition): +def get_chunkId_chunkDoc_list(graph, file_name, pages, token_chunk_size, chunk_overlap, retry_condition, user_obj: user_info): if not retry_condition: logging.info("Break down file into chunks") bad_chars = ['"', "\n", "'"] @@ -522,7 +523,7 @@ def get_chunkId_chunkDoc_list(graph, file_name, pages, token_chunk_size, chunk_o text = text.replace(j, '') pages[i]=Document(page_content=str(text), metadata=pages[i].metadata) create_chunks_obj = CreateChunksofDocument(pages, graph) - chunks = create_chunks_obj.split_file_into_chunks(token_chunk_size, chunk_overlap) + chunks = create_chunks_obj.split_file_into_chunks(token_chunk_size, chunk_overlap, user_obj) chunkId_chunkDoc_list = create_relation_between_chunks(graph,file_name,chunks) return len(chunks), chunkId_chunkDoc_list diff --git a/frontend/src/components/Content.tsx b/frontend/src/components/Content.tsx index 4f8904fd9..c7113590e 100644 --- a/frontend/src/components/Content.tsx +++ b/frontend/src/components/Content.tsx @@ -45,7 +45,7 @@ import retry from '../services/Retry'; import { showErrorToast, showNormalToast, showSuccessToast } from '../utils/Toasts'; import { useMessageContext } from '../context/UserMessages'; import PostProcessingToast from './Popups/GraphEnhancementDialog/PostProcessingCheckList/PostProcessingToast'; -import { getChunkText } from '../services/getChunkText'; +import { getChunkText } from '../services/GetChunkTexts'; import ChunkPopUp from './Popups/ChunkPopUp'; import { isExpired, isFileReadyToProcess } from '../utils/Utils'; import { useHasSelections } from '../hooks/useHasSelections'; @@ -616,7 +616,7 @@ const Content: React.FC = ({ setRetryLoading(true); const response = await retry(filename, retryoption); setRetryLoading(false); - if (response.data.status === 'Failure') { + if (response.data.status === 'Failed') { throw new Error(response.data.error); } else if ( response.data.status === 'Success' && @@ -836,32 +836,6 @@ const Content: React.FC = ({ > )} - {showExpirationModal && filesForProcessing.length && ( - }> - setShowExpirationModal(false)} - loading={extractLoading} - selectedRows={childRef.current?.getSelectedRows() as CustomFile[]} - isLargeDocumentAlert={false} - > - - )} - {showExpirationModal && filesForProcessing.length && ( - }> - setShowExpirationModal(false)} - loading={extractLoading} - selectedRows={childRef.current?.getSelectedRows() as CustomFile[]} - isLargeDocumentAlert={false} - > - - )} 
{showDeletePopUp && ( = ({ hideModal, open }) => { diff --git a/frontend/src/components/DataSources/GCS/GCSModal.tsx b/frontend/src/components/DataSources/GCS/GCSModal.tsx index f0ee06996..1b3e70928 100644 --- a/frontend/src/components/DataSources/GCS/GCSModal.tsx +++ b/frontend/src/components/DataSources/GCS/GCSModal.tsx @@ -4,7 +4,7 @@ import { useFileContext } from '../../../context/UsersFiles'; import { urlScanAPI } from '../../../services/URLScan'; import { CustomFileBase, GCSModalProps, fileName, nonoautherror } from '../../../types'; import { v4 as uuidv4 } from 'uuid'; -import CustomModal from '../../../HOC/CustomModal'; +import CustomModal from '../../UI/CustomModal'; import { useGoogleLogin } from '@react-oauth/google'; import { useAlertContext } from '../../../context/Alert'; import { buttonCaptions } from '../../../utils/Constants'; diff --git a/frontend/src/components/FileTable.tsx b/frontend/src/components/FileTable.tsx index 1617d88a1..f3b0847a8 100644 --- a/frontend/src/components/FileTable.tsx +++ b/frontend/src/components/FileTable.tsx @@ -74,7 +74,7 @@ const FileTable: ForwardRefRenderFunction = (props, re const { connectionStatus, setConnectionStatus, onInspect, onRetry, onChunkView } = props; const { filesData, setFilesData, model, rowSelection, setRowSelection, setSelectedRows, setProcessedCount, queue } = useFileContext(); - const { userCredentials, isReadOnlyUser } = useCredentials(); + const { userCredentials, isReadOnlyUser, isNeo4jUser } = useCredentials(); const columnHelper = createColumnHelper(); const [columnFilters, setColumnFilters] = useState([]); const [isLoading, setIsLoading] = useState(false); @@ -1020,7 +1020,7 @@ const FileTable: ForwardRefRenderFunction = (props, re ); - } else if (connectionStatus) { + } else if (connectionStatus && !isNeo4jUser) { return ( diff --git a/frontend/src/components/Popups/GraphEnhancementDialog/AdditionalInstructions/index.tsx b/frontend/src/components/Popups/GraphEnhancementDialog/AdditionalInstructions/index.tsx index 0cf693138..1f62a05a4 100644 --- a/frontend/src/components/Popups/GraphEnhancementDialog/AdditionalInstructions/index.tsx +++ b/frontend/src/components/Popups/GraphEnhancementDialog/AdditionalInstructions/index.tsx @@ -14,6 +14,7 @@ import { useFileContext } from '../../../../context/UsersFiles'; import { showNormalToast } from '../../../../utils/Toasts'; import { OnChangeValue } from 'react-select'; import { OptionType } from '../../../../types'; +import { useCredentials } from '../../../../context/UserCredentials'; export default function AdditionalInstructionsText({ closeEnhanceGraphSchemaDialog, @@ -22,6 +23,7 @@ export default function AdditionalInstructionsText({ }) { const { breakpoints } = tokens; const tablet = useMediaQuery(`(min-width:${breakpoints.xs}) and (max-width: ${breakpoints.lg})`); + const { isNeo4jUser } = useCredentials(); const { additionalInstructions, setAdditionalInstructions, @@ -134,7 +136,11 @@ export default function AdditionalInstructionsText({ `, }} type='creatable' - helpText='The maximum token limit is 10,000 for LLM processing. The total number of chunks will be calculated as 10,000 divided by the tokens per chunk you select. For example, selecting 500 tokens per chunk results in 20 chunks (10,000 / 500).' + helpText={ + !isNeo4jUser + ? 'The maximum token limit is 10,000 for LLM processing. The total number of chunks will be calculated as 10,000 divided by the tokens per chunk you select. For example, selecting 500 tokens per chunk results in 20 chunks (10,000 / 500).' 
+ : null + } />
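
The break condition added in split_file_into_chunks combines the new per-user limits (chunk_limits, remaining_limit, is_chunk_limit_applicable from src/entities/user.py) with the existing per-file budget chunk_to_be_created. Below is a minimal, self-contained sketch of that rule, reading remaining_limit from the passed user_obj instance; the should_stop_chunking helper is illustrative only and is not part of the change.

```python
# Standalone sketch of the chunk-limit rule added to split_file_into_chunks,
# reusing only the fields defined in backend/src/entities/user.py.
from typing import Optional
from pydantic import BaseModel

class user_info(BaseModel):
    chunk_limits: Optional[int] = 50
    readonly: Optional[bool] = False
    rate_limit: Optional[int] = 100000
    remaining_limit: Optional[int] = 100000
    is_chunk_limit_applicable: Optional[bool] = True

def should_stop_chunking(chunks_created: int, chunk_to_be_created: int, user_obj: user_info) -> bool:
    # Stop when the per-user limit applies and is exhausted (chunk cap reached
    # or no remaining rate budget), or when the per-file chunk budget is reached.
    per_user_limit_hit = user_obj.is_chunk_limit_applicable and (
        chunks_created >= user_obj.chunk_limits or user_obj.remaining_limit <= 0
    )
    return per_user_limit_hit or chunks_created >= chunk_to_be_created

# With a 10,000-token budget and 500 tokens per chunk, chunk_to_be_created is 20.
assert should_stop_chunking(20, 20, user_info())                   # file budget reached
assert should_stop_chunking(5, 20, user_info(remaining_limit=0))   # rate budget exhausted
assert not should_stop_chunking(5, 20, user_info())                # keep chunking
```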
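The two new endpoints are plain form-data POSTs, so they can be exercised with any HTTP client. A hedged client sketch follows, assuming a local deployment and placeholder credentials; beyond the status key checked by the frontend, the exact response envelope produced by create_api_response is an assumption.

```python
# Hedged sketch of exercising the new user-info endpoints with the form fields
# they declare in score.py. Base URL, credentials, and email are placeholders.
import requests

BASE_URL = "http://localhost:8000"  # assumed local deployment
creds = {
    "uri": "neo4j+s://example.databases.neo4j.io",
    "userName": "neo4j",
    "password": "password",
    "database": "neo4j",
}

# save_user_info flags Neo4j staff by the "@neo4j.com" email domain and reports
# write access via check_account_access.
set_resp = requests.post(
    f"{BASE_URL}/set_user_info",
    data={**creds, "email": "someone@neo4j.com"},
)
print(set_resp.json())  # expected data like {"is_neo4j_user": true, "write_access": ...}

# get_user_info derives the per-file chunk budget from MAX_TOKEN_CHUNK_SIZE and the
# submitted token_chunk_size (10,000 / 500 = 20 with the defaults) and returns it
# inside a user_info payload.
get_resp = requests.post(
    f"{BASE_URL}/get_user_info",
    data={**creds, "email": "someone@neo4j.com", "token_chunk_size": 500},
)
print(get_resp.json())
```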