ChatGPT over your data
Notebook inspired by:
- Tutorial: ChatGPT Over Your Data
- Build a GitHub Support Bot with GPT3, LangChain, and Python
- Meet Bricky - a conversational bot using OpenAI
Installing the required packages.
%%capture
# update or install the necessary libraries
!pip install --upgrade openai
!pip install --upgrade langchain
!pip install --upgrade python-dotenv
!pip install --upgrade pypdf
!pip install --upgrade faiss-cpu
!pip install --upgrade tiktoken

from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
import requests
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.text_splitter import CharacterTextSplitter
from langchain.prompts import PromptTemplate
import pathlib
import subprocess
import tempfile
import pickle
import openai
import os
import IPython
from dotenv import load_dotenv

load_dotenv()
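load_dotenv() picks the API keys up from a local .env file next to the notebook. A minimal sketch of that file, with placeholder values (use your own OpenAI and SerpAPI keys):

OPENAI_API_KEY=sk-...
SERPAPI_API_KEY=...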
# API configuration
openai.api_key = os.getenv("OPENAI_API_KEY")
# for LangChain
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
os.environ["SERPAPI_API_KEY"] = os.getenv("SERPAPI_API_KEY")Loading markdown documents for github using FAISS
def get_github_docs(repo_owner, repo_name):
    # Shallow-clone the repo, then yield one Document per markdown file,
    # using a GitHub URL pinned to the cloned commit as the source.
    with tempfile.TemporaryDirectory() as d:
        subprocess.check_call(
            f"git clone --depth 1 https://github.com/{repo_owner}/{repo_name}.git .",
            cwd=d,
            shell=True,
        )
        git_sha = (
            subprocess.check_output("git rev-parse HEAD", shell=True, cwd=d)
            .decode("utf-8")
            .strip()
        )
        repo_path = pathlib.Path(d)
        markdown_files = list(repo_path.glob("**/*.md")) + list(
            repo_path.glob("**/*.mdx")
        )
        for markdown_file in markdown_files:
            with open(markdown_file, "r") as f:
                relative_path = markdown_file.relative_to(repo_path)
                github_url = f"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}"
                yield Document(page_content=f.read(), metadata={"source": github_url})

def source_docs():
    # return list(get_github_docs("dagster-io", "dagster"))
    # SageMaker docs: awsdocs, amazon-sagemaker-developer-guide
    return list(get_github_docs("awsdocs", "amazon-sagemaker-developer-guide"))

def search_index(source_docs):
    # Split every document into ~1024-character chunks, embed them with
    # OpenAIEmbeddings, and persist the FAISS index as a pickle on disk.
    source_chunks = []
    splitter = CharacterTextSplitter(separator=" ", chunk_size=1024, chunk_overlap=0)
    for source in source_docs:
        for chunk in splitter.split_text(source.page_content):
            source_chunks.append(Document(page_content=chunk, metadata=source.metadata))
    with open("search_index.pickle", "wb") as f:
        pickle.dump(FAISS.from_documents(source_chunks, OpenAIEmbeddings()), f)

chain = load_qa_with_sources_chain(OpenAI(temperature=0))

def print_answer(question):
with open("search_index.pickle", "rb") as f:
search_index = pickle.load(f)
print(
chain(
{
"input_documents": search_index.similarity_search(question, k=4),
"question": question,
},
return_only_outputs=True,
)["output_text"]
)print(search_index(source_docs()))Cloning into '.'...
Created a chunk of size 1056, which is longer than the specified 1024
Created a chunk of size 1807, which is longer than the specified 1024
None
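The "Created a chunk of size ..." warnings come from CharacterTextSplitter: with separator=" " the text is split on spaces, so any single run with no space in it (a long URL or a markdown table, for instance) that exceeds 1024 characters is kept as one oversized chunk. Before asking questions through the chain, the pickled index can be checked directly with the same similarity_search call that print_answer uses; a small sketch (the query string is just an example):

# Sanity-check the index built above: load the pickle and look at the top hits.
with open("search_index.pickle", "rb") as f:
    index = pickle.load(f)

for doc in index.similarity_search("Apache Spark", k=2):
    # Each hit keeps the GitHub URL of its source file in the metadata.
    print(doc.metadata["source"], len(doc.page_content))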
#print_answer("who is the lead singer of matchbox 20")
#print_answer("what are the types of sagemaker endpoints?")
print_answer("Can I use SageMaker for Training and Inference with Apache Spark?") Yes, you can use SageMaker for Training and Inference with Apache Spark.
SOURCES:
https://github.com/awsdocs/amazon-sagemaker-developer-guide/blob/d514c7799d1c934c96e97655b71dbd9cd78cd59b/doc_source/apache-spark.md
https://github.com/awsdocs/amazon-sagemaker-developer-guide/blob/d514c7799d1c934c96e97655b71dbd9cd78cd59b/doc_source/how-it-works-prog-model.md
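load_qa_with_sources_chain uses the "stuff" chain type by default, which pastes all retrieved chunks into a single prompt. With k=4 chunks of roughly 1024 characters that fits the context window, but for larger k or bigger chunks a map-reduce variant can be swapped in; a sketch, assuming the chain_type argument of load_qa_with_sources_chain in your LangChain version:

# Summarize each retrieved chunk separately, then compose the final answer
# from the summaries; more LLM calls, but a much smaller final prompt.
chain_map_reduce = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type="map_reduce")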
Loading from PDF files using FAISS
from langchain.document_loaders import PyPDFLoader
filename = "./example_data/2021-sustainability-report-amazon.pdf"
loader = PyPDFLoader(filename)
pages = loader.load_and_split()
print(f'PDF contains {len(pages)} pages')

PDF contains 133 pages
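Here load_and_split returns one Document per page of the report, and each Document should carry the file path and page number in its metadata, which is what later shows up on the SOURCES line. A quick way to peek at the first page (just an inspection sketch):

# Inspect the first parsed page: its metadata plus the start of its text.
print(pages[0].metadata)
print(pages[0].page_content[:200])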
def search_index_pdf(source_docs):
    # Same chunk-embed-persist pipeline as search_index above, applied to the PDF pages.
    source_chunks = []
    splitter = CharacterTextSplitter(separator=" ", chunk_size=1024, chunk_overlap=0)
    for source in source_docs:
        for chunk in splitter.split_text(source.page_content):
            source_chunks.append(Document(page_content=chunk, metadata=source.metadata))
    with open("search_index.pickle", "wb") as f:
        pickle.dump(FAISS.from_documents(source_chunks, OpenAIEmbeddings()), f)

print(search_index_pdf(pages))

None
print_answer("When is Amazon net-zero carbon?") Amazon is aiming to reach net-zero carbon by 2030.
SOURCES: 2021-sustainability-report-amazon.pdf
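Pickling the whole FAISS wrapper works for a notebook, but LangChain's FAISS store also has its own persistence; a sketch of the alternative, assuming the save_local / load_local methods available in recent LangChain releases:

# Build the index from the PDF pages and persist it to a folder instead of a pickle.
index = FAISS.from_documents(pages, OpenAIEmbeddings())
index.save_local("faiss_index")

# Reload it later with the same embedding model.
index = FAISS.load_local("faiss_index", OpenAIEmbeddings())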