import pandas as pd
from tqdm import tqdm
import pinecone
import torch
from sentence_transformers import SentenceTransformer
from transformers import (
    pipeline,
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
)
import streamlit as st
import openai
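
# Streamlit app for retrieval-augmented abstractive QA over earnings-call
# transcripts: contexts are retrieved from Pinecone with a sentence-transformer
# encoder and condensed into an answer by T5, FLAN-T5, or the OpenAI API.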


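# Load the pre-processed transcript sentences; cached so the CSV is read only once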
@st.experimental_singleton
def get_data():
    data = pd.read_csv("earnings_calls_sentencewise.csv")
    return data


# Initialize models from HuggingFace


@st.experimental_singleton
def get_t5_model():
    return pipeline("summarization", model="t5-small", tokenizer="t5-small")


@st.experimental_singleton
def get_flan_t5_model():
    return pipeline(
        "summarization", model="google/flan-t5-small", tokenizer="google/flan-t5-small"
    )


@st.experimental_singleton
def get_mpnet_embedding_model():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = SentenceTransformer(
        "sentence-transformers/all-mpnet-base-v2", device=device
    )
    model.max_seq_length = 512
    return model


@st.experimental_singleton
def get_sgpt_embedding_model():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = SentenceTransformer(
        "Muennighoff/SGPT-125M-weightedmean-nli-bitfit", device=device
    )
    model.max_seq_length = 512
    return model


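# Cache the user-supplied OpenAI API key across Streamlit reruns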
@st.experimental_memo
def save_key(api_key):
    return api_key


def query_pinecone(query, top_k, model, index, threshold=0.5):
    # generate embeddings for the query
    xq = model.encode([query]).tolist()
    # search pinecone index for context passage with the answer
    xc = index.query(xq, top_k=top_k, include_metadata=True)
    # filter the context passages based on the score threshold
    filtered_matches = []
    for match in xc["matches"]:
        if match["score"] >= threshold:
            filtered_matches.append(match)
    xc["matches"] = filtered_matches
    return xc


def format_query(query_results):
    # extract passage_text from Pinecone search result
    context = [result["metadata"]["Text"] for result in query_results["matches"]]
    return context


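# Expand each retrieved sentence into a window of +/- `lag` neighbouring
# sentences so the generator sees surrounding context, not a lone sentence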
def sentence_id_combine(data, query_results, lag=2):
    # Extract sentence IDs from query results (cast to int for positional indexing)
    ids = [int(result["metadata"]["Sentence_id"]) for result in query_results["matches"]]
    # Generate new IDs by adding a lag value to the original IDs
    new_ids = [sid + i for sid in ids for i in range(-lag, lag + 1)]
    # Remove duplicates, drop out-of-range IDs, and sort
    new_ids = sorted({i for i in new_ids if 0 <= i < len(data)})
    # Create a list of lookup IDs by grouping the new IDs in groups of lag*2+1
    lookup_ids = [
        new_ids[i : i + (lag * 2 + 1)] for i in range(0, len(new_ids), lag * 2 + 1)
    ]
    # Create a list of context sentences by joining the sentences corresponding to the lookup IDs
    context_list = [
        ". ".join(data.Text.iloc[lookup_id].to_list()) for lookup_id in lookup_ids
    ]
    return context_list


def text_lookup(data, sentence_ids):
    context = ". ".join(data.Text.iloc[sentence_ids].to_list())
    return context


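# Summarize text with the OpenAI Completion API using a trailing "Tl;dr" prompt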
def gpt3_summary(text):
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=text + "\n\nTl;dr",
        temperature=0.1,
        max_tokens=512,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=1,
    )
    return response.choices[0].text


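# Answer a question with the OpenAI Completion API via a simple Q/A prompt;
# generation stops at the first newline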
def gpt3_qa(query, answer):
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt="Q: " + query + "\nA: " + answer,
        temperature=0,
        max_tokens=512,
        top_p=1,
        frequency_penalty=0.0,
        presence_penalty=0.0,
        stop=["\n"],
    )
    return response.choices[0].text


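# Build the Streamlit UI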
st.title("Abstractive Question Answering")

st.write(
    "The app uses the quarterly earnings call transcripts for 10 companies (Apple, AMD, Amazon, Cisco, Google, Microsoft, Nvidia, ASML, Intel, Micron) for the years 2016 to 2020."
)

query_text = st.text_input("Input Query", value="Who is the CEO of Apple?")

num_results = int(st.number_input("Number of Results to query", 1, 5, value=3))


# Choose encoder model

encoder_models_choice = ["SGPT", "MPNET"]

encoder_model = st.selectbox("Select Encoder Model", encoder_models_choice)


# Choose decoder model

decoder_models_choice = ["FLAN-T5", "T5", "GPT3 (QA_davinci)", "GPT3 (summary_davinci)"]

decoder_model = st.selectbox("Select Decoder Model", decoder_models_choice)


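# Each encoder is paired with its own Pinecone index, built with the matching
# embedding model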
if encoder_model == "MPNET":
    # Connect to pinecone environment
    pinecone.init(api_key=st.secrets["pinecone_mpnet"], environment="us-east1-gcp")
    pinecone_index_name = "week2-all-mpnet-base"
    pinecone_index = pinecone.Index(pinecone_index_name)
    retriever_model = get_mpnet_embedding_model()

elif encoder_model == "SGPT":
    # Connect to pinecone environment
    pinecone.init(api_key=st.secrets["pinecone_sgpt"], environment="us-east1-gcp")
    pinecone_index_name = "week2-sgpt-125m"
    pinecone_index = pinecone.Index(pinecone_index_name)
    retriever_model = get_sgpt_embedding_model()


window = int(st.number_input("Sentence Window Size", 0, 3, value=0))

threshold = float(
    st.number_input(
        label="Similarity Score Threshold", step=0.05, format="%.2f", value=0.55
    )
)

data = get_data()

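# Retrieve the top-k most similar sentences for the query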
query_results = query_pinecone(
    query_text, num_results, retriever_model, pinecone_index, threshold
)

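# With a permissive threshold (<= 0.60), expand the matches with neighbouring
# sentences; with a stricter threshold, use the matched sentences as-is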
if threshold <= 0.60:
    context_list = sentence_id_combine(data, query_results, lag=window)
else:
    context_list = format_query(query_results)


st.subheader("Answer:")


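# Generate the answer: run the chosen decoder over each context chunk, then
# pass the combined outputs through it once more to produce the final answer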
if decoder_model == "GPT3 (summary_davinci)":
    openai_key = st.text_input(
        "Enter OpenAI key",
        value=st.secrets["openai_key"],
        type="password",
    )
    api_key = save_key(openai_key)
    openai.api_key = api_key
    output_text = []
    for context_text in context_list:
        output_text.append(gpt3_summary(context_text))
    generated_text = ". ".join(output_text)
    st.write(gpt3_summary(generated_text))

elif decoder_model == "GPT3 (QA_davinci)":
    openai_key = st.text_input(
        "Enter OpenAI key",
        value=st.secrets["openai_key"],
        type="password",
    )
    api_key = save_key(openai_key)
    openai.api_key = api_key
    output_text = []
    for context_text in context_list:
        output_text.append(gpt3_qa(query_text, context_text))
    generated_text = ". ".join(output_text)
    st.write(gpt3_qa(query_text, generated_text))

elif decoder_model == "T5":
    t5_pipeline = get_t5_model()
    output_text = []
    for context_text in context_list:
        output_text.append(t5_pipeline(context_text)[0]["summary_text"])
    generated_text = ". ".join(output_text)
    st.write(t5_pipeline(generated_text)[0]["summary_text"])

elif decoder_model == "FLAN-T5":
    flan_t5_pipeline = get_flan_t5_model()
    output_text = []
    for context_text in context_list:
        output_text.append(flan_t5_pipeline(context_text)[0]["summary_text"])
    generated_text = ". ".join(output_text)
    st.write(flan_t5_pipeline(generated_text)[0]["summary_text"])

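# Optionally display the retrieved context passages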
show_retrieved_text = st.checkbox("Show Retrieved Text", value=False)

if show_retrieved_text:
    st.subheader("Retrieved Text:")
    for context_text in context_list:
        st.markdown(f"- {context_text}")