Skip to content

Commit 84934c1

Browse files
parse faq notebook
1 parent 7cb8b47 commit 84934c1

File tree

2 files changed

+216
-0
lines changed

2 files changed

+216
-0
lines changed

01-intro/README.md

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,3 +26,18 @@ Video
2626

2727
Video - codespaces
2828

29+
* Installing libraries
30+
* Alternative: installing anaconda or miniconda
31+
32+
```bash
33+
pip install tqdm notebook==7.1.2 openai elasticsearch pandas scikit-learn
34+
```
35+
36+
## 1.3 Retrieval
37+
38+
Video
39+
40+
* We will use the search engine we built in the [build-your-own-search-engine workshop](https://github.com/alexeygrigorev/build-your-own-search-engine): [minsearch](https://github.com/alexeygrigorev/minsearch)
41+
* Indexing the documents
42+
* Performing the search
43+

01-intro/parse-faq.ipynb

Lines changed: 201 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,201 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": 8,
6+
"id": "4cd1eaa8-3424-41ad-9cf2-3e8548712865",
7+
"metadata": {},
8+
"outputs": [],
9+
"source": [
10+
"import io\n",
11+
"\n",
12+
"import requests\n",
13+
"import docx"
14+
]
15+
},
16+
{
17+
"cell_type": "code",
18+
"execution_count": 24,
19+
"id": "8180e7e4-b90d-4900-a59b-d22e5d6537c4",
20+
"metadata": {},
21+
"outputs": [],
22+
"source": [
23+
"def clean_line(line):\n",
24+
" line = line.strip()\n",
25+
" line = line.strip('\\uFEFF')\n",
26+
" return line\n",
27+
"\n",
28+
"def read_faq(file_id):\n",
29+
" url = f'https://docs.google.com/document/d/{file_id}/export?format=docx'\n",
30+
" \n",
31+
" response = requests.get(url)\n",
32+
" response.raise_for_status()\n",
33+
" \n",
34+
" with io.BytesIO(response.content) as f_in:\n",
35+
" doc = docx.Document(f_in)\n",
36+
"\n",
37+
" questions = []\n",
38+
"\n",
39+
" question_heading_style = 'heading 2'\n",
40+
" section_heading_style = 'heading 1'\n",
41+
" \n",
42+
" heading_id = ''\n",
43+
" section_title = ''\n",
44+
" question_title = ''\n",
45+
" answer_text_so_far = ''\n",
46+
" \n",
47+
" for p in doc.paragraphs:\n",
48+
" style = p.style.name.lower()\n",
49+
" p_text = clean_line(p.text)\n",
50+
" \n",
51+
" if len(p_text) == 0:\n",
52+
" continue\n",
53+
" \n",
54+
" if style == section_heading_style:\n",
55+
" section_title = p_text\n",
56+
" continue\n",
57+
" \n",
58+
" if style == question_heading_style:\n",
59+
" answer_text_so_far = answer_text_so_far.strip()\n",
60+
" if answer_text_so_far != '' and section_title != '' and question_title != '':\n",
61+
" questions.append({\n",
62+
" 'text': answer_text_so_far,\n",
63+
" 'section': section_title,\n",
64+
" 'question': question_title,\n",
65+
" })\n",
66+
" answer_text_so_far = ''\n",
67+
" \n",
68+
" question_title = p_text\n",
69+
" continue\n",
70+
" \n",
71+
" answer_text_so_far += '\\n' + p_text\n",
72+
" \n",
73+
" answer_text_so_far = answer_text_so_far.strip()\n",
74+
" if answer_text_so_far != '' and section_title != '' and question_title != '':\n",
75+
" questions.append({\n",
76+
" 'text': answer_text_so_far,\n",
77+
" 'section': section_title,\n",
78+
" 'question': question_title,\n",
79+
" })\n",
80+
"\n",
81+
" return questions"
82+
]
83+
},
84+
{
85+
"cell_type": "code",
86+
"execution_count": 25,
87+
"id": "7d3c2dd7-f64a-4dc7-a4e3-3e8aadfa720f",
88+
"metadata": {},
89+
"outputs": [],
90+
"source": [
91+
"faq_documents = {\n",
92+
" 'data-engineering-zoomcamp': '19bnYs80DwuUimHM65UV3sylsCn2j1vziPOwzBwQrebw',\n",
93+
" 'machine-learning-zoomcamp': '1LpPanc33QJJ6BSsyxVg-pWNMplal84TdZtq10naIhD8',\n",
94+
" 'mlops-zoomcamp': '12TlBfhIiKtyBv8RnsoJR6F72bkPDGEvPOItJIxaEzE0',\n",
95+
"}"
96+
]
97+
},
98+
{
99+
"cell_type": "code",
100+
"execution_count": 27,
101+
"id": "f94efe26-05e8-4ae5-a0fa-0a8e16852816",
102+
"metadata": {},
103+
"outputs": [
104+
{
105+
"name": "stdout",
106+
"output_type": "stream",
107+
"text": [
108+
"data-engineering-zoomcamp\n",
109+
"machine-learning-zoomcamp\n",
110+
"mlops-zoomcamp\n"
111+
]
112+
}
113+
],
114+
"source": [
115+
"documents = []\n",
116+
"\n",
117+
"for course, file_id in faq_documents.items():\n",
118+
" print(course)\n",
119+
" course_documents = read_faq(file_id)\n",
120+
" documents.append({'course': course, 'documents': course_documents})"
121+
]
122+
},
123+
{
124+
"cell_type": "code",
125+
"execution_count": 29,
126+
"id": "06b8d8be-f656-4cc3-893f-b159be8fda21",
127+
"metadata": {},
128+
"outputs": [],
129+
"source": [
130+
"import json"
131+
]
132+
},
133+
{
134+
"cell_type": "code",
135+
"execution_count": 32,
136+
"id": "30d50bc1-8d26-44ee-8734-cafce05e0523",
137+
"metadata": {},
138+
"outputs": [],
139+
"source": [
140+
"with open('documents.json', 'wt') as f_out:\n",
141+
" json.dump(documents, f_out, indent=2)"
142+
]
143+
},
144+
{
145+
"cell_type": "code",
146+
"execution_count": 33,
147+
"id": "0eabb1c6-5cc6-4d4d-a6da-e27d41cea546",
148+
"metadata": {},
149+
"outputs": [
150+
{
151+
"name": "stdout",
152+
"output_type": "stream",
153+
"text": [
154+
"[\n",
155+
" {\n",
156+
" \"course\": \"data-engineering-zoomcamp\",\n",
157+
" \"documents\": [\n",
158+
" {\n",
159+
" \"text\": \"The purpose of this document is to capture frequently asked technical questions\\nThe exact day and hour of the course will be 15th Jan 2024 at 17h00. The course will start with the first \\u201cOffice Hours'' live.1\\nSubscribe to course public Google Calendar (it works from Desktop only).\\nRegister before the course starts using this link.\\nJoin the course Telegram channel with announcements.\\nDon\\u2019t forget to register in DataTalks.Club's Slack and join the channel.\",\n",
160+
" \"section\": \"General course-related questions\",\n",
161+
" \"question\": \"Course - When will the course start?\"\n",
162+
" },\n",
163+
" {\n"
164+
]
165+
}
166+
],
167+
"source": [
168+
"!head documents.json"
169+
]
170+
},
171+
{
172+
"cell_type": "code",
173+
"execution_count": null,
174+
"id": "1b21af5c-2f6d-49e7-92e9-ca229e2473b9",
175+
"metadata": {},
176+
"outputs": [],
177+
"source": []
178+
}
179+
],
180+
"metadata": {
181+
"kernelspec": {
182+
"display_name": "Python 3 (ipykernel)",
183+
"language": "python",
184+
"name": "python3"
185+
},
186+
"language_info": {
187+
"codemirror_mode": {
188+
"name": "ipython",
189+
"version": 3
190+
},
191+
"file_extension": ".py",
192+
"mimetype": "text/x-python",
193+
"name": "python",
194+
"nbconvert_exporter": "python",
195+
"pygments_lexer": "ipython3",
196+
"version": "3.9.13"
197+
}
198+
},
199+
"nbformat": 4,
200+
"nbformat_minor": 5
201+
}

0 commit comments

Comments
 (0)