-
Notifications
You must be signed in to change notification settings - Fork 0
/
cv_bankas_scraper_to_sql.py
54 lines (43 loc) · 1.79 KB
/
cv_bankas_scraper_to_sql.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import numpy as np
from bs4 import BeautifulSoup
import requests
import pandas as pd
import sqlite3
pages = np.arange(1, 3, 1)


def get_data(page_numbers=None, timeout=10) -> list[str]:
    """Download raw HTML for each results page of a cvbankas.lt job search.

    Parameters
    ----------
    page_numbers : iterable of int, optional
        Result-page numbers to fetch. Defaults to the module-level ``pages``
        (pages 1 and 2), preserving the original behavior.
    timeout : float, optional
        Per-request timeout in seconds; without one, ``requests.get`` can
        block indefinitely.

    Returns
    -------
    list[str]
        One raw HTML document per fetched page.

    Raises
    ------
    requests.HTTPError
        If a page responds with a 4xx/5xx status.
    """
    if page_numbers is None:
        page_numbers = pages
    texts = []
    for number in page_numbers:
        url = f'https://www.cvbankas.lt/?keyw=Python&page={number}'
        response = requests.get(url, timeout=timeout)
        # Fail loudly on HTTP errors instead of silently parsing an error page.
        response.raise_for_status()
        texts.append(response.text)
    return texts
def parse_required_fields(texts) -> list[dict]:
    """Extract job-ad fields from raw cvbankas.lt search-results HTML.

    Parameters
    ----------
    texts : str | list[str]
        A single HTML document or a list of them (as returned by
        ``get_data``).

    Returns
    -------
    list[dict]
        One dict per ad with keys ``index``, ``title``, ``company_name``,
        ``city_or_country``, ``advertisement_link`` and ``salary``
        (``0`` when the ad lists no salary).
    """
    # Join the pages into one HTML blob. The original used str(texts), which
    # produced a Python list repr ("['<html>...', ...]") with escaped quotes.
    if not isinstance(texts, str):
        texts = ''.join(texts)
    soup = BeautifulSoup(texts, 'lxml')
    jobs = soup.find_all('article', class_='list_article list_article_rememberable jobadlist_list_article_rememberable')
    result = []
    for index, job in enumerate(jobs, 1):
        # get_text(strip=True) trims surrounding whitespace/newlines without
        # deleting the spaces *inside* multi-word titles, which the original
        # replace('\\n', '').replace(' ', '') chain destroyed (and the '\\n'
        # pattern matched a literal backslash-n, not a real newline).
        job_item = {
            'index': index,
            'title': job.find('h3', class_='list_h3').get_text(strip=True),
            'company_name': job.find('span', class_='dib mt5').get_text(strip=True),
            'city_or_country': job.find('span', class_='list_city').get_text(strip=True),
            'advertisement_link': job.find('a', class_='list_a can_visited list_a_has_logo').attrs['href'].strip(),
        }
        try:
            job_item['salary'] = job.find('span', class_='salary_amount').get_text(strip=True)
        except AttributeError:
            # find() returned None — the ad has no salary span; keep the
            # original 0 sentinel for backward compatibility.
            job_item['salary'] = 0
        result.append(job_item)
    return result
def write_to_sql() -> None:
    """Scrape the job ads and persist them into a local SQLite 'jobs' table."""
    # File-based database created in the current working directory
    # (no extension — presumably intentional; verify against deployment).
    conn = sqlite3.connect('test_database')
    c = conn.cursor()
    # NOTE(review): committing before any writes is a no-op here.
    conn.commit()
    # Fetch pages 1-2 and parse them into a list of per-ad dicts.
    jobs = parse_required_fields(get_data())
    df = pd.DataFrame(jobs, columns=['index', 'title', 'company_name',
    'city_or_country', 'advertisement_link', 'salary'])
    # Drop and recreate the table on every run; index=False stops pandas from
    # writing its own row index (the 'index' column here is ordinary data).
    df.to_sql('jobs', conn, if_exists='replace', index=False)
    # NOTE(review): the result of this SELECT is not consumed in the visible
    # lines; the connection is also not closed here — confirm the remainder
    # of the function (not visible in this chunk) handles both.
    c.execute('''
SELECT * FROM jobs
''')