{
  "Название": "",
  "Статус": "",
  "Время окончания приема заявок": "",
  "Полное описание условий конкурса": "",
  "Документы": {
    "Формы заявок": "",
    "Договора и инструкции": "",
    "Формы отчетов": ""
  }
}
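The scripts below fill this template, one record per contest. As a quick sanity check, the finished out.json can be compared against it. The sketch below is not part of the original scripts; it only assumes that out.json sits next to the script and contains a list of records (the "Документы" key is produced only by the second script, so it is treated as optional here).

import json

# Keys that every record must carry; "Документы" is optional because only
# the second script below produces it.
REQUIRED_KEYS = {
    'Название',
    'Статус',
    'Время окончания приема заявок',
    'Полное описание условий конкурса',
}

with open('out.json', encoding='utf-8') as f:
    records = json.load(f)

for n, record in enumerate(records):
    missing = REQUIRED_KEYS - record.keys()
    if missing:
        print('record {}: missing keys {}'.format(n, sorted(missing)))
print('checked {} records'.format(len(records)))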
import requests
import json
from bs4 import BeautifulSoup
import xlsxwriter

PAGES_COUNT = 10
OUT_FILENAME = 'out.json'
OUT_XLSX_FILENAME = 'out.xlsx'
def get_soup(url, **kwargs):
    response = requests.get(url, **kwargs)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, features='html.parser')
    else:
        soup = None
    return soup
def crawl_products(pages_count):
    urls = []
    fmt = 'https://www.rfbr.ru/rffi/ru/contest?CONTEST_ITEMS=7&order=2&page={page}'
    for page_n in range(1, 1 + pages_count):
        print('page: {}'.format(page_n))
        page_url = fmt.format(page=page_n)
        soup = get_soup(page_url)
        if soup is None:
            break
        for tag in soup.select('.tr .link'):
            href = tag.attrs['href']
            url = 'https://www.rfbr.ru/rffi/ru/contest{}'.format(href)
            urls.append(url)
    return urls
def parse_products(urls):
    data = []
    for url in urls:
        print('product: {}'.format(url))
        soup = get_soup(url)
        if soup is None:
            break
        name = soup.select_one('a.link').text.strip()
        # Both the status and the deadline sit in cells with the "ta-c" class.
        # Assumption: the first such cell is the status, the second is the
        # application deadline (the original code read the same first cell twice).
        cells = soup.select('.ta-c')
        status = cells[0].text.strip() if len(cells) > 0 else ''
        deadline = cells[1].text.strip() if len(cells) > 1 else ''
        fullrule = soup.select_one('p').text.strip()
        item = {
            'Название': name,
            'Статус': status,
            'Время окончания приема заявок': deadline,
            'Полное описание условий конкурса': fullrule,
        }
        data.append(item)
    return data
def dump_to_json(filename, data, **kwargs):
    kwargs.setdefault('ensure_ascii', False)
    kwargs.setdefault('indent', 1)
    # Write to the filename that was passed in, not the global constant.
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, **kwargs)
def dump_to_xlsx(filename, data):
    if not data:
        return None
    with xlsxwriter.Workbook(filename) as workbook:
        ws = workbook.add_worksheet()
        bold = workbook.add_format({'bold': True})
        headers = ['Название', 'Статус', 'Время окончания приема заявок',
                   'Полное описание условий конкурса']
        for col, h in enumerate(headers):
            ws.write_string(0, col, h, cell_format=bold)
        for row, item in enumerate(data, start=1):
            ws.write_string(row, 0, item['Название'])
            ws.write_string(row, 1, item['Статус'])
            ws.write_string(row, 2, item['Время окончания приема заявок'])
            ws.write_string(row, 3, item['Полное описание условий конкурса'])
def main():
    urls = crawl_products(PAGES_COUNT)
    data = parse_products(urls)
    dump_to_json(OUT_FILENAME, data)
    dump_to_xlsx(OUT_XLSX_FILENAME, data)

if __name__ == '__main__':
    main()
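One rough edge of the script above: requests.get is called without a timeout, so a single stalled connection can hang the whole crawl. A possible variant of get_soup with a timeout and a couple of retries is sketched below; the retry count, the delay and the 30-second timeout are arbitrary choices, not part of the original script.

import time

import requests
from bs4 import BeautifulSoup

def get_soup_with_retries(url, retries=3, delay=2.0, **kwargs):
    # Same contract as get_soup(): BeautifulSoup on HTTP 200, otherwise None.
    kwargs.setdefault('timeout', 30)
    for attempt in range(1, retries + 1):
        try:
            response = requests.get(url, **kwargs)
        except requests.RequestException as exc:
            print('attempt {} for {} failed: {}'.format(attempt, url, exc))
            time.sleep(delay)
            continue
        if response.status_code == 200:
            return BeautifulSoup(response.text, features='html.parser')
        return None
    return None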
import requests
import json
import re
import warnings
from bs4 import BeautifulSoup

PAGES_COUNT = 100
OUT_FILENAME = 'out.json'

# Requests are made with verify=False below; silence the resulting
# urllib3 InsecureRequestWarning to keep the log readable.
warnings.filterwarnings("ignore")
def get_soup(url, **kwargs):
    # TLS certificate verification is disabled for this site; the related
    # warnings are silenced above.
    response = requests.get(url, **kwargs, verify=False)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, features='html.parser')
    else:
        soup = None
    return soup
def crawl_products(pages_count):
    urls = []
    fmt = 'https://www.rfbr.ru/rffi/ru/contest?CONTEST_STATUS_ID=-1&CONTEST_TYPE=-1&CONTEST_YEAR=-1&page={page}'
    for page_n in range(1, 1 + pages_count):
        print('page: {}'.format(page_n))
        page_url = fmt.format(page=page_n)
        soup = get_soup(page_url)
        if soup is None:
            break
        for tag in soup.select('.tr .link'):
            href = tag.attrs['href']
            url = 'https://www.rfbr.ru/rffi/ru/contest{}'.format(href)
            urls.append(url)
    return urls
def parse_products(urls):
    data = []
    for url in urls:
        print('product: {}'.format(url))
        soup = get_soup(url)
        if soup is None:
            break

        # Contest title: the page's <h1> heading.
        name = ''
        for h1 in soup.find_all("h1"):
            name = h1.text

        # Application deadline: taken from a div inside <main class="template__main">.
        # The chain of substitutions strips the Cyrillic label, the HTML tags,
        # underscores and spaces, leaving the date part of the line.
        deadline = ''
        for main_tag in soup.find_all("main", {"class": "template__main"}):
            for div in main_tag.find_all("div", {"class": "sfc l-3 mt-5 mb-10 lh-xl"}):
                text = re.sub(r'[^\x00-\x7f]', r'', str(div))  # drop non-ASCII (Cyrillic) characters
                text = re.sub(r'\<[^>]*\>', '', text)          # drop HTML tags
                text = re.sub(r'_', '', text)                  # drop underscores
                text = re.sub(r' ', '', text)                  # drop spaces
                text = re.sub(r' :', '', text)                 # drop any leftover " :"
                deadline = text[13:]                           # cut the fixed-length prefix

        # Document sections ("Формы заявок", "Договора и инструкции", "Формы отчетов"):
        # each <div class="list-in article"> block holds an <h2> heading and an <ol>
        # list of documents; the list is joined into one comma-separated string.
        document = {}
        for section in soup(attrs={"class": "list-in article"}):
            if section.h2 and section.ol:
                # Alternative: keep the entries as a list instead of a joined string:
                # document[section.h2.text] = list(section.ol.stripped_strings)
                document[section.h2.text] = ', '.join(section.ol.stripped_strings)

        # Full description: paragraphs (or, failing that, <strong> tags) whose
        # text starts with the word 'Условия'.
        article = [p.get_text(strip=True) for p in soup.find_all("p")
                   if p.get_text(strip=True).startswith('Условия')]
        if not article:
            article = [s.get_text(strip=True) for s in soup.find_all("strong")
                       if s.get_text(strip=True).startswith('Условия')]
        art = ' '.join(article)

        item = {
            'Название': name,
            'Статус': 'Заявки не принимаются',  # status is hard-coded in this version
            'Время окончания приема заявок': deadline,
            'Полное описание условий конкурса': art,
            'Документы': document,
        }
        data.append(item)
    return data
def dump_to_json(filename, data, **kwargs):
    kwargs.setdefault('ensure_ascii', False)
    kwargs.setdefault('indent', 1)
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, **kwargs)

def main():
    urls = crawl_products(PAGES_COUNT)
    data = parse_products(urls)
    dump_to_json(OUT_FILENAME, data)

if __name__ == '__main__':
    main()
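The second script writes only JSON. If the same spreadsheet output as in the first script is wanted, its dump_to_xlsx helper can be extended with the three document columns. The sketch below is an assumption-laden illustration, not part of the original code: it assumes the section headings on the site match the keys in the template at the top of the page and that every 'Документы' value is the comma-joined string produced above; dump_to_xlsx_with_docs is a name introduced here.

import xlsxwriter

DOC_SECTIONS = ['Формы заявок', 'Договора и инструкции', 'Формы отчетов']

def dump_to_xlsx_with_docs(filename, data):
    # Same layout as dump_to_xlsx in the first script, plus one column
    # per document section; missing keys are written as empty strings.
    if not data:
        return
    headers = ['Название', 'Статус', 'Время окончания приема заявок',
               'Полное описание условий конкурса'] + DOC_SECTIONS
    with xlsxwriter.Workbook(filename) as workbook:
        ws = workbook.add_worksheet()
        bold = workbook.add_format({'bold': True})
        for col, h in enumerate(headers):
            ws.write_string(0, col, h, cell_format=bold)
        for row, item in enumerate(data, start=1):
            docs = item.get('Документы', {})
            values = [
                item.get('Название', ''),
                item.get('Статус', ''),
                item.get('Время окончания приема заявок', ''),
                item.get('Полное описание условий конкурса', ''),
            ] + [docs.get(section, '') for section in DOC_SECTIONS]
            for col, value in enumerate(values):
                ws.write_string(row, col, str(value))

If used, it would be called right after dump_to_json in main(), for example dump_to_xlsx_with_docs('out.xlsx', data).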