import requests
from bs4 import BeautifulSoup
import csv
HOST = 'https://www.dns-shop.ru'
URL = 'https://www.dns-shop.ru/catalog/17a8932c16404e77/personalnye-kompyutery/'
HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36'
}
FILE = 'laptops.csv'

def get_html(url, params=None):
    # Fetch a page and return the whole Response (status code + text).
    r = requests.get(url, headers=HEADERS, params=params)
    return r

def get_content(html):
    # Parse the catalog page and pull the name and price out of every product card.
    soup = BeautifulSoup(html, 'html.parser')
    items = soup.find_all('div', class_='catalog-product')
    laptops = []
    for item in items:
        laptops.append({
            'title': item.find('div', class_='catalog-product__name').find('span').get_text(strip=True),
            'price': item.find('div', class_='product-buy').find('div', class_='product-buy__price-wrap').find('div', class_='product-buy__price').get_text(strip=True),
        })
    return laptops

def save_file(items, path):
    # encoding='utf-8' so the Cyrillic header and titles are written correctly regardless of the OS default encoding
    with open(path, 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file, delimiter=';')
        writer.writerow(['Название товара', 'Цена'])
        for item in items:
            writer.writerow([item['title'], item['price']])

def parse():
    pagination = int(input('Количество страниц: ').strip())
    html = get_html(URL)
    if html.status_code == 200:
        laptops = []
        for page in range(1, pagination + 1):
            print(f'Идет парсинг страницы {page}')
            html = get_html(URL, params={'p': page})
            laptops.extend(get_content(html.text))
        save_file(laptops, FILE)
        print(laptops)
        print(len(laptops))
    else:
        print('Error')
parse()
I can't find the error. The output I get is an empty list.
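
One possible explanation to check: the catalog markup might be rendered client-side by JavaScript, or the request might be answered with an anti-bot stub page, in which case 'catalog-product' never appears in r.text and find_all() correctly returns nothing. A minimal diagnostic sketch, reusing get_html and URL from the code above ('debug.html' is just an arbitrary file name for manual inspection):

r = get_html(URL, params={'p': 1})
print(r.status_code)                    # 200 alone does not prove we got the real catalog page
print(len(r.text))                      # a suspiciously short body hints at a stub / anti-bot page
print('catalog-product' in r.text)      # False would explain the empty list: nothing for find_all() to match
with open('debug.html', 'w', encoding='utf-8') as f:
    f.write(r.text)                     # open this file and search for 'catalog-product' by hand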