import requests
from bs4 import BeautifulSoup

# Fetch the procurement procedure page and pull the customer details
# out of the second 'lot-item__data-table' table.
response = requests.get('https://www.roseltorg.ru/procedure/0114500000820000578')
soup = BeautifulSoup(response.text, "html.parser")
table = soup.find_all('table', class_='lot-item__data-table')[1]
info_ = table.find_all('p', class_='data-table__info')
inn = info_[0].text
name = info_[1].text
address = info_[2].text
phone = info_[3].text
mail = info_[4].text
place = info_[5].text
print(f'''INN: {inn}
Organization name: {name}
Address: {address}
Phone: {phone}
E-mail: {mail}
Venue: {place}
''')
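# The indexing above assumes the page always has at least two matching
# tables and six info cells; a minimal defensive sketch of the same
# extraction (the label names are illustrative):
response = requests.get('https://www.roseltorg.ru/procedure/0114500000820000578')
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
tables = soup.find_all('table', class_='lot-item__data-table')
if len(tables) > 1:
    labels = ['INN', 'Organization name', 'Address', 'Phone', 'E-mail', 'Venue']
    cells = tables[1].find_all('p', class_='data-table__info')
    for label, cell in zip(labels, cells):
        print(f'{label}: {cell.text.strip()}')
else:
    print('Expected data table not found; the markup may have changed.')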
# Log in to edu.tatar.ru and reuse the authenticated session.
# A browser-like User-Agent; some sites reject requests without one.
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) '
                         'Gecko/20100101 Firefox/72.0'}
s = requests.Session()
s.headers.update(headers)
# 'login' and 'pass' are placeholder credentials.
s.post('https://edu.tatar.ru/logon', data={'main_login': 'login', 'main_password': 'pass'})
response = s.get('https://edu.tatar.ru')
print(response.text)
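# The POST alone does not prove the login succeeded. One pragmatic check
# (the marker is an assumption, not a documented contract): after a
# successful login the returned page should no longer contain the login form.
login = s.post('https://edu.tatar.ru/logon',
               data={'main_login': 'login', 'main_password': 'pass'})
if login.ok and 'main_login' not in login.text:
    print('Login looks successful')
else:
    print('Login appears to have failed; check the credentials')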
import requests
from bs4 import BeautifulSoup

# Scrape the three COVID-19 counters from the dashboard page.
url = 'https://koronavirusa.site/ru'
page = requests.get(url)
soup = BeautifulSoup(page.text, "html.parser")
container = soup.find('div', class_='sppb-container-inner')
data = container.find_all('div', class_='sppb-animated-number')
infected = data[0].text
died = data[1].text
healed = data[2].text
print(f'''Infected: {infected}
Died: {died}
Recovered: {healed}''')
# Output:
# Infected: 1,990,746
# Died: 125,919
# Recovered: 466,997
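# The counters arrive as strings like '1,990,746'; a small helper
# (illustrative, not part of the site's API) turns them into integers
# so they can be used in arithmetic:
def to_int(value):
    return int(value.replace(',', '').replace('\xa0', '').strip())

active = to_int(infected) - to_int(died) - to_int(healed)
print(f'Active cases: {active:,}')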
import requests
from bs4 import BeautifulSoup as bs

def parser():
    # Walk the first nine pages of listings and save title + link pairs.
    HOST = "https://www.avito.ru"
    max_pages = 9
    with open("test.txt", "w") as file:
        for x in range(1, max_pages + 1):
            page = requests.get("https://www.avito.ru/moskovskaya_oblast_krasnogorsk/igry_pristavki_i_programmy?p=" + str(x))
            soup = bs(page.content, "html.parser")
            items = soup.find_all("div", class_="snippet-horizontal")
            for item in items:
                link_tag = item.find("a", class_="snippet-link")
                title = link_tag.get_text(strip=True)
                link = link_tag.get("href")
                print(f"{title} {HOST}{link}\n")
                file.write(f"{title} {HOST}{link}\n")

parser()
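# Avito is quick to throttle bare scripted requests; a hedged variant of
# the page fetch with a browser-like User-Agent (illustrative value) and
# a pause between pages keeps the request rate modest:
import time

HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:72.0) '
                         'Gecko/20100101 Firefox/72.0'}

def fetch_listing_page(x):
    url = ("https://www.avito.ru/moskovskaya_oblast_krasnogorsk/"
           "igry_pristavki_i_programmy?p=" + str(x))
    page = requests.get(url, headers=HEADERS)
    time.sleep(1)  # be polite between page loads
    return page

# e.g. page = fetch_listing_page(1)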
nRow, nCol = ax.shape  # unpack the number of rows and columns of the array
nRow = ax.shape[0]     # rows only; ax.shape itself is the whole tuple
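# A concrete illustration with a hypothetical 3x4 array:
import numpy as np

ax = np.zeros((3, 4))
nRow, nCol = ax.shape   # nRow == 3, nCol == 4
print(ax.shape)         # the full tuple: (3, 4)
print(ax.shape[0])      # rows only: 3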
# sdamgia paginates its problem list over AJAX; 'skip' offsets the results.
url = 'https://phys-ege.sdamgia.ru/test?filter=all&category_id=205'
response = requests.post(url, data={'ajax': '1', 'skip': '10'})
soup = BeautifulSoup(response.text, "html.parser")
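# The step of 10 is inferred from the request above; a sketch that walks
# several AJAX pages of the problem list:
for skip in range(0, 50, 10):
    chunk = requests.post(url, data={'ajax': '1', 'skip': str(skip)})
    chunk_soup = BeautifulSoup(chunk.text, "html.parser")
    # ...extract the problems from each chunk here...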
import requests
from bs4 import BeautifulSoup
import time

# Pull player nicknames from the top-players table across five pages.
# The session cookie and a browser-like User-Agent are required for access.
cookies = {'PHPSESSID': 'c3a1cde86c8c8c9f0e3877403ad4935e.1585230344.54638591'}
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) '
                         'Gecko/20100101 Firefox/72.0'}

with open('ParsNick.txt', 'a') as mrush:
    for i in range(1, 6):
        html_text = requests.get(f"http://mrush.mobi/best?pvp=0&page={i}",
                                 cookies=cookies, headers=headers).text
        soup = BeautifulSoup(html_text, 'html.parser')
        table = soup.find('table', {"class": "cntr wa mlra"})
        names = table.find_all('a', class_='lwhite')
        for name in names:
            nick = name.text
            print(nick)
            mrush.write(nick + '\n')
        time.sleep(1)  # pause between pages
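# Since the cookie jar and headers repeat on every request, a
# requests.Session keeps them in one place (sketch of the same fetch):
session = requests.Session()
session.cookies.update(cookies)
session.headers.update(headers)
html_text = session.get('http://mrush.mobi/best?pvp=0&page=1').text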
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

# Reuse an existing Chrome profile so the session starts logged in.
# Selenium 4 style: the driver path goes through Service, and the
# deprecated chrome_options keyword becomes options.
options = Options()
options.add_argument("user-data-dir=C:\\Users\\User\\AppData\\Local\\Google\\Chrome\\User Data\\Profile")
driver = webdriver.Chrome(service=Service(r'C:\path\to\chromedriver.exe'),
                          options=options)
driver.get("https://www.google.com")
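# Chrome will refuse to start if the chosen profile is already open in
# another window; pointing user-data-dir at a throwaway directory avoids
# the lock (sketch; both paths are placeholders):
options = Options()
options.add_argument("user-data-dir=C:\\Temp\\chrome-profile")
driver = webdriver.Chrome(service=Service(r'C:\path\to\chromedriver.exe'),
                          options=options)
driver.get("https://www.google.com")
print(driver.title)
driver.quit()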