import requests
from bs4 import BeautifulSoup

url = "https://realt.by/sale-flats/object/2562548/"
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) "
        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Mobile Safari/537.36"
    )
}
data = requests.get(url, headers=headers)
soup = BeautifulSoup(data.text, features="html.parser")

# The photo gallery sits inside a swiper-wrapper div; take the second preview image.
img = (
    soup.find("div", attrs={"class": "swiper-wrapper"})
    .find_all("img", class_="blur-sm scale-105")[1]
    .get("src")
)
print(img)
https://static.realt.by/thumb/c/600x400/6f57b1d409f96f2b1ede7f082f120b50/ja/e/site15nf8eja/7c30f38145.jpg
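If you also want to store the picture locally, a minimal follow-up sketch; the filename photo.jpg is my own choice, not from the original:

import requests

# Save the image URL printed above to disk.
img_url = "https://static.realt.by/thumb/c/600x400/6f57b1d409f96f2b1ede7f082f120b50/ja/e/site15nf8eja/7c30f38145.jpg"
resp = requests.get(img_url, timeout=30)
resp.raise_for_status()
with open("photo.jpg", "wb") as f:
    f.write(resp.content)  # raw JPEG bytes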
These steps stop the Diagnostic Policy Service (DPS), delete the SRU (System Resource Usage) database files, and then restart the service.
Open a new command prompt as administrator.
Type the following command and hit the Enter key:
net stop dps
Now, execute the following command:
del /F /S /Q /A "%windir%\System32\sru\*"
Finally, execute the command:
net start dps
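If you would rather script the three steps, here is a minimal Python sketch of the same sequence; this wrapper is my own addition, not from the original, and it assumes an elevated (administrator) session on Windows:

import subprocess

# Chains the same three commands shown above; check=True aborts on the first failure.
for cmd in (
    'net stop dps',
    r'del /F /S /Q /A "%windir%\System32\sru\*"',
    'net start dps',
):
    subprocess.run(cmd, shell=True, check=True)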
Sub ExtractURL()
    Dim rng As Range
    ' Write each selected cell's first hyperlink address into the cell to its right.
    For Each rng In Selection
        If rng.Hyperlinks.Count > 0 Then
            rng.Offset(0, 1).Value = rng.Hyperlinks(1).Address
        End If
    Next rng
End Sub
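To use it, select the cells that contain the hyperlinks and run the macro; each underlying address appears in the adjacent column. Note that this reads inserted hyperlinks only, not URLs produced by the HYPERLINK() worksheet function, which do not appear in the Hyperlinks collection.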
AND – =IF(AND(Something is True, Something else is True), Value if True, Value if False)
OR – =IF(OR(Something is True, Something else is True), Value if True, Value if False)
NOT – =IF(NOT(Something is True), Value if True, Value if False)
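For instance, a concrete version of the AND pattern (the cells A2 and B2 and the threshold of 50 are hypothetical):
=IF(AND(A2>50, B2>50), "Pass", "Fail")
This returns "Pass" only when both cells exceed 50; replacing AND with OR makes either one sufficient.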
from bs4 import BeautifulSoup
import requests

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
}
estrade_url = 'https://www.instagram.com/estrade.pmk/'
page = requests.get(estrade_url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
print(soup.prettify())  # dump the raw HTML to inspect what the server actually returns
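Note that Instagram renders most of the page with JavaScript, so the prettified output is largely script tags. What the raw HTML has typically still carried server-side is the Open Graph metadata; here is a small sketch that continues from the soup object above, assuming the request was not redirected to a login page:

# Print any Open Graph <meta> tags (og:title, og:description, ...) found in the raw HTML.
for tag in soup.find_all('meta'):
    prop = tag.get('property', '')
    if prop.startswith('og:'):
        print(prop, '=>', tag.get('content'))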
import requests
from bs4 import BeautifulSoup

main_url = 'https://uristhome.ru'
docs_url = "https://uristhome.ru/document"
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
down_link = []
r = requests.get(docs_url, headers=headers)
soup = BeautifulSoup(r.content, 'html.parser')
# Iterate over the <a> tags inside the document list, not the list's raw children,
# which would also yield bare whitespace strings.
for doc in soup.find("ul", {"class": "y_articles-document-list"}).find_all("a"):
    down_link.append(main_url + doc.attrs['href'])
# The list is named down_link (the original joined an undefined name, docs),
# and the filename must match the one the next script reads.
with open('download_links.txt', 'a') as nf:
    nf.writelines('\n'.join(down_link))
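The second script below walks that file, opens each listing page, and extracts the direct file URL from the download block on every page: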
import requests
from bs4 import BeautifulSoup

headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
docs = []
counter = 0
with open('download_links.txt', 'r') as down_link:
    for link in down_link:
        try:
            # strip the trailing newline before requesting the page
            r = requests.get(link.strip(), headers=headers)
            soup = BeautifulSoup(r.content, 'html.parser')
            # each document page holds its file link inside a filefield-file div
            x = soup.find("div", {"class": "filefield-file"}).find("a").attrs['href']
            counter += 1
            print(counter)
            print(x)
            docs.append(x)
        except Exception:
            # pages without a download block are simply skipped
            pass
with open('documents_link.txt', 'a') as nf:
    nf.writelines('\n'.join(docs))
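To fetch the files themselves, here is a last sketch under the same assumptions; the docs/ target folder and the filename handling are my additions, not part of the original scripts:

import os
import requests

headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
os.makedirs('docs', exist_ok=True)
with open('documents_link.txt') as f:
    for line in f:
        file_url = line.strip()
        if not file_url:
            continue
        # use the last path segment as the local filename
        name = file_url.rsplit('/', 1)[-1]
        r = requests.get(file_url, headers=headers, timeout=60)
        r.raise_for_status()
        with open(os.path.join('docs', name), 'wb') as out:
            out.write(r.content)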
English: =VLOOKUP(O2;$Q:$R;2;FALSE)
Russian localized: =ВПР(O2;$Q:$R;2;ЛОЖЬ)
Both look up the value of O2 in the first column of $Q:$R and return the matching value from the second column; FALSE (ЛОЖЬ) forces an exact match. The semicolon argument separator reflects regional settings in which the comma is the decimal mark.