# Requires: aiohttp (can be replaced with requests) and pycryptodome
import json
import hashlib
import asyncio
import aiohttp
from Crypto.Cipher import AES
# Base URL of the public receipt-checking service and its API path.
URL = "https://proverkacheka.com"
API = "/api/v1/check/get"

# Browser-like headers sent with every request; the site expects a plausible
# User-Agent / Referer / Origin combination.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/118.0",
    "Referer": "https://proverkacheka.com/",
    "Origin": "https://proverkacheka.com",
}
def compute_token(qrraw: str, qr: str) -> str:
    """Brute-force the anti-bot token expected by the receipt API.

    The server accepts a token of the form ``"0.<d>"`` where ``d`` is the
    smallest non-negative integer such that the hex MD5 digest of
    ``qrraw + qr + str(d)`` contains more than four ``'0'`` characters.

    Args:
        qrraw: Raw QR-code string of the receipt.
        qr: QR "type" field of the request.

    Returns:
        The token string ``"0.<d>"``.  If no suitable ``d`` is found within
        10000 attempts, falls back to ``"0.9999"`` (matches the original
        behavior; in practice a hit occurs after a few dozen tries).
    """
    base = (qrraw + qr).encode()
    d = 9999  # fallback when the loop finds no match
    for i in range(10000):
        # h.count("0") is the readable form of len(h.split("0")) - 1.
        if hashlib.md5(base + str(i).encode()).hexdigest().count("0") > 4:
            d = i
            break
    return f"0.{d}"
async def get_crypted_json(qrraw: str, qr: str) -> bytes:
    """POST the QR payload to the receipt API and return the encrypted body.

    Args:
        qrraw: Raw QR-code string of the receipt.
        qr: QR "type" field expected by the API.

    Returns:
        The raw (still encrypted) response bytes.

    Raises:
        ValueError: if the server did not answer with a "+crypto" content
            type, which means the computed token was rejected.
    """
    form = aiohttp.FormData()
    form.add_field(name="qrraw", value=qrraw)
    form.add_field(name="qr", value=qr)
    form.add_field(name="token", value=compute_token(qrraw, qr))
    async with aiohttp.ClientSession(headers=headers) as client:
        # Hit the main page first so the session picks up the cookies
        # the API endpoint expects.
        await client.get(URL)
        response = await client.post(URL + API, data=form, headers={"Cookie": "ENGID=1.1"})
        # .get() instead of [] so a missing Content-Type header surfaces as
        # the intended ValueError rather than an unrelated KeyError.
        if "+crypto" not in response.headers.get("Content-Type", ""):
            raise ValueError("Invalid token")
        return await response.read()
async def main() -> None:
    """Fetch one encrypted receipt, decrypt it and save it as pretty JSON."""
    basekey = "38s91"
    decryptkey = "f65nm"
    qrraw = "t=20201017T1923&s=1498.00&fn=9282440300669857&i=25151&fp=1186123459&n=1"
    qr = "3"

    payload = await get_crypted_json(qrraw, qr)
    # The last 12 bytes of the response carry the AES-GCM nonce.
    ciphertext, nonce = payload[:-12], payload[-12:]
    key = hashlib.sha256((basekey + decryptkey).encode()).digest()
    decrypted = AES.new(key=key, mode=AES.MODE_GCM, nonce=nonce).decrypt(ciphertext)
    # The plaintext carries some trailing garbage, most likely encryption
    # padding (unverified) — decode leniently and cut after the last '}'.
    text = decrypted.decode(errors="ignore")
    json_text = text[: text.rfind("}") + 1]
    with open("decoded.json", "wt", encoding="utf-8") as fp:
        json.dump(json.loads(json_text), fp, ensure_ascii=False, indent=4)
# Script entry point: run the async download/decrypt workflow.
if __name__ == "__main__":
    asyncio.run(main())
def get_session():
    """Return a Cloudflare-bypassing scraper built on a preconfigured Session."""
    plain = requests.Session()
    # Replace the default header set wholesale with browser-like values.
    plain.headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'ru,en-US;q=0.5',
        'Accept-Encoding': 'gzip, deflate, br',
        'DNT': '1',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    return cfscrape.create_scraper(sess=plain)
# Use the result like a regular requests.Session from here on.
# NOTE(review): `requests` and `cfscrape` are imported only further down the
# file, so this module-level call raises NameError as written — the imports
# should be moved above this line.
session = get_session()
import requests
import cfscrape
def get_session():
    """Build a requests.Session with browser-like headers (pinned to
    www.artstation.com) and wrap it in a cfscrape Cloudflare scraper."""
    sess = requests.Session()
    sess.headers = {
        'Host': 'www.artstation.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'ru,en-US;q=0.5',
        'Accept-Encoding': 'gzip, deflate, br',
        'DNT': '1',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    return cfscrape.create_scraper(sess=sess)
def artstation():
    """Collect direct image URLs from an ArtStation profile into links.txt.

    Walks the paginated projects.json listing, fetches each project's own
    JSON document and records every asset of type 'image'.
    """
    page_url = 'https://www.artstation.com/users/kyuyongeom/projects.json'
    post_pattern = 'https://www.artstation.com/projects/{}.json'
    session = get_session()
    absolute_links = []

    response = session.get(page_url, params={'page': 1}).json()
    # The listing serves 50 projects per page; round the page count up.
    pages, modulo = divmod(response['total_count'], 50)
    if modulo:
        pages += 1

    for page in range(1, pages + 1):
        if page != 1:
            response = session.get(page_url, params={'page': page}).json()
        for post in response['data']:
            # The permalink's last path segment is the project short-code
            # used by the per-project JSON endpoint.
            shortcode = post['permalink'].split('/')[-1]
            inner_resp = session.get(post_pattern.format(shortcode)).json()
            for img in inner_resp['assets']:
                if img['asset_type'] == 'image':
                    absolute_links.append(img['image_url'])

    with open('links.txt', 'w') as file:
        file.write('\n'.join(absolute_links))
# Script entry point: run the ArtStation scraper.
if __name__ == '__main__':
    artstation()
import re
from collections import defaultdict

# Parse a gnmap-style scan report (res.txt) into "ip -> list of open ports".
ips = defaultdict(list)
regular = re.compile(r'Host: ([\d\.]+).+?Ports: (\d+)/')
with open('res.txt', 'r') as f:
    for line in f:
        line = line.strip()
        if line.startswith('#'):
            continue
        match = regular.search(line)
        # Blank or unrecognized lines would make .search() return None and
        # crash on .groups() — skip them instead.
        if match is None:
            continue
        ip, port = match.groups()
        ips[ip].append(port)

# Print the results, e.g.:
# 192.168.1.1 80, 801
# 192.168.1.2 801, 445
# 192.168.1.3 80, 8080, 21
for k, v in ips.items():
    print(k, ', '.join(v))

# Write the same output to a file.
with open('outputfile.txt', 'w') as f:
    for k, v in ips.items():
        f.write('{} {}\n'.format(k, ', '.join(v)))