from functools import reduce
def to_seconds(text):
    """Convert a duration string such as '4h 30min' to total seconds.

    Each whitespace-separated token is a number glued to a unit suffix:
    's' (seconds), 'min' (minutes), 'h' (hours), 'd' (days).

    Args:
        text: duration string, e.g. '2h', '4h 30min', '14d'.

    Returns:
        Total number of seconds as an int.

    Raises:
        KeyError: if a token carries an unknown unit suffix.
        ValueError: if a token contains no digits.
    """
    mult = {
        's': 1,
        'min': 60,
        'h': 60 * 60,
        'd': 24 * 60 * 60,
    }
    total = 0
    # split() (no argument) tolerates repeated/odd whitespace between tokens.
    for token in text.split():
        digits = ''.join(ch for ch in token if ch.isdigit())
        unit = ''.join(ch for ch in token if ch.isalpha())
        # mult[unit] (not .get) fails loudly with KeyError on a bad suffix
        # instead of the opaque "None * int" TypeError the old code raised.
        total += int(digits) * mult[unit]
    return total
# Demo: print each sample duration next to its value in seconds.
for sample in ('2h', '4h 30min', '14d'):
    print(sample, to_seconds(sample))
# 2h 7200
# 4h 30min 16200
# 14d 1209600
Знаю только его IP-адрес, в данном случае 1.1.1.1.
# Sample `nslookup`-style output: name lines followed by address lines.
data = '''
Сервер: test3.com
Address: 3.3.3.3
Сервер: test.test2.com
Address: 2.2.2.2
Имя: test2.test.ru
Address: 1.1.1.1
'''
# Keep only the labelled lines and split each into (label, value).
wanted = ('Сервер:', 'Имя:', 'Address:')
parsed = [line.split(':') for line in data.split('\n')
          if line.startswith(wanted)]
# Consecutive (name line, address line) pairs become an {ip: hostname} map.
servers = {addr[1].strip(): name[1].strip()
           for name, addr in zip(parsed[::2], parsed[1::2])}
servers['1.1.1.1']
# 'test2.test.ru'
# Extract and deduplicate every href="..." target from the Yandex homepage:
# strip the leading href=" and the trailing quote, then sort | uniq.
# NOTE(review): '.*?' (lazy quantifier) is a PCRE idiom; POSIX ERE as used by
# `grep -E` does not define it — 'href="[^"]*"' would be the portable spelling.
curl https://yandex.ru | grep -o -E 'href=\".*?\"' | sed 's/href=\"//' | sed 's/\"//' | sort | uniq
# //yandex.ru/opensearch.xml
# //yastatic.net/jquery/2.1.4/jquery.min.js
# https://afisha.yandex.ru/rostov-na-donu/cinema/cyrano-2022?utm_source=yamain&utm_medium=yamain_afisha_kp
# https://afisha.yandex.ru/rostov-na-donu/cinema/dog-2021?utm_source=yamain&utm_medium=yamain_afisha_kp
# https://afisha.yandex.ru/rostov-na-donu/cinema/kroletsyp-i-khomiak-tmy?utm_source=yamain&utm_medium=yamain_afisha_kp
...
import io
import requests
from lxml import etree

# Download the Yandex start page and print the href of every <a> element.
data = requests.get('https://yandex.ru').text
tree = etree.parse(io.StringIO(data), etree.HTMLParser())
for anchor in tree.xpath('//a'):
    print(anchor.get('href'))
# Sample stat lines: channel:views:search query:normalized query.
text = '''GamesFree:66:genshin impact cheats:genshin impact cheat
GamesFree:63:synapse x cracked download:synapse x crack
GamesFree:69:synapse x cracked download:synapse x crack'''.split('\n')

def _views(line):
    # The second ':'-separated field is the numeric view count.
    return int(line.split(':')[1])

# Order the lines by view count, highest first.
sorted_text = sorted(text, key=_views, reverse=True)
print('\n'.join(sorted_text))
# GamesFree:69:synapse x cracked download:synapse x crack
# GamesFree:66:genshin impact cheats:genshin impact cheat
# GamesFree:63:synapse x cracked download:synapse x crack
Подскажите, пожалуйста, почему так происходит?
for block in soup.find_all(class_=re.compile("OrderSnippetContainerStyles"))