-- Per-client card transaction totals (type 36) for fiscal documents
-- within one date and a time window, grouped by client and card.
-- NOTE(review): date/time literals ('20.02.2020', '18:55:00') rely on the
-- server's DD.MM.YYYY string-literal format — confirm against the target engine;
-- prefer typed DATE/TIME literals where the dialect supports them.
SELECT
  C.CODE AS "Табельный",
  C.NAME AS "Ф.И.О.",
  T.TRANZDATE AS "Дата",
  MAX(T.TRANZTIME) AS "Время",
  T.INFOSTR AS "Карта",
  SUM(T.SUMM) AS "Сумма"
FROM
  DOCUMENT D
  -- The WHERE filters on T.* discard NULL-extended rows, so the original
  -- LEFT JOIN behaved as an inner join; made explicit here.
  INNER JOIN TRANZT T ON D.ID = T.DOCUMENTID
  INNER JOIN CLIENT C ON D.CLIENTID = C.ID
WHERE
  T.TRANZDATE >= '20.02.2020' AND T.TRANZDATE <= '20.02.2020' AND
  T.TRANZTIME >= '18:55:00' AND T.TRANZTIME <= '23:59:59' AND
  D.STATE = 1 AND
  D.ISFISCAL = 1 AND
  D.CLIENTID >= 0 AND
  T.TRANZTYPE = '36'
GROUP BY
  C.CODE,
  C.NAME,
  T.TRANZDATE,
  T.INFOSTR
# NOTE(review): pasted REPL transcript, not runnable as written — the tail of one
# command and a second full one-liner are fused onto a single line. The one-liner
# snapshots heap usage (third-party guppy's hpy) before and after calling
# random.choices over a harmonic-weight generator (1/x for x in range(20, 2*10**7)).
# Confirm intent and split before committing.
print(choices(r, (1/x for x in r)))from guppy import hpy; from random import choices; h=hpy(); print(h.heap()); r=range(20, 2*10**7); print(choices(r, (1/x for x in r))); print(h.heap())
Partition of a set of 257681 objects. Total size = 30586285 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 77410 30 8264794 27 8264794 27 str
1 69723 27 5198776 17 13463570 44 tuple
2 28709 11 2133716 7 15597286 51 bytes
3 14447 6 2089272 7 17686558 58 types.CodeType
4 15188 6 2065568 7 19752126 65 function
5 2013 1 1932488 6 21684614 71 type
6 3316 1 1515992 5 23200606 76 dict (no owner)
7 684 0 937144 3 24137750 79 dict of module
8 2013 1 932560 3 25070310 82 dict of type
9 1866 1 633536 2 25703846 84 set
<863 more rows. Type e.g. '_.more' to view.>
[9585326]
Partition of a set of 257636 objects. Total size = 30579517 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 77406 30 8264562 27 8264562 27 str
1 69724 27 5198832 17 13463394 44 tuple
2 28709 11 2133716 7 15597110 51 bytes
3 14447 6 2089272 7 17686382 58 types.CodeType
4 15186 6 2065296 7 19751678 65 function
5 2013 1 1932488 6 21684166 71 type
6 3310 1 1514552 5 23198718 76 dict (no owner)
7 684 0 937144 3 24135862 79 dict of module
8 2013 1 932560 3 25068422 82 dict of type
9 1866 1 633536 2 25701958 84 set
def choices(self, population, weights=None, *, cum_weights=None, k=1):
    """Return a k sized list of population elements chosen with replacement.

    If the relative weights or cumulative weights are not specified,
    the selections are made with equal probability.
    """
    rand = self.random
    if cum_weights is not None:
        if weights is not None:
            raise TypeError('Cannot specify both weights and cumulative weights')
    elif weights is not None:
        cum_weights = list(_itertools.accumulate(weights))
    else:
        # Uniform fast path: scale a float in [0, 1) straight to an index.
        size = len(population)
        return [population[int(rand() * size)] for _ in range(k)]
    if len(cum_weights) != len(population):
        raise ValueError('The number of weights does not match the population')
    locate = _bisect.bisect
    total = cum_weights[-1]
    top = len(cum_weights) - 1
    picks = []
    for _ in range(k):
        picks.append(population[locate(cum_weights, rand() * total, 0, top)])
    return picks
from random import Random
import itertools
from bisect import bisect
random = Random().random
n = b - a
population = range(a, b)
weights = (1/x for x in population)
cum_weights = list(itertools.accumulate(weights))
# get_weight = lambda idx: 1 / (a + idx)
# get_cum_weight = lambda idx: ????
total = cum_weights[-1]
hi = len(cum_weights) - 1
return [
population[bisect(cum_weights, random() * total, 0, hi)]
for i in range(k)
]cum_weights = list(itertools.accumulate(weights))С[n]=1/(a+0)+1/(a+1)+1/(a+2)+...+1/(a+n)echo '[{"name": "ha-collector-data-test1"}, {"name": "ha-collector-data-test2"}]' | py "[item['name'] for item in json.load(sys.stdin) if item['name'].startswith('ha-collector-data-')]"import json
# Collect the "name" of every entry carrying the ha-collector-data- prefix.
FILTER='[{"name": "ha-collector-data-test1"}, {"name": "ha-collector-data-test2"}, {"name": "ha-collector-data-test3"}]'
filter_data = json.loads(FILTER)
items = [
item['name']
for item in filter_data
if item['name'].startswith('ha-collector-data-')
# NOTE(review): the closing-bracket line below has a stray HTML <meta> fragment
# and 'import csv' fused onto it — pasted content, not part of this snippet;
# confirm and split before running. Indentation of the comprehension body was
# also lost in the paste.
] <meta http-equiv="content-type" content="text/html; charset=windows-1251">import csv
import sys
import typing
import json
def deep_walk(j, path=()):
    """Recursively yield (path_segments, leaf_value) pairs from nested JSON data.

    Dict keys become '.key' segments and list positions become '[i]' segments;
    any non-dict, non-list value is a leaf and is yielded with its path tuple.
    """
    if isinstance(j, dict):
        children = ((f'.{key}', val) for key, val in j.items())
    elif isinstance(j, list):
        children = ((f'[{idx}]', val) for idx, val in enumerate(j))
    else:
        # Leaf: emit the accumulated path alongside the value.
        yield path, j
        return
    for segment, val in children:
        yield from deep_walk(val, path + (segment,))
def json2csv(data: typing.Union[dict, list], dest: typing.Optional[typing.TextIO] = None):
    """Flatten JSON data into CSV written to dest (default: sys.stdout).

    Each leaf value becomes a column whose header is its flattened path as
    produced by deep_walk (e.g. 'a.b[0].c'); a dict input is treated as a
    single record, a list as one record per element.

    Args:
        data: a JSON object (one record) or array of objects (many records).
        dest: writable text stream; None means sys.stdout.
    """
    field_set = set()
    records = []
    if isinstance(data, dict):
        data = [data]  # normalize: a lone object is a one-record table
    for item in data:
        # Join each leaf's path segments into a header name; strip the
        # leading '.' that deep_walk puts before top-level keys.
        record = {''.join(path).lstrip('.'): value for path, value in deep_walk(item)}
        records.append(record)
        field_set.update(record.keys())
    # sorted() already returns a list; deterministic column order across runs.
    w = csv.DictWriter(dest or sys.stdout, fieldnames=sorted(field_set))
    w.writeheader()
    w.writerows(records)
if __name__ == '__main__':
    # Convert a JSON file (first CLI argument) or stdin to CSV on stdout.
    args = sys.argv[1:]
    if args:
        with open(args[0]) as fh:
            json2csv(json.load(fh))
    else:
        json2csv(json.load(sys.stdin))