A plain sequential scan over a table of this size takes almost seven seconds:

-> Seq Scan on cdr_data_90mln cdr_data_master (cost=0.00..90934.00 rows=5000000 width=99) (actual time=10.520..6740.251 rows=5000000 loops=1)
-- parent table; stays (nearly) empty, queries go through it
CREATE TABLE data (
    id bigserial,
    something text,
    primary key (id)
);
-- child tables: non-overlapping CHECK constraints on id, one per million rows;
-- note that each primary key is local to its child, so nothing enforces
-- uniqueness across partitions (the sequence makes collisions unlikely here)
CREATE TABLE data_id1m (CHECK ( id < 1000000 ), primary key (id)) INHERITS (data);
CREATE TABLE data_id2m (CHECK ( id >= 1000000 AND id < 2000000 ), primary key (id)) INHERITS (data);
CREATE TABLE data_id3m (CHECK ( id >= 2000000 AND id < 3000000 ), primary key (id)) INHERITS (data);
CREATE TABLE data_id4m (CHECK ( id >= 3000000 AND id < 4000000 ), primary key (id)) INHERITS (data);
CREATE TABLE data_id5m (CHECK ( id >= 4000000 AND id < 5000000 ), primary key (id)) INHERITS (data);
CREATE OR REPLACE FUNCTION data_insert_trigger()
RETURNS TRIGGER AS $$
BEGIN
    IF ( NEW.id < 1000000 ) THEN
        INSERT INTO data_id1m VALUES (NEW.*);
    ELSIF ( NEW.id >= 1000000 AND NEW.id < 2000000 ) THEN
        INSERT INTO data_id2m VALUES (NEW.*);
    ELSIF ( NEW.id >= 2000000 AND NEW.id < 3000000 ) THEN
        INSERT INTO data_id3m VALUES (NEW.*);
    ELSIF ( NEW.id >= 3000000 AND NEW.id < 4000000 ) THEN
        INSERT INTO data_id4m VALUES (NEW.*);
    ELSIF ( NEW.id >= 4000000 AND NEW.id < 5000000 ) THEN
        INSERT INTO data_id5m VALUES (NEW.*);
    ELSE
        RAISE EXCEPTION 'id out of range. Fix the data_insert_trigger() function!';
    END IF;
    -- returning NULL suppresses the insert into the parent table itself
    RETURN NULL;
END;
$$
LANGUAGE plpgsql;
CREATE TRIGGER insert_data_trigger
    BEFORE INSERT ON data
    FOR EACH ROW EXECUTE PROCEDURE data_insert_trigger();
-- populate; the trigger routes every row into the matching child
INSERT INTO data (something)
SELECT repeat(i::text, 20) FROM generate_series(1, 4500000) i;
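Before looking at the plans below, it is worth checking that constraint exclusion actually kicks in. A minimal sanity check, assuming constraint_exclusion is at its default of partition (the id value 1500000 is arbitrary):

SET constraint_exclusion = partition;
EXPLAIN SELECT * FROM data WHERE id = 1500000;
-- the plan should touch only the parent (always scanned) and data_id2m;
-- the other four children are pruned by their CHECK constraints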
test=# explain analyze select * from data order by id desc limit 10;
                                                                      QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Limit  (cost=0.08..0.84 rows=10 width=146) (actual time=18.751..18.817 rows=10 loops=1)
   ->  Result  (cost=0.08..341945.50 rows=4504501 width=146) (actual time=18.748..18.796 rows=10 loops=1)
         ->  Merge Append  (cost=0.08..341945.50 rows=4504501 width=146) (actual time=18.746..18.777 rows=10 loops=1)
               Sort Key: public.data.id
               ->  Index Scan Backward using data_pkey on data  (cost=0.00..8.27 rows=1 width=40) (actual time=0.005..0.005 rows=0 loops=1)
               ->  Index Scan Backward using data_id1m_pkey on data_id1m data  (cost=0.00..45302.34 rows=999999 width=126) (actual time=5.790..5.790 rows=1 loops=1)
               ->  Index Scan Backward using data_id2m_pkey on data_id2m data  (cost=0.00..48194.36 rows=1000000 width=151) (actual time=7.983..7.983 rows=1 loops=1)
               ->  Index Scan Backward using data_id3m_pkey on data_id3m data  (cost=0.00..48206.36 rows=1000000 width=152) (actual time=0.009..0.009 rows=1 loops=1)
               ->  Index Scan Backward using data_id4m_pkey on data_id4m data  (cost=0.00..48206.36 rows=1000000 width=152) (actual time=4.934..4.934 rows=1 loops=1)
               ->  Index Scan Backward using data_id5m_pkey on data_id5m data  (cost=0.00..24326.82 rows=504501 width=152) (actual time=0.014..0.025 rows=10 loops=1)
 Total runtime: 18.878 ms
(11 rows)

test=# explain analyze select max(id) from data;
                                                                      QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Result  (cost=0.16..0.17 rows=1 width=0) (actual time=136.437..136.438 rows=1 loops=1)
   InitPlan 1 (returns $0)
     ->  Limit  (cost=0.08..0.16 rows=1 width=8) (actual time=136.425..136.426 rows=1 loops=1)
           ->  Merge Append  (cost=0.08..353206.75 rows=4504501 width=8) (actual time=136.422..136.422 rows=1 loops=1)
                 Sort Key: public.data.id
                 ->  Index Scan Backward using data_pkey on data  (cost=0.00..8.27 rows=1 width=8) (actual time=7.295..7.295 rows=0 loops=1)
                       Index Cond: (id IS NOT NULL)
                 ->  Index Scan Backward using data_id1m_pkey on data_id1m data  (cost=0.00..47802.34 rows=999999 width=8) (actual time=45.417..45.417 rows=1 loops=1)
                       Index Cond: (id IS NOT NULL)
                 ->  Index Scan Backward using data_id2m_pkey on data_id2m data  (cost=0.00..50694.36 rows=1000000 width=8) (actual time=40.746..40.746 rows=1 loops=1)
                       Index Cond: (id IS NOT NULL)
                 ->  Index Scan Backward using data_id3m_pkey on data_id3m data  (cost=0.00..50706.36 rows=1000000 width=8) (actual time=21.661..21.661 rows=1 loops=1)
                       Index Cond: (id IS NOT NULL)
                 ->  Index Scan Backward using data_id4m_pkey on data_id4m data  (cost=0.00..50706.36 rows=1000000 width=8) (actual time=21.018..21.018 rows=1 loops=1)
                       Index Cond: (id IS NOT NULL)
                 ->  Index Scan Backward using data_id5m_pkey on data_id5m data  (cost=0.00..25588.07 rows=504501 width=8) (actual time=0.273..0.273 rows=1 loops=1)
                       Index Cond: (id IS NOT NULL)
 Total runtime: 136.530 ms
(18 rows)
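Both plans read a handful of index entries per child instead of scanning anything, which is the point of the exercise. The other payoff is maintenance: a range can be added or detached without rewriting rows. A sketch, with the data_id6m name and bounds purely illustrative:

-- hypothetical next range; remember to add a matching ELSIF to data_insert_trigger()
CREATE TABLE data_id6m (CHECK ( id >= 5000000 AND id < 6000000 ), primary key (id)) INHERITS (data);
-- retiring an old range is just as cheap:
ALTER TABLE data_id1m NO INHERIT data;  -- detach (or DROP TABLE data_id1m; to delete)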
// a plain associative array works for simple argument structures
$client->method(array(
'id' => 21,
'name' => 'Vasya Pupkin'
));
// or the same thing with an object:
$arg = new stdClass;
$arg->id = 17;
$arg->name = 'Vasya Pupkin';
$arg->foo = 3.14;
$client->method($arg);
// for complex types you have to do something like:
$arg->name = new SoapVar('Vasya Pupkin', XSD_STRING, 'string', 'http://www.w3.org/2001/XMLSchema');
<xs:complexType name="PerformTransactionArguments">
  <xs:complexContent>
    <xs:extension base="tns:GenericArguments">
      <xs:sequence>
        <xs:element name="amount" type="xs:long" minOccurs="1"/>
        <xs:element name="parameters" type="tns:GenericParam" nillable="true" minOccurs="0" maxOccurs="unbounded"/>
        <xs:element name="serviceId" type="xs:long" minOccurs="1"/>
        <xs:element name="transactionId" type="xs:long" minOccurs="1"/>
        <xs:element name="transactionTime" type="xs:dateTime" minOccurs="1"/>
      </xs:sequence>
    </xs:extension>
  </xs:complexContent>
</xs:complexType>
docs.djangoproject.com/en/dev/howto/deployment/wsgi/gunicorn/
Everything in gunicorn.d that does not end in .example gets picked up.
Start it, for example, via /usr/sbin/service:
service gunicorn start
To add it to autostart:
update-rc.d gunicorn defaults
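For completeness, a sketch of what a file in /etc/gunicorn.d might contain. The CONFIG-dict format below is how I recall the Debian packaging reading these files, and every name in it (the file name, the paths, the myproject.wsgi module) is made up for illustration; the shipped .example file is the authoritative template:

# /etc/gunicorn.d/myproject  (hypothetical name; any file here not ending in .example is loaded)
CONFIG = {
    'mode': 'wsgi',
    'user': 'www-data',
    'group': 'www-data',
    'working_dir': '/srv/myproject',   # assumed project location
    'args': (
        '--bind=127.0.0.1:8000',
        '--workers=2',
        'myproject.wsgi:application',  # assumed Django WSGI entry point
    ),
}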