PostgreSQL Database Management System
=====================================
-
+
This directory contains the source code distribution of the PostgreSQL
database management system.
dnl @synopsis AC_FUNC_ACCEPT_ARGTYPES
dnl
dnl Checks the data types of the three arguments to accept(). Results are
-dnl placed into the symbols ACCEPT_TYPE_RETURN and ACCEPT_TYPE_ARG[123],
+dnl placed into the symbols ACCEPT_TYPE_RETURN and ACCEPT_TYPE_ARG[123],
dnl consistent with the following example:
dnl
dnl #define ACCEPT_TYPE_RETURN int
# which is *not* 'socklen_t *'). If we detect that, then we assume
# 'int' as the result, because that ought to work best.
#
-# On Win32, accept() returns 'unsigned int PASCAL'
+# On Win32, accept() returns 'unsigned int PASCAL'
AC_DEFUN([AC_FUNC_ACCEPT_ARGTYPES],
[AC_MSG_CHECKING([types of arguments for accept()])
dnl that by making the help string look the same, which is why we need to
dnl save the default that was passed in previously.
m4_define([_pgac_helpdefault], m4_ifdef([pgac_defined_$1_$2_bool], [m4_defn([pgac_defined_$1_$2_bool])], [$3]))dnl
-PGAC_ARG([$1], [$2], [m4_if(_pgac_helpdefault, yes, -)], [$4], [$5], [$6],
+PGAC_ARG([$1], [$2], [m4_if(_pgac_helpdefault, yes, -)], [$4], [$5], [$6],
[AC_MSG_ERROR([no argument expected for --$1-$2 option])],
[m4_case([$3],
yes, [pgac_arg_to_variable([$1], [$2])=yes
#
PGAC_ARG_BOOL(enable, dtrace, no,
[build with DTrace support],
-[AC_DEFINE([ENABLE_DTRACE], 1,
+[AC_DEFINE([ENABLE_DTRACE], 1,
[Define to 1 to enable DTrace support. (--enable-dtrace)])
AC_CHECK_PROGS(DTRACE, dtrace)
if test -z "$DTRACE"; then
can set it bigger if you need bigger tuples (although TOAST should
reduce the need to have large tuples, since fields can be spread
across multiple tuples).
-
+
BLCKSZ must be a power of 2. The maximum possible value of BLCKSZ
is currently 2^15 (32768). This is determined by the 15-bit widths
of the lp_off and lp_len fields in ItemIdData (see
include/storage/itemid.h).
-
+
Changing BLCKSZ requires an initdb.
-])
+])
#
# Relation segment size
RELSEG_SIZE is the maximum number of blocks allowed in one disk file.
Thus, the maximum size of a single file is RELSEG_SIZE * BLCKSZ;
relations bigger than that are divided into multiple files.
-
+
RELSEG_SIZE * BLCKSZ must be less than your OS' limit on file size.
This is often 2 GB or 4GB in a 32-bit operating system, unless you
have large file support enabled. By default, we make the limit 1 GB
buffers, else direct I/O may fail.
Changing XLOG_BLCKSZ requires an initdb.
-])
+])
#
# WAL segment size
# enable profiling if --enable-profiling
if test "$enable_profiling" = yes && test "$ac_cv_prog_cc_g" = yes; then
if test "$GCC" = yes; then
- AC_DEFINE([PROFILE_PID_DIR], 1,
+ AC_DEFINE([PROFILE_PID_DIR], 1,
[Define to 1 to allow profiling output to be saved separately for each process.])
CFLAGS="$CFLAGS -pg $PLATFORM_PROFILE_FLAGS"
else
AC_MSG_CHECKING(for krb5_free_unparsed_name)
AC_TRY_LINK([#include <krb5.h>],
[krb5_free_unparsed_name(NULL,NULL);],
- [AC_DEFINE(HAVE_KRB5_FREE_UNPARSED_NAME, 1, [Define to 1 if you have krb5_free_unparsed_name])
+ [AC_DEFINE(HAVE_KRB5_FREE_UNPARSED_NAME, 1, [Define to 1 if you have krb5_free_unparsed_name])
AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)])
fi
AC_CHECK_SIZEOF([off_t])
# If we don't have largefile support, can't handle segsize >= 2GB.
-if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then
- AC_MSG_ERROR([Large file support is not enabled. Segment size cannot be larger than 1GB.])
+if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then
+ AC_MSG_ERROR([Large file support is not enabled. Segment size cannot be larger than 1GB.])
fi
#
# To properly translate all NLS languages strings, we must support the
# *printf() %$ format, which allows *printf() arguments to be selected
- # by position in the translated string.
- #
+ # by position in the translated string.
+ #
# libintl versions < 0.13 use the native *printf() functions, and Win32
# *printf() doesn't understand %$, so we must use our /port versions,
# which do understand %$. libintl versions >= 0.13 include their own
AC_CHECK_SIZEOF([long])
# Decide whether float4 is passed by value: user-selectable, enabled by default
-AC_MSG_CHECKING([whether to build with float4 passed by value])
+AC_MSG_CHECKING([whether to build with float4 passed by value])
PGAC_ARG_BOOL(enable, float4-byval, yes, [disable float4 passed by value],
[AC_DEFINE([USE_FLOAT4_BYVAL], 1,
[Define to 1 if you want float4 values to be passed by value. (--enable-float4-byval)])
if test "$PORTNAME" = "win32"; then
AC_CONFIG_COMMANDS([check_win32_symlinks],[
-# Links sometimes fail undetected on Mingw -
+# Links sometimes fail undetected on Mingw -
# so here we detect it and warn the user
for FILE in $CONFIG_LINKS
do
lo -
Large Object maintenance
ltree -
Tree-like data structures
-oid2name -
+oid2name -
Maps numeric files to table names
Functions to get information about SSL certificates
-start-scripts -
+start-scripts -
Scripts for starting the server at boot time on various platforms.
tablefunc -
# contrib/btree_gin/Makefile
MODULE_big = btree_gin
-OBJS = btree_gin.o
+OBJS = btree_gin.o
DATA_built = btree_gin.sql
DATA = uninstall_btree_gin.sql
CREATE TABLE test_cidr (
i cidr
);
-INSERT INTO test_cidr VALUES
+INSERT INTO test_cidr VALUES
( '1.2.3.4' ),
( '1.2.4.4' ),
( '1.2.5.4' ),
CREATE TABLE test_date (
i date
);
-INSERT INTO test_date VALUES
+INSERT INTO test_date VALUES
( '2004-10-23' ),
( '2004-10-24' ),
( '2004-10-25' ),
CREATE TABLE test_inet (
i inet
);
-INSERT INTO test_inet VALUES
+INSERT INTO test_inet VALUES
( '1.2.3.4/16' ),
( '1.2.4.4/16' ),
( '1.2.5.4/16' ),
CREATE TABLE test_interval (
i interval
);
-INSERT INTO test_interval VALUES
+INSERT INTO test_interval VALUES
( '03:55:08' ),
( '04:55:08' ),
( '05:55:08' ),
CREATE TABLE test_macaddr (
i macaddr
);
-INSERT INTO test_macaddr VALUES
+INSERT INTO test_macaddr VALUES
( '22:00:5c:03:55:08' ),
( '22:00:5c:04:55:08' ),
( '22:00:5c:05:55:08' ),
CREATE TABLE test_time (
i time
);
-INSERT INTO test_time VALUES
+INSERT INTO test_time VALUES
( '03:55:08' ),
( '04:55:08' ),
( '05:55:08' ),
CREATE TABLE test_timestamp (
i timestamp
);
-INSERT INTO test_timestamp VALUES
+INSERT INTO test_timestamp VALUES
( '2004-10-26 03:55:08' ),
( '2004-10-26 04:55:08' ),
( '2004-10-26 05:55:08' ),
CREATE TABLE test_timestamptz (
i timestamptz
);
-INSERT INTO test_timestamptz VALUES
+INSERT INTO test_timestamptz VALUES
( '2004-10-26 03:55:08' ),
( '2004-10-26 04:55:08' ),
( '2004-10-26 05:55:08' ),
CREATE TABLE test_timetz (
i timetz
);
-INSERT INTO test_timetz VALUES
+INSERT INTO test_timetz VALUES
( '03:55:08 GMT+2' ),
( '04:55:08 GMT+2' ),
( '05:55:08 GMT+2' ),
i cidr
);
-INSERT INTO test_cidr VALUES
+INSERT INTO test_cidr VALUES
( '1.2.3.4' ),
( '1.2.4.4' ),
( '1.2.5.4' ),
i date
);
-INSERT INTO test_date VALUES
+INSERT INTO test_date VALUES
( '2004-10-23' ),
( '2004-10-24' ),
( '2004-10-25' ),
i inet
);
-INSERT INTO test_inet VALUES
+INSERT INTO test_inet VALUES
( '1.2.3.4/16' ),
( '1.2.4.4/16' ),
( '1.2.5.4/16' ),
i interval
);
-INSERT INTO test_interval VALUES
+INSERT INTO test_interval VALUES
( '03:55:08' ),
( '04:55:08' ),
( '05:55:08' ),
i macaddr
);
-INSERT INTO test_macaddr VALUES
+INSERT INTO test_macaddr VALUES
( '22:00:5c:03:55:08' ),
( '22:00:5c:04:55:08' ),
( '22:00:5c:05:55:08' ),
i time
);
-INSERT INTO test_time VALUES
+INSERT INTO test_time VALUES
( '03:55:08' ),
( '04:55:08' ),
( '05:55:08' ),
i timestamp
);
-INSERT INTO test_timestamp VALUES
+INSERT INTO test_timestamp VALUES
( '2004-10-26 03:55:08' ),
( '2004-10-26 04:55:08' ),
( '2004-10-26 05:55:08' ),
i timestamptz
);
-INSERT INTO test_timestamptz VALUES
+INSERT INTO test_timestamptz VALUES
( '2004-10-26 03:55:08' ),
( '2004-10-26 04:55:08' ),
( '2004-10-26 05:55:08' ),
i timetz
);
-INSERT INTO test_timetz VALUES
+INSERT INTO test_timetz VALUES
( '03:55:08 GMT+2' ),
( '04:55:08 GMT+2' ),
( '05:55:08 GMT+2' ),
-- Create the operator class
CREATE OPERATOR CLASS gist_oid_ops
-DEFAULT FOR TYPE oid USING gist
+DEFAULT FOR TYPE oid USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_int2_ops
-DEFAULT FOR TYPE int2 USING gist
+DEFAULT FOR TYPE int2 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_int4_ops
-DEFAULT FOR TYPE int4 USING gist
+DEFAULT FOR TYPE int4 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_int8_ops
-DEFAULT FOR TYPE int8 USING gist
+DEFAULT FOR TYPE int8 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_float4_ops
-DEFAULT FOR TYPE float4 USING gist
+DEFAULT FOR TYPE float4 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_float8_ops
-DEFAULT FOR TYPE float8 USING gist
+DEFAULT FOR TYPE float8 USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
--
--
-- timestamp ops
---
+--
--
--
RETURNS bool
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_ts_compress(internal)
RETURNS internal
AS 'MODULE_PATHNAME'
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_ts_picksplit(internal, internal)
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_ts_union(bytea, internal)
RETURNS gbtreekey16
AS 'MODULE_PATHNAME'
-- Create the operator class
CREATE OPERATOR CLASS gist_timestamp_ops
-DEFAULT FOR TYPE timestamp USING gist
+DEFAULT FOR TYPE timestamp USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_timestamptz_ops
-DEFAULT FOR TYPE timestamptz USING gist
+DEFAULT FOR TYPE timestamptz USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
--
--
-- time ops
---
+--
--
--
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_time_picksplit(internal, internal)
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_time_union(bytea, internal)
RETURNS gbtreekey16
AS 'MODULE_PATHNAME'
-- Create the operator class
CREATE OPERATOR CLASS gist_time_ops
-DEFAULT FOR TYPE time USING gist
+DEFAULT FOR TYPE time USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
STORAGE gbtreekey16;
CREATE OPERATOR CLASS gist_timetz_ops
-DEFAULT FOR TYPE timetz USING gist
+DEFAULT FOR TYPE timetz USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
--
--
-- date ops
---
+--
--
--
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_date_picksplit(internal, internal)
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_date_union(bytea, internal)
RETURNS gbtreekey8
AS 'MODULE_PATHNAME'
-- Create the operator class
CREATE OPERATOR CLASS gist_date_ops
-DEFAULT FOR TYPE date USING gist
+DEFAULT FOR TYPE date USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
--
--
-- interval ops
---
+--
--
--
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_intv_picksplit(internal, internal)
RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gbt_intv_union(bytea, internal)
RETURNS gbtreekey32
AS 'MODULE_PATHNAME'
-- Create the operator class
CREATE OPERATOR CLASS gist_interval_ops
-DEFAULT FOR TYPE interval USING gist
+DEFAULT FOR TYPE interval USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_cash_ops
-DEFAULT FOR TYPE money USING gist
+DEFAULT FOR TYPE money USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_macaddr_ops
-DEFAULT FOR TYPE macaddr USING gist
+DEFAULT FOR TYPE macaddr USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_text_ops
-DEFAULT FOR TYPE text USING gist
+DEFAULT FOR TYPE text USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_text_penalty (internal, internal, internal),
FUNCTION 6 gbt_text_picksplit (internal, internal),
FUNCTION 7 gbt_text_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
---- Create the operator class
CREATE OPERATOR CLASS gist_bpchar_ops
-DEFAULT FOR TYPE bpchar USING gist
+DEFAULT FOR TYPE bpchar USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_text_penalty (internal, internal, internal),
FUNCTION 6 gbt_text_picksplit (internal, internal),
FUNCTION 7 gbt_text_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
-- Create the operator class
CREATE OPERATOR CLASS gist_bytea_ops
-DEFAULT FOR TYPE bytea USING gist
+DEFAULT FOR TYPE bytea USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_bytea_penalty (internal, internal, internal),
FUNCTION 6 gbt_bytea_picksplit (internal, internal),
FUNCTION 7 gbt_bytea_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
--
-- Create the operator class
CREATE OPERATOR CLASS gist_numeric_ops
-DEFAULT FOR TYPE numeric USING gist
+DEFAULT FOR TYPE numeric USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_numeric_penalty (internal, internal, internal),
FUNCTION 6 gbt_numeric_picksplit (internal, internal),
FUNCTION 7 gbt_numeric_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
--
--
-- Create the operator class
CREATE OPERATOR CLASS gist_bit_ops
-DEFAULT FOR TYPE bit USING gist
+DEFAULT FOR TYPE bit USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_bit_penalty (internal, internal, internal),
FUNCTION 6 gbt_bit_picksplit (internal, internal),
FUNCTION 7 gbt_bit_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
-- Create the operator class
CREATE OPERATOR CLASS gist_vbit_ops
-DEFAULT FOR TYPE varbit USING gist
+DEFAULT FOR TYPE varbit USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
FUNCTION 5 gbt_bit_penalty (internal, internal, internal),
FUNCTION 6 gbt_bit_picksplit (internal, internal),
FUNCTION 7 gbt_bit_same (internal, internal, internal),
- STORAGE gbtreekey_var;
+ STORAGE gbtreekey_var;
-- Create the operator class
CREATE OPERATOR CLASS gist_inet_ops
-DEFAULT FOR TYPE inet USING gist
+DEFAULT FOR TYPE inet USING gist
AS
OPERATOR 1 < ,
OPERATOR 2 <= ,
-- Create the operator class
CREATE OPERATOR CLASS gist_cidr_ops
-DEFAULT FOR TYPE cidr USING gist
+DEFAULT FOR TYPE cidr USING gist
AS
OPERATOR 1 < (inet, inet) ,
OPERATOR 2 <= (inet, inet) ,
OPERATOR 3 = (inet, inet) ,
OPERATOR 4 >= (inet, inet) ,
OPERATOR 5 > (inet, inet) ,
- OPERATOR 6 <> (inet, inet) ,
+ OPERATOR 6 <> (inet, inet) ,
FUNCTION 1 gbt_inet_consistent (internal, inet, int2, oid, internal),
FUNCTION 2 gbt_inet_union (bytea, internal),
FUNCTION 3 gbt_inet_compress (internal),
DROP FUNCTION gbt_intv_same(internal, internal, internal);
DROP FUNCTION gbt_intv_union(bytea, internal);
-
+
DROP FUNCTION gbt_intv_picksplit(internal, internal);
-
+
DROP FUNCTION gbt_intv_penalty(internal,internal,internal);
DROP FUNCTION gbt_intv_decompress(internal);
DROP FUNCTION gbt_date_same(internal, internal, internal);
DROP FUNCTION gbt_date_union(bytea, internal);
-
+
DROP FUNCTION gbt_date_picksplit(internal, internal);
-
+
DROP FUNCTION gbt_date_penalty(internal,internal,internal);
DROP FUNCTION gbt_date_compress(internal);
DROP FUNCTION gbt_time_same(internal, internal, internal);
DROP FUNCTION gbt_time_union(bytea, internal);
-
+
DROP FUNCTION gbt_time_picksplit(internal, internal);
-
+
DROP FUNCTION gbt_time_penalty(internal,internal,internal);
DROP FUNCTION gbt_timetz_compress(internal);
DROP FUNCTION gbt_ts_same(internal, internal, internal);
DROP FUNCTION gbt_ts_union(bytea, internal);
-
+
DROP FUNCTION gbt_ts_picksplit(internal, internal);
-
+
DROP FUNCTION gbt_ts_penalty(internal,internal,internal);
DROP FUNCTION gbt_tstz_compress(internal);
DROP FUNCTION gbt_ts_compress(internal);
-
+
DROP FUNCTION gbt_tstz_consistent(internal,timestamptz,int2,oid,internal);
DROP FUNCTION gbt_ts_consistent(internal,timestamp,int2,oid,internal);
);
--
--- Matching citext to text.
+-- Matching citext to text.
--
CREATE OR REPLACE FUNCTION texticlike(citext, text)
bpchar bpchar,
char char,
chr "char",
- name name,
+ name name,
bytea bytea,
boolean boolean,
float4 float4,
int8 int8,
int4 int4,
int2 int2,
- cidr cidr,
+ cidr cidr,
inet inet,
macaddr macaddr,
money money,
bpchar bpchar,
char char,
chr "char",
- name name,
+ name name,
bytea bytea,
boolean boolean,
float4 float4,
int8 int8,
int4 int4,
int2 int2,
- cidr cidr,
+ cidr cidr,
inet inet,
macaddr macaddr,
money money,
bpchar bpchar,
char char,
chr "char",
- name name,
+ name name,
bytea bytea,
boolean boolean,
float4 float4,
int8 int8,
int4 int4,
int2 int2,
- cidr cidr,
+ cidr cidr,
inet inet,
macaddr macaddr,
money money,
Update the calling convention for all external facing functions. By external
facing, I mean all functions that are directly referenced in cube.sql. Prior
-to my update, all functions used the older V0 calling convention. They now
+to my update, all functions used the older V0 calling convention. They now
use V1.
-New Functions:
+New Functions:
cube(float[]), which makes a zero volume cube from a float array
SET search_path = public;
-- Create the user-defined type for N-dimensional boxes
---
+--
CREATE OR REPLACE FUNCTION cube_in(cstring)
RETURNS cube
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION g_cube_compress(internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION g_cube_decompress(internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION g_cube_union(internal, internal)
-RETURNS cube
+RETURNS cube
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION g_cube_same(cube, cube, internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
O_BRACKET paren_list COMMA paren_list C_BRACKET {
int dim;
-
+
dim = delim_count($2, ',') + 1;
if ( (delim_count($4, ',') + 1) != dim ) {
ereport(ERROR,
CUBE_MAX_DIM)));
YYABORT;
}
-
+
*((void **)result) = write_box( dim, $2, $4 );
-
+
}
|
paren_list COMMA paren_list {
int dim;
dim = delim_count($1, ',') + 1;
-
+
if ( (delim_count($3, ',') + 1) != dim ) {
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
CUBE_MAX_DIM)));
YYABORT;
}
-
+
*((void **)result) = write_box( dim, $1, $3 );
}
|
$$ = palloc(scanbuflen + 1);
strcpy($$, $1);
}
- |
+ |
list COMMA CUBEFLOAT {
$$ = $1;
strcat($$, ",");
return (ndelim);
}
-static NDBOX *
+static NDBOX *
write_box(unsigned int dim, char *str1, char *str2)
{
NDBOX * bp;
char * s;
- int i;
+ int i;
int size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2;
-
+
bp = palloc0(size);
SET_VARSIZE(bp, size);
bp->dim = dim;
-
+
s = str1;
bp->x[i=0] = strtod(s, NULL);
while ((s = strchr(s, ',')) != NULL) {
s++; i++;
bp->x[i] = strtod(s, NULL);
- }
-
+ }
+
s = str2;
bp->x[i=dim] = strtod(s, NULL);
while ((s = strchr(s, ',')) != NULL) {
s++; i++;
bp->x[i] = strtod(s, NULL);
- }
+ }
return(bp);
}
int i, size;
double x;
char * s = str;
-
+
size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2;
bp = palloc0(size);
SET_VARSIZE(bp, size);
bp->dim = dim;
-
+
i = 0;
x = strtod(s, NULL);
bp->x[0] = x;
x = strtod(s, NULL);
bp->x[i] = x;
bp->x[i+dim] = x;
- }
+ }
return(bp);
}
%{
-/*
-** A scanner for EMP-style numeric ranges
+/*
+ * A scanner for EMP-style numeric ranges
* contrib/cube/cubescan.l
-*/
+ */
#include "postgres.h"
(0, 1, 2)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
cube_subset
---------------------------
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(2424, 160),(2424, 81)
(5 rows)
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
+-- Test sorting
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(0, 1, 2)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
cube_subset
---------------------------
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(2424, 160),(2424, 81)
(5 rows)
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
+-- Test sorting
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(0, 1, 2)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
cube_subset
---------------------------
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
(2424, 160),(2424, 81)
(5 rows)
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
+-- Test sorting
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
c
--------------------------
(337, 455),(240, 359)
SELECT cube('{0,1,2}'::float[], '{3}'::float[]);
SELECT cube(NULL::float[], '{3}'::float[]);
SELECT cube('{0,1,2}'::float[]);
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
-SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
-- Load some example data and build the index
---
+--
CREATE TABLE test_cube (c cube);
\copy test_cube from 'data/test_cube.data'
CREATE INDEX test_cube_ix ON test_cube USING gist (c);
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c;
--- Test sorting
-SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
+-- Test sorting
+SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c;
SHLIB_LINK = $(libpq)
SHLIB_PREREQS = submake-libpq
-DATA_built = dblink.sql
-DATA = uninstall_dblink.sql
+DATA_built = dblink.sql
+DATA = uninstall_dblink.sql
REGRESS = dblink
OUT notify_name TEXT,
OUT be_pid INT4,
OUT extra TEXT
-)
+)
RETURNS setof record
AS 'MODULE_PATHNAME', 'dblink_get_notify'
LANGUAGE C STRICT;
OUT notify_name TEXT,
OUT be_pid INT4,
OUT extra TEXT
-)
+)
RETURNS setof record
AS 'MODULE_PATHNAME', 'dblink_get_notify'
LANGUAGE C STRICT;
OK
(1 row)
-SELECT * from
+SELECT * from
dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1;
t1
----
OK
(1 row)
-SELECT * from
+SELECT * from
dblink_send_query('dtest2', 'select * from foo where f1 > 2 and f1 < 7') as t1;
t1
----
OK
(1 row)
-SELECT * from
+SELECT * from
dblink_send_query('dtest3', 'select * from foo where f1 > 6') as t1;
t1
----
OK
(1 row)
-SELECT * from
+SELECT * from
dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1;
t1
----
-- test asynchronous queries
SELECT dblink_connect('dtest1', 'dbname=contrib_regression');
-SELECT * from
+SELECT * from
dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1;
SELECT dblink_connect('dtest2', 'dbname=contrib_regression');
-SELECT * from
+SELECT * from
dblink_send_query('dtest2', 'select * from foo where f1 > 2 and f1 < 7') as t1;
SELECT dblink_connect('dtest3', 'dbname=contrib_regression');
-SELECT * from
+SELECT * from
dblink_send_query('dtest3', 'select * from foo where f1 > 6') as t1;
CREATE TEMPORARY TABLE result AS
SELECT * from result;
SELECT dblink_connect('dtest1', 'dbname=contrib_regression');
-SELECT * from
+SELECT * from
dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1;
SELECT dblink_cancel_query('dtest1');
CONSTRAINT on_surface check(abs(cube_distance(value, '(0)'::cube) /
earth() - 1) < '10e-7'::float8);
-CREATE OR REPLACE FUNCTION sec_to_gc(float8)
+CREATE OR REPLACE FUNCTION sec_to_gc(float8)
RETURNS float8
LANGUAGE SQL
IMMUTABLE STRICT
LANGUAGE SQL
IMMUTABLE STRICT
AS 'SELECT cube_enlarge($1, gc_to_sec($2), 3)';
-
+
--------------- geo_distance
CREATE OR REPLACE FUNCTION geo_distance (point, point)
AS 'MODULE_PATHNAME', 'difference'
LANGUAGE C IMMUTABLE STRICT;
-CREATE OR REPLACE FUNCTION dmetaphone (text) RETURNS text
+CREATE OR REPLACE FUNCTION dmetaphone (text) RETURNS text
AS 'MODULE_PATHNAME', 'dmetaphone'
LANGUAGE C IMMUTABLE STRICT;
-CREATE OR REPLACE FUNCTION dmetaphone_alt (text) RETURNS text
+CREATE OR REPLACE FUNCTION dmetaphone_alt (text) RETURNS text
AS 'MODULE_PATHNAME', 'dmetaphone_alt'
LANGUAGE C IMMUTABLE STRICT;
f
(1 row)
--- delete
+-- delete
select delete('a=>1 , b=>2, c=>3'::hstore, 'a');
delete
--------------------
select hstore 'a=>NULL, b=>qq' ?& ARRAY['c','d'];
select hstore 'a=>NULL, b=>qq' ?& '{}'::text[];
--- delete
+-- delete
select delete('a=>1 , b=>2, c=>3'::hstore, 'a');
select delete('a=>null , b=>2, c=>3'::hstore, 'a');
# contrib/intarray/Makefile
MODULE_big = _int
-OBJS = _int_bool.o _int_gist.o _int_op.o _int_tool.o _intbig_gist.o _int_gin.o
+OBJS = _int_bool.o _int_gist.o _int_op.o _int_tool.o _intbig_gist.o _int_gin.o
DATA_built = _int.sql
DATA = uninstall__int.sql
REGRESS = _int
-#!/usr/bin/perl
+#!/usr/bin/perl
use strict;
# make sure we are in a sane environment.
print <<EOT;
Usage:
$0 -d DATABASE -s SECTIONS [-b NUMBER] [-v] [-e] [-o] [-r] [-a] [-u]
--d DATABASE -DATABASE
--b NUMBER -number of repeats
--s SECTIONS -sections, format sid1[,sid2[,sid3[...]]]]
--v -verbose (show SQL)
+-d DATABASE -DATABASE
+-b NUMBER -number of repeats
+-s SECTIONS -sections, format sid1[,sid2[,sid3[...]]]]
+-v -verbose (show SQL)
-e -show explain
-r -use RD-tree index
-a -AND section
-o -show output
-u -unique
--c -count
+-c -count
EOT
exit;
foreach ( @a ) {
print "$_->{mid}\t$_->{sections}\n";
}
-}
+}
print sprintf("total: %.02f sec; number: %d; for one: %.03f sec; found %d docs\n", $elapsed, $b, $elapsed/$b, $count+1 );
$dbi -> disconnect;
sub exec_sql {
my ($dbi, $sql, @keys) = @_;
my $sth=$dbi->prepare($sql) || die;
- $sth->execute( @keys ) || die;
- my $r;
+ $sth->execute( @keys ) || die;
+ my $r;
my @row;
while ( defined ( $r=$sth->fetchrow_hashref ) ) {
push @row, $r;
- }
- $sth->finish;
+ }
+ $sth->finish;
return @row;
}
sections int[]
);
create table message_section_map (
- mid int not null,
+ mid int not null,
sid int not null
);
sub copytable {
my $t = shift;
-
+
print "COPY $t from stdin;\n";
open( FFF, "$t.tmp") || die;
while(<FFF>) { print; }
* For ISBN with prefix 978
* Range Table as of 2010-Jul-29
*/
-
+
/* where the digit set begins, and how many of them are in the table */
const unsigned ISBN_index[10][2] = {
{0, 6},
RETURNS ltree_gist
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
-
+
CREATE OR REPLACE FUNCTION ltree_gist_out(ltree_gist)
RETURNS cstring
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
-
+
CREATE TYPE ltree_gist (
internallength = -1,
input = ltree_gist_in,
output = ltree_gist_out,
storage = plain
-);
+);
CREATE OR REPLACE FUNCTION ltree_consistent(internal,internal,int2,oid,internal)
DROP FUNCTION ltree_consistent(internal,internal,int2,oid,internal);
DROP TYPE ltree_gist CASCADE;
-
+
DROP OPERATOR ^@ (ltxtquery, ltree);
DROP OPERATOR ^@ (ltree, ltxtquery);
# contrib/pg_buffercache/Makefile
MODULE_big = pg_buffercache
-OBJS = pg_buffercache_pages.o
+OBJS = pg_buffercache_pages.o
-DATA_built = pg_buffercache.sql
-DATA = uninstall_pg_buffercache.sql
+DATA_built = pg_buffercache.sql
+DATA = uninstall_pg_buffercache.sql
ifdef USE_PGXS
PG_CONFIG = pg_config
-- Create a view for convenient access.
CREATE VIEW pg_buffercache AS
SELECT P.* FROM pg_buffercache_pages() AS P
- (bufferid integer, relfilenode oid, reltablespace oid, reldatabase oid,
+ (bufferid integer, relfilenode oid, reltablespace oid, reldatabase oid,
relforknumber int2, relblocknumber int8, isdirty bool, usagecount int2);
-
+
-- Don't want these to be available at public.
REVOKE ALL ON FUNCTION pg_buffercache_pages() FROM PUBLIC;
REVOKE ALL ON pg_buffercache FROM PUBLIC;
# contrib/pg_freespacemap/Makefile
MODULE_big = pg_freespacemap
-OBJS = pg_freespacemap.o
+OBJS = pg_freespacemap.o
-DATA_built = pg_freespacemap.sql
-DATA = uninstall_pg_freespacemap.sql
+DATA_built = pg_freespacemap.sql
+DATA = uninstall_pg_freespacemap.sql
ifdef USE_PGXS
PG_CONFIG = pg_config
RETURNS bool
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
-
+
CREATE OR REPLACE FUNCTION gtrgm_compress(internal)
RETURNS internal
AS 'MODULE_PATHNAME'
DROP FUNCTION gtrgm_decompress(internal);
DROP FUNCTION gtrgm_compress(internal);
-
+
DROP FUNCTION gtrgm_consistent(internal,text,int,oid,internal);
DROP TYPE gtrgm CASCADE;
of time. If you have too much data, you may have to buy more storage
since you need enough room to hold the original data plus the exported
data. pg_upgrade can reduce the amount of time and disk space required
-for many upgrades.
+for many upgrades.
The URL http://momjian.us/main/writings/pgsql/pg_upgrade.pdf contains a
presentation about pg_upgrade internals that mirrors the text
b) For pre-9.0, remove 'regex_flavor'
f) For pre-9.0, adjust extra_float_digits
- Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0
- databases, and extra_float_digits=-3 for >= 9.0 databases.
- It is necessary to modify 9.0 pg_dump to always use -3, and
- modify the pre-9.0 old server to accept extra_float_digits=-3.
+ Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0
+ databases, and extra_float_digits=-3 for >= 9.0 databases.
+ It is necessary to modify 9.0 pg_dump to always use -3, and
+ modify the pre-9.0 old server to accept extra_float_digits=-3.
Once the dump is created, it can be repeatedly loaded into the old
database, upgraded, and dumped out of the new database, and then
3) Create the regression database in the old server.
-4) Load the dump file created above into the regression database;
+4) Load the dump file created above into the regression database;
check for errors while loading.
5) Upgrade the old database to the new major version, as outlined in
namelist[fileno]->d_name);
snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_dir,
maps[mapnum].new_relfilenode, strchr(namelist[fileno]->d_name, '_'));
-
+
unlink(new_file);
transfer_relfile(pageConverter, old_file, new_file,
maps[mapnum].old_nspname, maps[mapnum].old_relname,
3ea6357a0ee7fad6d0c4b63464f2aafa40c2e91b4b7e1bba8114932fd92b5c8f111e7e50e7b2e541
(1 row)
--- blowfish-448
+-- blowfish-448
SELECT encode(encrypt(
decode('fedcba9876543210', 'hex'),
decode('f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455667704689104c2fd3b2f584023641aba61761f1f1f1f0e0e0e0effffffffffffffff', 'hex'),
-- result: c04504012e4e1f53
-- empty data
-select encode( encrypt('', 'foo', 'bf'), 'hex');
+select encode(encrypt('', 'foo', 'bf'), 'hex');
encode
------------------
1871949bb2311c8e
(1 row)
-- 10 bytes key
-select encode( encrypt('foo', '0123456789', 'bf'), 'hex');
+select encode(encrypt('foo', '0123456789', 'bf'), 'hex');
encode
------------------
42f58af3b2c03f46
(1 row)
-- 22 bytes key
-select encode( encrypt('foo', '0123456789012345678901', 'bf'), 'hex');
+select encode(encrypt('foo', '0123456789012345678901', 'bf'), 'hex');
encode
------------------
86ab6f0bc72b5f22
INSERT INTO ctest VALUES ('password', '', '');
UPDATE ctest SET salt = gen_salt('bf', 8);
UPDATE ctest SET res = crypt(data, salt);
-SELECT res = crypt(data, res) AS "worked"
+SELECT res = crypt(data, res) AS "worked"
FROM ctest;
worked
--------
(1 row)
-- empty data
-select encode( encrypt('', 'foo', 'aes'), 'hex');
+select encode(encrypt('', 'foo', 'aes'), 'hex');
encode
----------------------------------
b48cc3338a2eb293b6007ef72c360d48
(1 row)
-- 10 bytes key
-select encode( encrypt('foo', '0123456789', 'aes'), 'hex');
+select encode(encrypt('foo', '0123456789', 'aes'), 'hex');
encode
----------------------------------
f397f03d2819b7172b68d0706fda4693
(1 row)
-- 22 bytes key
-select encode( encrypt('foo', '0123456789012345678901', 'aes'), 'hex');
+select encode(encrypt('foo', '0123456789012345678901', 'aes'), 'hex');
encode
----------------------------------
5c9db77af02b4678117bcd8a71ae7f53
(1 row)
select decrypt_iv(decode('2c24cb7da91d6d5699801268b0f5adad', 'hex'),
- '0123456', 'abcd', 'aes');
+ '0123456', 'abcd', 'aes');
decrypt_iv
------------
foo
};
static const u4byte rco_tab[10] = {
- 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
0x00000020, 0x00000040, 0x00000080, 0x0000001b, 0x00000036
};
decode('37363534333231204e6f77206973207468652074696d6520666f722000', 'hex'),
'bf-cbc'), 'hex');
--- blowfish-448
+-- blowfish-448
SELECT encode(encrypt(
decode('fedcba9876543210', 'hex'),
decode('f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455667704689104c2fd3b2f584023641aba61761f1f1f1f0e0e0e0effffffffffffffff', 'hex'),
-- result: c04504012e4e1f53
-- empty data
-select encode( encrypt('', 'foo', 'bf'), 'hex');
+select encode(encrypt('', 'foo', 'bf'), 'hex');
-- 10 bytes key
-select encode( encrypt('foo', '0123456789', 'bf'), 'hex');
+select encode(encrypt('foo', '0123456789', 'bf'), 'hex');
-- 22 bytes key
-select encode( encrypt('foo', '0123456789012345678901', 'bf'), 'hex');
+select encode(encrypt('foo', '0123456789012345678901', 'bf'), 'hex');
-- decrypt
select decrypt(encrypt('foo', '0123456', 'bf'), '0123456', 'bf');
UPDATE ctest SET salt = gen_salt('bf', 8);
UPDATE ctest SET res = crypt(data, salt);
-SELECT res = crypt(data, res) AS "worked"
+SELECT res = crypt(data, res) AS "worked"
FROM ctest;
DROP TABLE ctest;
'aes-cbc'), 'hex');
-- empty data
-select encode( encrypt('', 'foo', 'aes'), 'hex');
+select encode(encrypt('', 'foo', 'aes'), 'hex');
-- 10 bytes key
-select encode( encrypt('foo', '0123456789', 'aes'), 'hex');
+select encode(encrypt('foo', '0123456789', 'aes'), 'hex');
-- 22 bytes key
-select encode( encrypt('foo', '0123456789012345678901', 'aes'), 'hex');
+select encode(encrypt('foo', '0123456789012345678901', 'aes'), 'hex');
-- decrypt
select decrypt(encrypt('foo', '0123456', 'aes'), '0123456', 'aes');
-- iv
select encode(encrypt_iv('foo', '0123456', 'abcd', 'aes'), 'hex');
select decrypt_iv(decode('2c24cb7da91d6d5699801268b0f5adad', 'hex'),
- '0123456', 'abcd', 'aes');
+ '0123456', 'abcd', 'aes');
-- long message
select encode(encrypt('Lets try a longer message.', '0123456789', 'aes'), 'hex');
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_seg (s seg);
\copy test_seg from 'data/test_seg.data'
CREATE INDEX test_seg_ix ON test_seg USING gist (s);
143
(1 row)
--- Test sorting
+-- Test sorting
SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s;
s
-----------------
(1 row)
-- Load some example data and build the index
---
+--
CREATE TABLE test_seg (s seg);
\copy test_seg from 'data/test_seg.data'
CREATE INDEX test_seg_ix ON test_seg USING gist (s);
143
(1 row)
--- Test sorting
+-- Test sorting
SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s;
s
-----------------
SET search_path = public;
-- Create the user-defined type for 1-D floating point intervals (seg)
---
+--
CREATE OR REPLACE FUNCTION seg_in(cstring)
RETURNS seg
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION gseg_compress(internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION gseg_decompress(internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION gseg_union(internal, internal)
-RETURNS seg
+RETURNS seg
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION gseg_same(seg, seg, internal)
-RETURNS internal
+RETURNS internal
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE STRICT;
FUNCTION 1 seg_cmp(seg, seg);
CREATE OPERATOR CLASS gist_seg_ops
-DEFAULT FOR TYPE seg USING gist
+DEFAULT FOR TYPE seg USING gist
AS
OPERATOR 1 << ,
OPERATOR 2 &< ,
%{
#define YYPARSE_PARAM result /* need this to pass a pointer (void *) to yyparse */
-
+
#include "postgres.h"
#include <math.h>
extern int seg_yylex(void);
extern int significant_digits(char *str); /* defined in seg.c */
-
+
void seg_yyerror(const char *message);
int seg_yyparse(void *result);
$$.sigd = significant_digits($1);
$$.val = val;
}
- |
+ |
EXTENSION SEGFLOAT {
/* temp variable avoids a gcc 3.3.x bug on Sparc64 */
float val = seg_atof($2);
%{
-/*
-** A scanner for EMP-style numeric ranges
-*/
+/*
+ * A scanner for EMP-style numeric ranges
+ */
#include "postgres.h"
push @rows, $_;
}
-foreach ( sort {
+foreach ( sort {
@ar = split("\t", $a);
$valA = pop @ar;
$valA =~ s/[~<> ]+//g;
SELECT '1'::seg <@ '-1 .. 1'::seg AS bool;
-- Load some example data and build the index
---
+--
CREATE TABLE test_seg (s seg);
\copy test_seg from 'data/test_seg.data'
CREATE INDEX test_seg_ix ON test_seg USING gist (s);
SELECT count(*) FROM test_seg WHERE s @> '11..11.3';
--- Test sorting
+-- Test sorting
SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s;
-- Test functions
idesc text
);
-CREATE TRIGGER ids_nextid
+CREATE TRIGGER ids_nextid
BEFORE INSERT OR UPDATE ON ids
- FOR EACH ROW
+ FOR EACH ROW
EXECUTE PROCEDURE autoinc (id, next_id);
INSERT INTO ids VALUES (0, 'first (-2 ?)');
SELECT * FROM ids;
-UPDATE ids SET id = null, idesc = 'first: -2 --> 2'
+UPDATE ids SET id = null, idesc = 'first: -2 --> 2'
WHERE idesc = 'first (-2 ?)';
-UPDATE ids SET id = 0, idesc = 'second: -1 --> 3'
+UPDATE ids SET id = 0, idesc = 'second: -1 --> 3'
WHERE id = -1;
-UPDATE ids SET id = 4, idesc = 'third: 1 --> 4'
+UPDATE ids SET id = 4, idesc = 'third: 1 --> 4'
WHERE id = 1;
SELECT * FROM ids;
-- Adjust this setting to control where the objects get created.
SET search_path = public;
-CREATE OR REPLACE FUNCTION autoinc()
-RETURNS trigger
+CREATE OR REPLACE FUNCTION autoinc()
+RETURNS trigger
AS 'MODULE_PATHNAME'
LANGUAGE C;
CREATE TRIGGER insert_usernames
BEFORE INSERT OR UPDATE ON username_test
- FOR EACH ROW
+ FOR EACH ROW
EXECUTE PROCEDURE insert_username (username);
INSERT INTO username_test VALUES ('nothing');
-- Adjust this setting to control where the objects get created.
SET search_path = public;
-CREATE OR REPLACE FUNCTION insert_username()
-RETURNS trigger
+CREATE OR REPLACE FUNCTION insert_username()
+RETURNS trigger
AS 'MODULE_PATHNAME'
LANGUAGE C;
CREATE TRIGGER mdt_moddatetime
BEFORE UPDATE ON mdt
- FOR EACH ROW
+ FOR EACH ROW
EXECUTE PROCEDURE moddatetime (moddate);
INSERT INTO mdt VALUES (1, 'first');
--Trigger for table A:
CREATE TRIGGER AT BEFORE DELETE OR UPDATE ON A FOR EACH ROW
-EXECUTE PROCEDURE
+EXECUTE PROCEDURE
check_foreign_key (2, 'cascade', 'ID', 'B', 'REFB', 'C', 'REFC');
/*
2 - means that check must be performed for foreign keys of 2 tables.
-cascade - defines that corresponding keys must be deleted.
+cascade - defines that corresponding keys must be deleted.
ID - name of primary key column in triggered table (A). You may
use as many columns as you need.
B - name of (first) table with foreign keys.
--Trigger for table B:
CREATE TRIGGER BT BEFORE INSERT OR UPDATE ON B FOR EACH ROW
-EXECUTE PROCEDURE
+EXECUTE PROCEDURE
check_primary_key ('REFB', 'A', 'ID');
/*
-REFB - name of foreign key column in triggered (B) table. You may use as
+REFB - name of foreign key column in triggered (B) table. You may use as
many columns as you need, but number of key columns in referenced
table must be the same.
A - referenced table name.
--Trigger for table C:
CREATE TRIGGER CT BEFORE INSERT OR UPDATE ON C FOR EACH ROW
-EXECUTE PROCEDURE
+EXECUTE PROCEDURE
check_primary_key ('REFC', 'A', 'ID');
-- Now try
drop table tttest;
create table tttest (
- price_id int4,
- price_val int4,
+ price_id int4,
+ price_val int4,
price_on abstime,
price_off abstime
);
alter table tttest add column q2 int;
alter table tttest drop column q1;
-create trigger timetravel
+create trigger timetravel
before insert or delete or update on tttest
- for each row
- execute procedure
+ for each row
+ execute procedure
timetravel (price_on, price_off);
insert into tttest values (1, 1, null, null);
insert into tttest(price_id, price_val) values (2, 2);
insert into tttest(price_id, price_val,price_off) values (3, 3, 'infinity');
-insert into tttest(price_id, price_val,price_off) values (4, 4,
+insert into tttest(price_id, price_val,price_off) values (4, 4,
abstime('now'::timestamp - '100 days'::interval));
insert into tttest(price_id, price_val,price_on) values (3, 3, 'infinity'); -- duplicate key
select get_timetravel('tttest'); -- check status
-- we want to correct some date
-update tttest set price_on = 'Jan-01-1990 00:00:01' where price_id = 5 and
+update tttest set price_on = 'Jan-01-1990 00:00:01' where price_id = 5 and
price_off <> 'infinity';
-- but this doesn't work
select get_timetravel('tttest'); -- check status
-update tttest set price_on = '01-Jan-1990 00:00:01' where price_id = 5 and
+update tttest set price_on = '01-Jan-1990 00:00:01' where price_id = 5 and
price_off <> 'infinity';
select * from tttest;
-- isn't it what we need ?
-- get price for price_id == 5 as it was '10-Jan-1990'
-select * from tttest where price_id = 5 and
+select * from tttest where price_id = 5 and
price_on <= '10-Jan-1990' and price_off > '10-Jan-1990';
-- Adjust this setting to control where the objects get created.
SET search_path = public;
-CREATE OR REPLACE FUNCTION timetravel()
-RETURNS trigger
+CREATE OR REPLACE FUNCTION timetravel()
+RETURNS trigger
AS 'MODULE_PATHNAME'
LANGUAGE C;
-CREATE OR REPLACE FUNCTION set_timetravel(name, int4)
-RETURNS int4
+CREATE OR REPLACE FUNCTION set_timetravel(name, int4)
+RETURNS int4
AS 'MODULE_PATHNAME'
LANGUAGE C RETURNS NULL ON NULL INPUT;
-CREATE OR REPLACE FUNCTION get_timetravel(name)
-RETURNS int4
+CREATE OR REPLACE FUNCTION get_timetravel(name)
+RETURNS int4
AS 'MODULE_PATHNAME'
LANGUAGE C RETURNS NULL ON NULL INPUT;
#
# Created by David Wheeler, 2002.
-# modified by Ray Aspeitia 12-03-2003 :
+# modified by Ray Aspeitia 12-03-2003 :
# added log rotation script to db startup
-# modified StartupParameters.plist "Provides" parameter to make it easier to
+# modified StartupParameters.plist "Provides" parameter to make it easier to
# start and stop with the SystemStarter utitlity
# use the below command in order to correctly start/stop/restart PG with log rotation script:
'star'
(1 row)
-SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
+SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
to_tsquery('testcfg', 'stars'));
ts_headline
-----------------------------------------------------------------
SELECT to_tsquery('testcfg', 'star');
-SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
+SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
to_tsquery('testcfg', 'stars'));
53
(1 row)
-select to_tsquery('english', 'qwe & sKies ');
+select to_tsquery('english', 'qwe & sKies ');
to_tsquery
---------------
'qwe' & 'sky'
(1 row)
-select to_tsquery('simple', 'qwe & sKies ');
+select to_tsquery('simple', 'qwe & sKies ');
to_tsquery
-----------------
'qwe' & 'skies'
The granite features of this cliff
(1 row)
-
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
The <b>granite</b> features of this cliff
(1 row)
-
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
document.write(15);
</script>
</body>
-</html>',
+</html>',
to_tsquery('sea&foo'), 'HighlightAll=true');
headline
-----------------------------------------------------------------------------
53
(1 row)
-select to_tsquery('english', 'qwe & sKies ');
+select to_tsquery('english', 'qwe & sKies ');
to_tsquery
---------------
'qwe' & 'sky'
(1 row)
-select to_tsquery('simple', 'qwe & sKies ');
+select to_tsquery('simple', 'qwe & sKies ');
to_tsquery
-----------------
'qwe' & 'skies'
The granite features of this cliff
(1 row)
-
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
The <b>granite</b> features of this cliff
(1 row)
-
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
document.write(15);
</script>
</body>
-</html>',
+</html>',
to_tsquery('sea&foo'), 'HighlightAll=true');
headline
-----------------------------------------------------------------------------
<i <b> wow < jqw <> qwerty'));
-select to_tsquery('english', 'qwe & sKies ');
-select to_tsquery('simple', 'qwe & sKies ');
+select to_tsquery('english', 'qwe & sKies ');
+select to_tsquery('simple', 'qwe & sKies ');
select to_tsquery('english', '''the wether'':dc & '' sKies '':BC ');
select to_tsquery('english', 'asd&(and|fghj)');
select to_tsquery('english', '(asd&and)|fghj');
The sculpture of these granite seams,
Upon a woman s face. E. J. Pratt (1882 1964)
', to_tsquery('sea&thousand&years'));
-
+
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
The sculpture of these granite seams,
Upon a woman s face. E. J. Pratt (1882 1964)
', to_tsquery('granite&sea'));
-
+
select headline('Erosion It took the sea a thousand years,
A thousand years to trace
The granite features of this cliff
document.write(15);
</script>
</body>
-</html>',
+</html>',
to_tsquery('sea&foo'), 'HighlightAll=true');
--check debug
select * from public.ts_debug('Tsearch module for PostgreSQL 7.3.3');
CREATE DOMAIN gtsq AS pg_catalog.text;
--dict interface
-CREATE FUNCTION lexize(oid, text)
+CREATE FUNCTION lexize(oid, text)
RETURNS _text
as 'ts_lexize'
LANGUAGE INTERNAL
--built-in dictionaries
CREATE FUNCTION dex_init(internal)
RETURNS internal
- as 'MODULE_PATHNAME', 'tsa_dex_init'
+ as 'MODULE_PATHNAME', 'tsa_dex_init'
LANGUAGE C;
CREATE FUNCTION dex_lexize(internal,internal,int4)
CREATE FUNCTION snb_ru_init_koi8(internal)
RETURNS internal
- as 'MODULE_PATHNAME', 'tsa_snb_ru_init_koi8'
+ as 'MODULE_PATHNAME', 'tsa_snb_ru_init_koi8'
LANGUAGE C;
CREATE FUNCTION snb_ru_init_utf8(internal)
CREATE FUNCTION spell_init(internal)
RETURNS internal
- as 'MODULE_PATHNAME', 'tsa_spell_init'
+ as 'MODULE_PATHNAME', 'tsa_spell_init'
LANGUAGE C;
CREATE FUNCTION spell_lexize(internal,internal,int4)
CREATE FUNCTION syn_init(internal)
RETURNS internal
- as 'MODULE_PATHNAME', 'tsa_syn_init'
+ as 'MODULE_PATHNAME', 'tsa_syn_init'
LANGUAGE C;
CREATE FUNCTION syn_lexize(internal,internal,int4)
RETURNS NULL ON NULL INPUT;
--sql-level interface
-CREATE TYPE tokentype
- as (tokid int4, alias text, descr text);
+CREATE TYPE tokentype
+ as (tokid int4, alias text, descr text);
CREATE FUNCTION token_type(int4)
RETURNS setof tokentype
LANGUAGE C
RETURNS NULL ON NULL INPUT;
-CREATE TYPE tokenout
+CREATE TYPE tokenout
as (tokid int4, token text);
CREATE FUNCTION parse(oid,text)
as 'ts_parse_byid'
LANGUAGE INTERNAL
RETURNS NULL ON NULL INPUT;
-
+
CREATE FUNCTION parse(text,text)
RETURNS setof tokenout
as 'ts_parse_byname'
LANGUAGE INTERNAL
RETURNS NULL ON NULL INPUT;
-
+
CREATE FUNCTION parse(text)
RETURNS setof tokenout
as 'MODULE_PATHNAME', 'tsa_parse_current'
LANGUAGE C
RETURNS NULL ON NULL INPUT;
-
+
--default parser
CREATE FUNCTION prsd_start(internal,int4)
RETURNS internal
STORAGE gtsvector;
--stat info
-CREATE TYPE statinfo
+CREATE TYPE statinfo
as (word text, ndoc int4, nentry int4);
CREATE FUNCTION stat(text)
CREATE OPERATOR CLASS tsvector_ops
FOR TYPE tsvector USING btree AS
OPERATOR 1 < ,
- OPERATOR 2 <= ,
+ OPERATOR 2 <= ,
OPERATOR 3 = ,
OPERATOR 4 >= ,
OPERATOR 5 > ,
REGRESS = unaccent
# Adjust REGRESS_OPTS because we need a UTF8 database
-REGRESS_OPTS = --dbname=$(CONTRIB_TESTDB) --multibyte=UTF8 --no-locale
+REGRESS_OPTS = --dbname=$(CONTRIB_TESTDB) --multibyte=UTF8 --no-locale
ifdef USE_PGXS
PG_CONFIG = pg_config
(1 row)
-select xslt_process( query_to_xml('select x from generate_series(1,5) as
+select xslt_process( query_to_xml('select x from generate_series(1,5) as
x',true,false,'')::text,
$$<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
(1 row)
-select xslt_process( query_to_xml('select x from generate_series(1,5) as
+select xslt_process( query_to_xml('select x from generate_series(1,5) as
x',true,false,'')::text,
$$<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
select query_to_xml('select 1 as x',true,false,'');
-select xslt_process( query_to_xml('select x from generate_series(1,5) as
+select xslt_process( query_to_xml('select x from generate_series(1,5) as
x',true,false,'')::text,
$$<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
Please describe a way to repeat the problem. Please try to provide a
-concise reproducible example, if at all possible:
+concise reproducible example, if at all possible:
----------------------------------------------------------------------
endif
# Enable some extra warnings
-# -wfully-tagged needed to throw a warning on missing tags
+# -wfully-tagged needed to throw a warning on missing tags
# for older tool chains, 2007-08-31
# Note: try "make SPFLAGS=-wxml" to catch a lot of other dubious constructs,
# in particular < and & that haven't been made into entities. It's far too
<listitem>
<para>
<varname>auto_explain.log_buffers</varname> causes <command>EXPLAIN
- (ANALYZE, BUFFERS)</> output, rather than just <command>EXPLAIN</>
- output, to be printed when an execution plan is logged. This parameter is
+ (ANALYZE, BUFFERS)</> output, rather than just <command>EXPLAIN</>
+ output, to be printed when an execution plan is logged. This parameter is
off by default. Only superusers can change this setting. This
parameter has no effect unless <varname>auto_explain.log_analyze</>
parameter is set.
<bibliodiv>
<title>Proceedings and Articles</title>
<para>This section is for articles and newsletters.</para>
-
+
<biblioentry id="OLSON93">
<title>Partial indexing in POSTGRES: research project</title>
<titleabbrev>Olson, 1993</titleabbrev>
<biblioset relation="article">
<title>Generalized Partial Indexes
<ulink url="http://citeseer.ist.psu.edu/seshadri95generalized.html">(cached version)
-<!--
+<!--
Original URL: http://citeseer.ist.psu.edu/seshadri95generalized.html
-->
</ulink>
locale then the specifications can take the form
<replaceable>language_territory.codeset</>. For example,
<literal>fr_BE.UTF-8</> represents the French language (fr) as
- spoken in Belgium (BE), with a <acronym>UTF-8</> character set
+ spoken in Belgium (BE), with a <acronym>UTF-8</> character set
encoding.
</para>
<listitem>
<para>
Sets the location of the Kerberos server key file. See
- <xref linkend="kerberos-auth"> or <xref linkend="gssapi-auth">
+ <xref linkend="kerberos-auth"> or <xref linkend="gssapi-auth">
for details. This parameter can only be set in the
<filename>postgresql.conf</> file or on the server command line.
</para>
<para>
Sets whether Kerberos and GSSAPI user names should be treated
case-insensitively.
- The default is <literal>off</> (case sensitive). This parameter can only be
+ The default is <literal>off</> (case sensitive). This parameter can only be
set in the <filename>postgresql.conf</> file or on the server command line.
</para>
</listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-shared-preload-libraries" xreflabel="shared_preload_libraries">
<term><varname>shared_preload_libraries</varname> (<type>string</type>)</term>
<indexterm>
when the library is first used. However, the time to start each new
server process might increase slightly, even if that process never
uses the library. So this parameter is recommended only for
- libraries that will be used in most sessions.
+ libraries that will be used in most sessions.
</para>
<note>
On Windows hosts, preloading a library at server start will not reduce
the time required to start each new server process; each server process
will re-load all preload libraries. However, <varname>shared_preload_libraries
- </varname> is still useful on Windows hosts because some shared libraries may
+ </varname> is still useful on Windows hosts because some shared libraries may
need to perform certain operations that only take place at postmaster start
(for example, a shared library may need to reserve lightweight locks
or shared memory and you can't do that after the postmaster has started).
<para>
Every PostgreSQL-supported library has a <quote>magic
- block</> that is checked to guarantee compatibility.
- For this reason, non-PostgreSQL libraries cannot be
+ block</> that is checked to guarantee compatibility.
+ For this reason, non-PostgreSQL libraries cannot be
loaded in this way.
</para>
</listitem>
<para>
<varname>fsync</varname> can only be set in the <filename>postgresql.conf</>
file or on the server command line.
- If you turn this parameter off, also consider turning off
+ If you turn this parameter off, also consider turning off
<xref linkend="guc-full-page-writes">.
</para>
</listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-wal-sync-method" xreflabel="wal_sync_method">
<term><varname>wal_sync_method</varname> (<type>enum</type>)</term>
<indexterm>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-full-page-writes" xreflabel="full_page_writes">
<indexterm>
<primary><varname>full_page_writes</> configuration parameter</primary>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-archive-timeout" xreflabel="archive_timeout">
<term><varname>archive_timeout</varname> (<type>integer</type>)</term>
<indexterm>
</para>
</listitem>
</varlistentry>
-
+
</variablelist>
</sect2>
<sect2 id="runtime-config-query-constants">
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-cpu-operator-cost" xreflabel="cpu_operator_cost">
<term><varname>cpu_operator_cost</varname> (<type>floating point</type>)</term>
<indexterm>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-effective-cache-size" xreflabel="effective_cache_size">
<term><varname>effective_cache_size</varname> (<type>integer</type>)</term>
<indexterm>
<productname>PostgreSQL</productname> supports several methods
for logging server messages, including
<systemitem>stderr</systemitem>, <systemitem>csvlog</systemitem> and
- <systemitem>syslog</systemitem>. On Windows,
+ <systemitem>syslog</systemitem>. On Windows,
<systemitem>eventlog</systemitem> is also supported. Set this
parameter to a list of desired log destinations separated by
- commas. The default is to log to <systemitem>stderr</systemitem>
+ commas. The default is to log to <systemitem>stderr</systemitem>
only.
This parameter can only be set in the <filename>postgresql.conf</>
file or on the server command line.
value</> (<acronym>CSV</>) format, which is convenient for
loading logs into programs.
See <xref linkend="runtime-config-logging-csvlog"> for details.
- <varname>logging_collector</varname> must be enabled to generate
+ <varname>logging_collector</varname> must be enabled to generate
CSV-format log output.
</para>
</indexterm>
<listitem>
<para>
- When <varname>logging_collector</> is enabled,
+ When <varname>logging_collector</> is enabled,
this parameter determines the directory in which log files will be created.
It can be specified as an absolute path, or relative to the
cluster data directory.
</para>
<para>
If CSV-format output is enabled in <varname>log_destination</>,
- <literal>.csv</> will be appended to the timestamped
+ <literal>.csv</> will be appended to the timestamped
log file name to create the file name for CSV-format output.
(If <varname>log_filename</> ends in <literal>.log</>, the suffix is
replaced instead.)
</para>
<para>
Example: To keep 7 days of logs, one log file per day named
- <literal>server_log.Mon</literal>, <literal>server_log.Tue</literal>,
+ <literal>server_log.Mon</literal>, <literal>server_log.Tue</literal>,
etc, and automatically overwrite last week's log with this week's log,
- set <varname>log_filename</varname> to <literal>server_log.%a</literal>,
- <varname>log_truncate_on_rotation</varname> to <literal>on</literal>, and
+ set <varname>log_filename</varname> to <literal>server_log.%a</literal>,
+ <varname>log_truncate_on_rotation</varname> to <literal>on</literal>, and
<varname>log_rotation_age</varname> to <literal>1440</literal>.
</para>
<para>
- Example: To keep 24 hours of logs, one log file per hour, but
- also rotate sooner if the log file size exceeds 1GB, set
- <varname>log_filename</varname> to <literal>server_log.%H%M</literal>,
- <varname>log_truncate_on_rotation</varname> to <literal>on</literal>,
- <varname>log_rotation_age</varname> to <literal>60</literal>, and
+ Example: To keep 24 hours of logs, one log file per hour, but
+ also rotate sooner if the log file size exceeds 1GB, set
+ <varname>log_filename</varname> to <literal>server_log.%H%M</literal>,
+ <varname>log_truncate_on_rotation</varname> to <literal>on</literal>,
+ <varname>log_rotation_age</varname> to <literal>60</literal>, and
<varname>log_rotation_size</varname> to <literal>1000000</literal>.
Including <literal>%M</> in <varname>log_filename</varname> allows
any size-driven rotations that might occur to select a file name
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-syslog-ident" xreflabel="syslog_ident">
<term><varname>syslog_ident</varname> (<type>string</type>)</term>
<indexterm>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-log-min-duration-statement" xreflabel="log_min_duration_statement">
<term><varname>log_min_duration_statement</varname> (<type>integer</type>)</term>
<indexterm>
the text of statements that are logged because of
<varname>log_statement</> will not be repeated in the
duration log message.
- If you are not using <application>syslog</>, it is recommended
+ If you are not using <application>syslog</>, it is recommended
that you log the PID or session ID using
<xref linkend="guc-log-line-prefix">
so that you can link the statement message to the later
<note>
<para>
- Some client programs, like <application>psql</>, attempt
- to connect twice while determining if a password is required, so
+ Some client programs, like <application>psql</>, attempt
+ to connect twice while determining if a password is required, so
duplicate <quote>connection received</> messages do not
necessarily indicate a problem.
</para>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry id="guc-log-line-prefix" xreflabel="log_line_prefix">
<term><varname>log_line_prefix</varname> (<type>string</type>)</term>
<indexterm>
<tip>
<para>
- <application>Syslog</> produces its own
+ <application>Syslog</> produces its own
time stamp and process ID information, so you probably do not want to
include those escapes if you are logging to <application>syslog</>.
</para>
<listitem>
<para>
- Set <varname>log_rotation_size</varname> to 0 to disable
- size-based log rotation, as it makes the log file name difficult
- to predict.
+ Set <varname>log_rotation_size</varname> to 0 to disable
+ size-based log rotation, as it makes the log file name difficult
+ to predict.
</para>
</listitem>
<para>
Every PostgreSQL-supported library has a <quote>magic
- block</> that is checked to guarantee compatibility.
+ block</> that is checked to guarantee compatibility.
For this reason, non-PostgreSQL libraries cannot be
loaded in this way.
</para>
<para>
Refer to the introduction in this manual or to the
-<productname>PostgreSQL</productname>
+<productname>PostgreSQL</productname>
<ulink url="http://www.postgresql.org">web page</ulink>
for subscription information to the no-cost mailing lists.
</para>
<para>
When building from the source distribution, these modules are not built
- automatically, unless you build the "world" target
+ automatically, unless you build the "world" target
(see <xref linkend="build">).
You can build and install all of them by running:
<screen>
<para>
<xref linkend="datatype-table"> shows all the built-in general-purpose data
- types. Most of the alternative names listed in the
+ types. Most of the alternative names listed in the
<quote>Aliases</quote> column are the names used internally by
<productname>PostgreSQL</productname> for historical reasons. In
addition, some internally used or deprecated types are available,
<para>
In addition to ordinary numeric values, the <type>numeric</type>
- type allows the special value <literal>NaN</>, meaning
+ type allows the special value <literal>NaN</>, meaning
<quote>not-a-number</quote>. Any operation on <literal>NaN</>
yields another <literal>NaN</>. When writing this value
as a constant in an SQL command, you must put quotes around it,
<type>float(<replaceable>p</replaceable>)</type> for specifying
inexact numeric types. Here, <replaceable>p</replaceable> specifies
the minimum acceptable precision in <emphasis>binary</> digits.
- <productname>PostgreSQL</productname> accepts
+ <productname>PostgreSQL</productname> accepts
<type>float(1)</type> to <type>float(24)</type> as selecting the
- <type>real</type> type, while
+ <type>real</type> type, while
<type>float(25)</type> to <type>float(53)</type> select
<type>double precision</type>. Values of <replaceable>p</replaceable>
outside the allowed range draw an error.
<para>
Date and time input is accepted in almost any reasonable format, including
- ISO 8601, <acronym>SQL</acronym>-compatible,
+ ISO 8601, <acronym>SQL</acronym>-compatible,
traditional <productname>POSTGRES</productname>, and others.
For some formats, ordering of day, month, and year in date input is
ambiguous and there is support for specifying the expected
See <xref linkend="datetime-appendix">
for the exact parsing rules of date/time input and for the
recognized text fields including months, days of the week, and
- time zones.
+ time zones.
</para>
<para>
Remember that any date or time literal input needs to be enclosed
- in single quotes, like text strings. Refer to
+ in single quotes, like text strings. Refer to
<xref linkend="sql-syntax-constants-generic"> for more
information.
<acronym>SQL</acronym> requires the following syntax
<indexterm>
<primary>date</primary>
</indexterm>
-
+
<para>
<xref linkend="datatype-datetime-date-table"> shows some possible
inputs for the <type>date</type> type.
<para>
Valid input for these types consists of a time of day followed
by an optional time zone. (See <xref
- linkend="datatype-datetime-time-table">
+ linkend="datatype-datetime-time-table">
and <xref linkend="datatype-timezone-table">.) If a time zone is
specified in the input for <type>time without time zone</type>,
it is silently ignored. You can also specify a date but it will
<para>
The <acronym>SQL</acronym> standard differentiates
- <type>timestamp without time zone</type>
- and <type>timestamp with time zone</type> literals by the presence of a
+ <type>timestamp without time zone</type>
+ and <type>timestamp with time zone</type> literals by the presence of a
<quote>+</quote> or <quote>-</quote> symbol and time zone offset after
the time. Hence, according to the standard,
The following <acronym>SQL</acronym>-compatible functions can also
be used to obtain the current time value for the corresponding data
type:
- <literal>CURRENT_DATE</literal>, <literal>CURRENT_TIME</literal>,
- <literal>CURRENT_TIMESTAMP</literal>, <literal>LOCALTIME</literal>,
- <literal>LOCALTIMESTAMP</literal>. The latter four accept an
- optional subsecond precision specification. (See <xref
+ <literal>CURRENT_DATE</literal>, <literal>CURRENT_TIME</literal>,
+ <literal>CURRENT_TIMESTAMP</literal>, <literal>LOCALTIME</literal>,
+ <literal>LOCALTIMESTAMP</literal>. The latter four accept an
+ optional subsecond precision specification. (See <xref
linkend="functions-datetime-current">.) Note that these are
SQL functions and are <emphasis>not</> recognized in data input strings.
</para>
<itemizedlist>
<listitem>
<para>
- Although the <type>date</type> type
+ Although the <type>date</type> type
cannot have an associated time zone, the
<type>time</type> type can.
- Time zones in the real world have little meaning unless
+ Time zones in the real world have little meaning unless
associated with a date as well as a time,
since the offset can vary through the year with daylight-saving
time boundaries.
<listitem>
<para>
- The default time zone is specified as a constant numeric offset
+ The default time zone is specified as a constant numeric offset
from <acronym>UTC</>. It is therefore impossible to adapt to
daylight-saving time when doing date/time arithmetic across
<acronym>DST</acronym> boundaries.
order in which the values were listed when the type was created.
All standard comparison operators and related
aggregate functions are supported for enums. For example:
-
+
<programlisting>
INSERT INTO person VALUES ('Larry', 'sad');
INSERT INTO person VALUES ('Curly', 'ok');
Moe | happy
(2 rows)
-SELECT name
+SELECT name
FROM person
WHERE current_mood = (SELECT MIN(current_mood) FROM person);
name
<sect2>
<title>Implementation Details</title>
-
+
<para>
An enum value occupies four bytes on disk. The length of an enum
value's textual label is limited by the <symbol>NAMEDATALEN</symbol>
<table id="datatype-net-cidr-table">
<title><type>cidr</> Type Input Examples</title>
<tgroup cols="3">
- <thead>
- <row>
+ <thead>
+ <row>
<entry><type>cidr</type> Input</entry>
<entry><type>cidr</type> Output</entry>
<entry><literal><function>abbrev(<type>cidr</type>)</function></literal></entry>
for searching:
<programlisting>
-SELECT to_tsvector('english', 'The Fat Rats');
+SELECT to_tsvector('english', 'The Fat Rats');
to_tsvector
-----------------
'fat':2 'rat':3
functions for UUIDs, but the core database does not include any
function for generating UUIDs, because no single algorithm is well
suited for every application. The contrib module
- <filename>contrib/uuid-ossp</filename> provides functions that implement
+ <filename>contrib/uuid-ossp</filename> provides functions that implement
several standard algorithms.
Alternatively, UUIDs could be generated by client applications or
other libraries invoked through a server-side function.
checks the input values for well-formedness, and there are support
functions to perform type-safe operations on it; see <xref
linkend="functions-xml">. Use of this data type requires the
- installation to have been built with <command>configure
+ installation to have been built with <command>configure
--with-libxml</>.
</para>
<para>
If the token is a text string, match up with possible strings:
</para>
-
+
<substeps>
<step>
<para>
abbreviation.
</para>
</step>
-
+
<step>
<para>
If not found, do a similar binary-search table lookup to match
</step>
</substeps>
</step>
-
+
<step>
<para>
When the token is a number or number field:
<step>
<para>
If there are eight or six digits,
- and if no other date fields have been previously read, then interpret
+ and if no other date fields have been previously read, then interpret
as a <quote>concatenated date</quote> (e.g.,
<literal>19990118</literal> or <literal>990118</literal>).
The interpretation is <literal>YYYYMMDD</> or <literal>YYMMDD</>.
and a year has already been read, then interpret as day of year.
</para>
</step>
-
+
<step>
<para>
If four or six digits and a year has already been read, then
about 1 day in 128 years.
</para>
- <para>
+ <para>
The accumulating calendar error prompted
Pope Gregory XIII to reform the calendar in accordance with
instructions from the Council of Trent.
the beginnings of the Chinese calendar can be traced back to the 14th
century BC. Legend has it that the Emperor Huangdi invented that
calendar in 2637 BC.
-
+
The People's Republic of China uses the Gregorian calendar
for civil purposes. The Chinese calendar is used for determining
festivals.
<para>
The <quote>Julian Date</quote> is unrelated to the <quote>Julian
- calendar</quote>.
+ calendar</quote>.
The Julian Date system was invented by the French scholar
Joseph Justus Scaliger (1540-1609)
and probably takes its name from Scaliger's father,
<para>
Here is an example. It assumes the developer tools are installed.
<programlisting>
-cc -c foo.c
+cc -c foo.c
cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o
</programlisting>
</para>
</varlistentry>
<varlistentry>
- <term><systemitem class="osname">Tru64 UNIX</></term>
+ <term><systemitem class="osname">Tru64 UNIX</></term>
<indexterm><primary>Tru64 UNIX</><secondary>shared library</></>
<indexterm><primary>Digital UNIX</><see>Tru64 UNIX</></>
<listitem>
<tip>
<para>
- If this is too complicated for you, you should consider using
+ If this is too complicated for you, you should consider using
<ulink url="http://www.gnu.org/software/libtool/">
<productname>GNU Libtool</productname></ulink>,
which hides the platform differences behind a uniform interface.
<para>
It's possible that the ports do not update the main catalog file
- in <filename>/usr/local/share/sgml/catalog.ports</filename> or order
+ in <filename>/usr/local/share/sgml/catalog.ports</filename> or order
 isn't proper. Be sure to have the following lines at the beginning of the file:
<programlisting>
CATALOG "openjade/catalog"
</screen>
</para>
</listitem>
-
+
<listitem>
<para>
To make a <acronym>PDF</acronym>:
<para>
Norm Walsh offers a
<ulink url="http://nwalsh.com/emacs/docbookide/index.html">major mode</ulink>
- specifically for DocBook which also has font-lock and a number of features to
+ specifically for DocBook which also has font-lock and a number of features to
reduce typing.
</para>
</sect2>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Description</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Options</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Exit Status</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Usage</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Environment</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Files</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Diagnostics</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Notes</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>Examples</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>History</term>
<listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term>See Also</term>
<listitem>
<literal>unix:postgresql://<replaceable>hostname</><optional>:<replaceable>port</></optional><optional>/<replaceable>dbname</></optional><optional>?<replaceable>options</></optional></literal>
</simpara>
</listitem>
-
+
<listitem>
<simpara>
an SQL string literal containing one of the above forms
a reference to a character variable containing one of the above forms (see examples)
</simpara>
</listitem>
-
+
<listitem>
<simpara>
<literal>DEFAULT</literal>
The function returns the parsed timestamp on success. On error,
<literal>PGTYPESInvalidTimestamp</literal> is returned and <varname>errno</> is
set to <literal>PGTYPES_TS_BAD_TIMESTAMP</>. See <xref linkend="PGTYPESInvalidTimestamp"> for important notes on this value.
-
</para>
<para>
In general, the input string can contain any combination of an allowed
You can use the following format specifiers for the format mask. The
format specifiers are the same ones that are used in the
<function>strftime</> function in <productname>libc</productname>. Any
- non-format specifier will be copied into the output buffer.
+ non-format specifier will be copied into the output buffer.
<!-- This is from the FreeBSD man page:
http://www.freebsd.org/cgi/man.cgi?query=strftime&apropos=0&sektion=3&manpath=FreeBSD+7.0-current&format=html
-->
<literal>%E*</literal> <literal>%O*</literal> - POSIX locale
extensions. The sequences
<literal>%Ec</literal>
- <literal>%EC</literal>
- <literal>%Ex</literal>
- <literal>%EX</literal>
- <literal>%Ey</literal>
- <literal>%EY</literal>
- <literal>%Od</literal>
+ <literal>%EC</literal>
+ <literal>%Ex</literal>
+ <literal>%EX</literal>
+ <literal>%Ey</literal>
+ <literal>%EY</literal>
+ <literal>%Od</literal>
<literal>%Oe</literal>
- <literal>%OH</literal>
- <literal>%OI</literal>
- <literal>%Om</literal>
- <literal>%OM</literal>
- <literal>%OS</literal>
- <literal>%Ou</literal>
- <literal>%OU</literal>
- <literal>%OV</literal>
- <literal>%Ow</literal>
- <literal>%OW</literal>
- <literal>%Oy</literal>
+ <literal>%OH</literal>
+ <literal>%OI</literal>
+ <literal>%Om</literal>
+ <literal>%OM</literal>
+ <literal>%OS</literal>
+ <literal>%Ou</literal>
+ <literal>%OU</literal>
+ <literal>%OV</literal>
+ <literal>%Ow</literal>
+ <literal>%OW</literal>
+ <literal>%Oy</literal>
are supposed to provide alternative representations.
</para>
<para>
<note>
<para>
On Windows, if the <application>ecpg</> libraries and an application are
- compiled with different flags, this function call will crash the
- application because the internal representation of the
+ compiled with different flags, this function call will crash the
+ application because the internal representation of the
<literal>FILE</> pointers differ. Specifically,
- multithreaded/single-threaded, release/debug, and static/dynamic
+ multithreaded/single-threaded, release/debug, and static/dynamic
flags should be the same for the library and all applications using
that library.
</para>
<function>ECPGget_PGconn(const char *<replaceable>connection_name</replaceable>)
</function> returns the library database connection handle identified by the given name.
If <replaceable>connection_name</replaceable> is set to <literal>NULL</literal>, the current
- connection handle is returned. If no connection handle can be identified, the function returns
+ connection handle is returned. If no connection handle can be identified, the function returns
<literal>NULL</literal>. The returned connection handle can be used to call any other functions
from <application>libpq</application>, if necessary.
</para>
<function>ECPGstatus(int <replaceable>lineno</replaceable>,
const char* <replaceable>connection_name</replaceable>)</function>
returns true if you are connected to a database and false if not.
- <replaceable>connection_name</replaceable> can be <literal>NULL</>
+ <replaceable>connection_name</replaceable> can be <literal>NULL</>
if a single connection is being used.
</para>
</listitem>
Pointer to the field data. The pointer is of <literal>char *</literal> type,
the data pointed by it is in a binary format. Example:
<programlisting>
-int intval;
+int intval;
switch (sqldata->sqlvar[i].sqltype)
{
<listitem>
<para>
Pointer to the NULL indicator. If returned by DESCRIBE or FETCH then it's always a valid pointer.
- If used as input for <literal>EXECUTE ... USING sqlda;</literal> then NULL-pointer value means
+ If used as input for <literal>EXECUTE ... USING sqlda;</literal> then NULL-pointer value means
that the value for this field is non-NULL. Otherwise a valid pointer and <literal>sqlitype</literal>
has to be properly set. Example:
<programlisting>
<listitem>
<para>
Type of the NULL indicator data. It's always SQLSMINT when returning data from the server.
- When the <literal>SQLDA</literal> is used for a parametrized query, the data is treated
+ When the <literal>SQLDA</literal> is used for a parametrized query, the data is treated
according to the set type.
</para>
</listitem>
<varlistentry>
<term><literal>sqltypename</></term>
- <term><literal>sqltypelen</></term>
+ <term><literal>sqltypelen</></term>
<term><literal>sqlownerlen</></term>
<term><literal>sqlsourcetype</></term>
- <term><literal>sqlownername</></term>
- <term><literal>sqlsourceid</></term>
- <term><literal>sqlflags</></term>
- <term><literal>sqlreserved</></term>
+ <term><literal>sqlownername</></term>
+ <term><literal>sqlsourceid</></term>
+ <term><literal>sqlflags</></term>
+ <term><literal>sqlreserved</></term>
<listitem>
<para>
Unused.
<para>
The function returns either -1 if the buffer <literal>cp</> was too
small or <literal>ECPG_INFORMIX_OUT_OF_MEMORY</> if memory was
- exhausted.
+ exhausted.
</para>
</listitem>
</varlistentry>
</listitem>
<listitem>
- <para>
+ <para>
A pointer to the value or a pointer to the pointer.
</para>
</listitem>
<para>
In the sections that follow, we will discuss how you
- can extend the <productname>PostgreSQL</productname>
+ can extend the <productname>PostgreSQL</productname>
<acronym>SQL</acronym> query language by adding:
<itemizedlist spacing="compact" mark="bullet">
<title>How Extensibility Works</title>
<para>
- <productname>PostgreSQL</productname> is extensible because its operation is
- catalog-driven. If you are familiar with standard
+ <productname>PostgreSQL</productname> is extensible because its operation is
+ catalog-driven. If you are familiar with standard
relational database systems, you know that they store information
about databases, tables, columns, etc., in what are
commonly known as system catalogs. (Some systems call
user as tables like any other, but the <acronym>DBMS</acronym> stores
its internal bookkeeping in them. One key difference
between <productname>PostgreSQL</productname> and standard relational database systems is
- that <productname>PostgreSQL</productname> stores much more information in its
+ that <productname>PostgreSQL</productname> stores much more information in its
catalogs: not only information about tables and columns,
but also information about data types, functions, access
methods, and so on. These tables can be modified by
- the user, and since <productname>PostgreSQL</productname> bases its operation
+ the user, and since <productname>PostgreSQL</productname> bases its operation
on these tables, this means that <productname>PostgreSQL</productname> can be
extended by users. By comparison, conventional
- database systems can only be extended by changing hardcoded
+ database systems can only be extended by changing hardcoded
procedures in the source code or by loading modules
specially written by the <acronym>DBMS</acronym> vendor.
</para>
parsed. Each position (either argument or return value) declared as
<type>anyelement</type> is allowed to have any specific actual
data type, but in any given call they must all be the
- <emphasis>same</emphasis> actual type. Each
+ <emphasis>same</emphasis> actual type. Each
position declared as <type>anyarray</type> can have any array data type,
but similarly they must all be the same type. If there are
positions declared <type>anyarray</type> and others declared
</tgroup>
</table>
</sect1>
-
+
<sect1 id="external-extensions">
<title>Extensions</title>
<para>
There are several administration tools available for
<productname>PostgreSQL</>. The most popular is
- <application><ulink url="http://www.pgadmin.org/">pgAdmin III</ulink></>,
+ <application><ulink url="http://www.pgadmin.org/">pgAdmin III</ulink></>,
and there are several commercially available ones as well.
</para>
</sect1>
<!entity unaccent SYSTEM "unaccent.sgml">
<!entity uuid-ossp SYSTEM "uuid-ossp.sgml">
<!entity vacuumlo SYSTEM "vacuumlo.sgml">
-<!entity xml2 SYSTEM "xml2.sgml">
+<!entity xml2 SYSTEM "xml2.sgml">
<!-- appendixes -->
<!entity contacts SYSTEM "contacts.sgml">
is present in other <acronym>SQL</acronym> database management
systems, and in many cases this functionality is compatible and
consistent between the various implementations. This chapter is also
- not exhaustive; additional functions appear in relevant sections of
+ not exhaustive; additional functions appear in relevant sections of
the manual.
</para>
<literal>IS NOT NULL</literal>, respectively, except that the input
expression must be of Boolean type.
</para>
-
+
<!-- IS OF does not conform to the ISO SQL behavior, so it is undocumented here
<para>
<indexterm>
<entry>Return Type</entry>
<entry>Description</entry>
<entry>Example</entry>
- <entry>Result</entry>
+ <entry>Result</entry>
</row>
</thead>
original encoding is specified by
<parameter>src_encoding</parameter>. The
<parameter>string</parameter> must be valid in this encoding.
- Conversions can be defined by <command>CREATE CONVERSION</command>.
+ Conversions can be defined by <command>CREATE CONVERSION</command>.
Also there are some predefined conversions. See <xref
linkend="conversion-names"> for available conversions.
</entry>
</entry>
<entry><type>bytea</type></entry>
<entry>
- Decode binary data from <parameter>string</parameter> previously
+ Decode binary data from <parameter>string</parameter> previously
encoded with <function>encode</>. Parameter type is same as in <function>encode</>.
</entry>
<entry><literal>decode('MTIzAAE=', 'base64')</literal></entry>
<entry><literal>123\000\001</literal></entry>
- </row>
+ </row>
<row>
<entry>
</entry>
<entry><literal>encode(E'123\\000\\001', 'base64')</literal></entry>
<entry><literal>MTIzAAE=</literal></entry>
- </row>
+ </row>
<row>
<entry>
</entry>
<entry><literal>translate('12345', '143', 'ax')</literal></entry>
<entry><literal>a2x5</literal></entry>
- </row>
-
+ </row>
+
</tbody>
</tgroup>
</table>
<row>
<entry><literal>HH12</literal></entry>
<entry>hour of day (01-12)</entry>
- </row>
+ </row>
<row>
<entry><literal>HH24</literal></entry>
<entry>hour of day (00-23)</entry>
- </row>
+ </row>
<row>
<entry><literal>MI</literal></entry>
<entry>minute (00-59)</entry>
- </row>
+ </row>
<row>
<entry><literal>SS</literal></entry>
<entry>second (00-59)</entry>
<row>
<entry><literal>W</literal></entry>
<entry>week of month (1-5) (The first week starts on the first day of the month.)</entry>
- </row>
+ </row>
<row>
<entry><literal>WW</literal></entry>
<entry>week number of year (1-53) (The first week starts on the first day of the year.)</entry>
<entry><literal>TH</literal> suffix</entry>
<entry>upper case ordinal number suffix</entry>
<entry><literal>DDTH</literal>, e.g., <literal>12TH</></entry>
- </row>
+ </row>
<row>
<entry><literal>th</literal> suffix</entry>
<entry>lower case ordinal number suffix</entry>
<entry><literal>FX</literal> prefix</entry>
<entry>fixed format global option (see usage notes)</entry>
<entry><literal>FX Month DD Day</literal></entry>
- </row>
+ </row>
<row>
<entry><literal>TM</literal> prefix</entry>
<entry>translation mode (print localized day and month names based on
<xref linkend="guc-lc-time">)</entry>
<entry><literal>TMMonth</literal></entry>
- </row>
+ </row>
<row>
<entry><literal>SP</literal> suffix</entry>
<entry>spell mode (not implemented)</entry>
<entry><literal>DDSP</literal></entry>
- </row>
+ </row>
</tbody>
</tgroup>
</table>
use some non-digit character or template after <literal>YYYY</literal>,
otherwise the year is always interpreted as 4 digits. For example
(with the year 20000):
- <literal>to_date('200001131', 'YYYYMMDD')</literal> will be
- interpreted as a 4-digit year; instead use a non-digit
+ <literal>to_date('200001131', 'YYYYMMDD')</literal> will be
+ interpreted as a 4-digit year; instead use a non-digit
separator after the year, like
<literal>to_date('20000-1131', 'YYYY-MMDD')</literal> or
<literal>to_date('20000Nov31', 'YYYYMonDD')</literal>.
In a conversion from string to <type>timestamp</type>, millisecond
(<literal>MS</literal>) or microsecond (<literal>US</literal>)
values are used as the
- seconds digits after the decimal point. For example
+ seconds digits after the decimal point. For example
<literal>to_timestamp('12:3', 'SS:MS')</literal> is not 3 milliseconds,
but 300, because the conversion counts it as 12 + 0.3 seconds.
This means for the format <literal>SS:MS</literal>, the input values
</para>
<para>
- Here is a more
- complex example:
+ Here is a more
+ complex example:
<literal>to_timestamp('15:12:02.020.001230', 'HH:MI:SS.MS.US')</literal>
is 15 hours, 12 minutes, and 2 seconds + 20 milliseconds +
- 1230 microseconds = 2.021230 seconds.
+ 1230 microseconds = 2.021230 seconds.
</para>
</listitem>
<row>
<entry><literal>.</literal> (period)</entry>
<entry>decimal point</entry>
- </row>
+ </row>
<row>
<entry><literal>,</literal> (comma)</entry>
<entry>group (thousand) separator</entry>
<listitem>
<para>
- <literal>9</literal> results in a value with the same number of
+ <literal>9</literal> results in a value with the same number of
digits as there are <literal>9</literal>s. If a digit is
not available it outputs a space.
</para>
<para>
<literal>PL</literal>, <literal>SG</literal>, and
<literal>TH</literal> are <productname>PostgreSQL</productname>
- extensions.
+ extensions.
</para>
</listitem>
<entry><literal>TH</literal> suffix</entry>
<entry>upper case ordinal number suffix</entry>
<entry><literal>999TH</literal></entry>
- </row>
+ </row>
<row>
<entry><literal>th</literal> suffix</entry>
<entry>lower case ordinal number suffix</entry>
<row>
<entry><literal>to_char(current_timestamp, 'FMDay, FMDD HH12:MI:SS')</literal></entry>
<entry><literal>'Tuesday, 6 05:39:18'</literal></entry>
- </row>
+ </row>
<row>
<entry><literal>to_char(-0.1, '99.99')</literal></entry>
<entry><literal>' -.10'</literal></entry>
</row>
<row>
<entry><literal>to_char(148.5, '999D999')</literal></entry>
- <entry><literal>' 148,500'</literal></entry>
+ <entry><literal>' 148,500'</literal></entry>
</row>
<row>
<entry><literal>to_char(3148.5, '9G999D999')</literal></entry>
<entry><literal>to_char(-485, '999S')</literal></entry>
<entry><literal>'485-'</literal></entry>
</row>
- <row>
+ <row>
<entry><literal>to_char(-485, '999MI')</literal></entry>
- <entry><literal>'485-'</literal></entry>
+ <entry><literal>'485-'</literal></entry>
</row>
<row>
<entry><literal>to_char(485, '999MI')</literal></entry>
- <entry><literal>'485 '</literal></entry>
+ <entry><literal>'485 '</literal></entry>
</row>
<row>
<entry><literal>to_char(485, 'FM999MI')</literal></entry>
- <entry><literal>'485'</literal></entry>
+ <entry><literal>'485'</literal></entry>
</row>
<row>
<entry><literal>to_char(485, 'PL999')</literal></entry>
- <entry><literal>'+485'</literal></entry>
+ <entry><literal>'+485'</literal></entry>
</row>
- <row>
+ <row>
<entry><literal>to_char(485, 'SG999')</literal></entry>
- <entry><literal>'+485'</literal></entry>
+ <entry><literal>'+485'</literal></entry>
</row>
<row>
<entry><literal>to_char(-485, 'SG999')</literal></entry>
- <entry><literal>'-485'</literal></entry>
+ <entry><literal>'-485'</literal></entry>
</row>
<row>
<entry><literal>to_char(-485, '9SG99')</literal></entry>
- <entry><literal>'4-85'</literal></entry>
+ <entry><literal>'4-85'</literal></entry>
</row>
<row>
<entry><literal>to_char(-485, '999PR')</literal></entry>
- <entry><literal>'<485>'</literal></entry>
+ <entry><literal>'<485>'</literal></entry>
</row>
<row>
<entry><literal>to_char(485, 'L999')</literal></entry>
- <entry><literal>'DM 485</literal></entry>
+       <entry><literal>'DM 485'</literal></entry>
</row>
<row>
- <entry><literal>to_char(485, 'RN')</literal></entry>
+ <entry><literal>to_char(485, 'RN')</literal></entry>
<entry><literal>' CDLXXXV'</literal></entry>
</row>
<row>
- <entry><literal>to_char(485, 'FMRN')</literal></entry>
+ <entry><literal>to_char(485, 'FMRN')</literal></entry>
<entry><literal>'CDLXXXV'</literal></entry>
</row>
<row>
<entry><literal>to_char(5.2, 'FMRN')</literal></entry>
- <entry><literal>'V'</literal></entry>
+ <entry><literal>'V'</literal></entry>
</row>
<row>
<entry><literal>to_char(482, '999th')</literal></entry>
- <entry><literal>' 482nd'</literal></entry>
+ <entry><literal>' 482nd'</literal></entry>
</row>
<row>
<entry><literal>to_char(485, '"Good number:"999')</literal></entry>
<entry><literal>'Pre: 485 Post: .800'</literal></entry>
</row>
<row>
- <entry><literal>to_char(12, '99V999')</literal></entry>
+ <entry><literal>to_char(12, '99V999')</literal></entry>
<entry><literal>' 12000'</literal></entry>
</row>
<row>
<entry><literal>to_char(12.4, '99V999')</literal></entry>
<entry><literal>' 12400'</literal></entry>
</row>
- <row>
+ <row>
<entry><literal>to_char(12.45, '99V9')</literal></entry>
<entry><literal>' 125'</literal></entry>
</row>
</screen>
<para>
- When adding an <type>interval</type> value to (or subtracting an
- <type>interval</type> value from) a <type>timestamp with time zone</type>
- value, the days component advances (or decrements) the date of the
- <type>timestamp with time zone</type> by the indicated number of days.
- Across daylight saving time changes (with the session time zone set to a
- time zone that recognizes DST), this means <literal>interval '1 day'</literal>
- does not necessarily equal <literal>interval '24 hours'</literal>.
+ When adding an <type>interval</type> value to (or subtracting an
+ <type>interval</type> value from) a <type>timestamp with time zone</type>
+ value, the days component advances (or decrements) the date of the
+ <type>timestamp with time zone</type> by the indicated number of days.
+ Across daylight saving time changes (with the session time zone set to a
+ time zone that recognizes DST), this means <literal>interval '1 day'</literal>
+ does not necessarily equal <literal>interval '24 hours'</literal>.
For example, with the session time zone set to <literal>CST7CDT</literal>,
<literal>timestamp with time zone '2005-04-02 12:00-07' + interval '1 day' </literal>
- will produce <literal>timestamp with time zone '2005-04-03 12:00-06'</literal>,
- while adding <literal>interval '24 hours'</literal> to the same initial
+ will produce <literal>timestamp with time zone '2005-04-03 12:00-06'</literal>,
+ while adding <literal>interval '24 hours'</literal> to the same initial
<type>timestamp with time zone</type> produces
<literal>timestamp with time zone '2005-04-03 13:00-06'</literal>, as there is
- a change in daylight saving time at <literal>2005-04-03 02:00</literal> in time zone
+ a change in daylight saving time at <literal>2005-04-03 02:00</literal> in time zone
<literal>CST7CDT</literal>.
</para>
a year is in week 1 of that year.
</para>
<para>
- Because of this, it is possible for early January dates to be part of the
+ Because of this, it is possible for early January dates to be part of the
52nd or 53rd week of the previous year. For example, <literal>2005-01-01</>
- is part of the 53rd week of year 2004, and <literal>2006-01-01</> is part of
+ is part of the 53rd week of year 2004, and <literal>2006-01-01</> is part of
the 52nd week of year 2005.
</para>
<term><literal>year</literal></term>
<listitem>
<para>
- The year field. Keep in mind there is no <literal>0 AD</>, so subtracting
+ The year field. Keep in mind there is no <literal>0 AD</>, so subtracting
<literal>BC</> years from <literal>AD</> years should be done with care.
</para>
<lineannotation>Result: </lineannotation><computeroutput>2001-02-16 18:38:40</computeroutput>
</screen>
The first example takes a time stamp without time zone and interprets it as MST time
- (UTC-7), which is then converted to PST (UTC-8) for display. The second example takes
+ (UTC-7), which is then converted to PST (UTC-8) for display. The second example takes
a time stamp specified in EST (UTC-5) and converts it to local time in MST (UTC-7).
</para>
The function <literal><function>timezone</function>(<replaceable>zone</>,
<replaceable>timestamp</>)</literal> is equivalent to the SQL-conforming construct
<literal><replaceable>timestamp</> AT TIME ZONE
- <replaceable>zone</></literal>.
+ <replaceable>zone</></literal>.
</para>
</sect2>
</sect1>
-
+
<sect1 id="functions-enum">
<title>Enum Support Functions</title>
<synopsis>
table_to_xml(tbl regclass, nulls boolean, tableforest boolean, targetns text)
query_to_xml(query text, nulls boolean, tableforest boolean, targetns text)
-cursor_to_xml(cursor refcursor, count int, nulls boolean,
+cursor_to_xml(cursor refcursor, count int, nulls boolean,
tableforest boolean, targetns text)
</synopsis>
The return type of each function is <type>xml</type>.
<para>
The <function>COALESCE</function> function returns the first of its
arguments that is not null. Null is returned only if all arguments
- are null. It is often used to substitute a default value for
+ are null. It is often used to substitute a default value for
null values when data is retrieved for display, for example:
<programlisting>
SELECT COALESCE(description, short_description, '(none)') ...
-- unnest a 2D array
CREATE OR REPLACE FUNCTION unnest2(anyarray)
RETURNS SETOF anyelement AS $$
-select $1[i][j]
+select $1[i][j]
from generate_subscripts($1,1) g1(i),
generate_subscripts($1,2) g2(j);
$$ LANGUAGE sql IMMUTABLE;
<para>
<function>pg_rotate_logfile</> signals the log-file manager to switch
to a new output file immediately. This works only when the built-in
- log collector is running, since otherwise there is no log-file manager
+ log collector is running, since otherwise there is no log-file manager
subprocess.
</para>
<para>
<function>pg_advisory_lock</> locks an application-defined resource,
which can be identified either by a single 64-bit key value or two
- 32-bit key values (note that these two key spaces do not overlap).
+ 32-bit key values (note that these two key spaces do not overlap).
The key type is specified in <literal>pg_locks.objid</>. If
another session already holds a lock on the same resource, the
function will wait until the resource becomes available. The lock
<para>
Currently <productname>PostgreSQL</> provides one built in trigger
- function, <function>suppress_redundant_updates_trigger</>,
+ function, <function>suppress_redundant_updates_trigger</>,
which will prevent any update
that does not actually change the data in the row from taking place, in
contrast to the normal behavior which always performs the update
and space in dead rows that will eventually have to be vacuumed.
However, detecting such situations in client code is not
always easy, or even possible, and writing expressions to detect
- them can be error-prone. An alternative is to use
+ them can be error-prone. An alternative is to use
<function>suppress_redundant_updates_trigger</>, which will skip
updates that don't change the data. You should use this with care,
- however. The trigger takes a small but non-trivial time for each record,
+ however. The trigger takes a small but non-trivial time for each record,
so if most of the records affected by an update are actually changed,
use of this trigger will actually make the update run slower.
</para>
<para>
- The <function>suppress_redundant_updates_trigger</> function can be
+ The <function>suppress_redundant_updates_trigger</> function can be
added to a table like this:
<programlisting>
-CREATE TRIGGER z_min_update
+CREATE TRIGGER z_min_update
BEFORE UPDATE ON tablename
FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
</programlisting>
database, and several geographic information systems.
<productname>POSTGRES</productname> has also been used as an
educational tool at several universities. Finally, Illustra
- Information Technologies (later merged into
+ Information Technologies (later merged into
<ulink url="http://www.informix.com/"><productname>Informix</productname></ulink>,
which is now owned by <ulink
url="http://www.ibm.com/">IBM</ulink>) picked up the code and
commercialized it. In late 1992,
<productname>POSTGRES</productname> became the primary data manager
- for the
+ for the
<ulink url="http://meteora.ucsd.edu/s2k/s2k_home.html">
Sequoia 2000 scientific computing project</ulink>.
</para>
<term>Web Site</term>
<listitem>
<para>
- The <productname>PostgreSQL</productname>
+ The <productname>PostgreSQL</productname>
<ulink url="http://www.postgresql.org">web site</ulink>
carries details on the latest release and other
information to make your work or play with
</indexterm>
<para>
- It is recommended that most users download the binary distribution for
+ It is recommended that most users download the binary distribution for
Windows, available as a one-click installer package
from the <productname>PostgreSQL</productname> website. Building from source
is only intended for people developing <productname>PostgreSQL</productname>
<para>
Finally, the client access library
(<application>libpq</application>) can be built using
- <productname>Visual C++ 7.1</productname> or
+ <productname>Visual C++ 7.1</productname> or
<productname>Borland C++</productname> for compatibility with statically
linked applications built using these tools.
</para>
These builds cannot generate 64-bit binaries.
<productname>Cygwin</productname> is not recommended and should
only be used for older versions of <productname>Windows</productname> where
- the native build does not work, such as
+ the native build does not work, such as
<productname>Windows 98</productname>. <productname>MinGW</productname> is
only recommended if you are building other modules using it. The official
binaries are built using <productname>Visual Studio</productname>.
building from a release file. Note that only Bison 1.875 or versions
2.2 and later will work. Also, Flex version 2.5.31 or later is required.
Bison can be downloaded from <ulink url="http://gnuwin32.sourceforge.net"></>.
- Flex can be downloaded from
+ Flex can be downloaded from
<ulink url="http://www.postgresql.org/ftp/misc/winflex/"></>.
</para></listitem>
</varlistentry>
<term><productname>MIT Kerberos</productname></term>
<listitem><para>
Required for Kerberos authentication support. MIT Kerberos can be
- downloaded from
+ downloaded from
<ulink url="http://web.mit.edu/Kerberos/dist/index.html"></>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><productname>ossp-uuid</productname></term>
<listitem><para>
- Required for UUID-OSSP support (contrib only). Source can be
+ Required for UUID-OSSP support (contrib only). Source can be
downloaded from
<ulink url="http://www.ossp.org/pkg/lib/uuid/"></>.
</para></listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term><filename>interfaces\libpq\Release\libpqdll.lib</filename></term>
<listitem>
<para>
SQL distinguishes between <firstterm>reserved</firstterm> and
<firstterm>non-reserved</firstterm> key words. According to the standard,
- reserved key words
+ reserved key words
are the only real key words; they are never allowed as identifiers.
Non-reserved key words only have a special meaning in particular
contexts and can be used as identifiers in other contexts. Most
</para>
<para>
-    <productname>Postgres95</productname> is Copyright &copy; 1994-5 
+    <productname>Postgres95</productname> is Copyright &copy; 1994-5
by the Regents of the University of California.
</para>
This parameter specifies the file name of the client SSL
certificate, replacing the default
<filename>~/.postgresql/postgresql.crt</>.
- This parameter is ignored if an SSL connection is not made.
+ This parameter is ignored if an SSL connection is not made.
</para>
</listitem>
</varlistentry>
This makes the large object facility partially obsolete. One
remaining advantage of the large object facility is that it allows values
up to 2 GB in size, whereas <acronym>TOAST</acronym>ed fields can be at
- most 1 GB. Also, large objects can be randomly modified using a read/write
+ most 1 GB. Also, large objects can be randomly modified using a read/write
API that is more efficient than performing such operations using
<acronym>TOAST</acronym>.
</para>
As of <productname>PostgreSQL</> 9.0, large objects have an owner
and a set of access permissions, which can be managed using
<xref linkend="sql-grant"> and
- <xref linkend="sql-revoke">.
+ <xref linkend="sql-revoke">.
For compatibility with prior releases, see
<xref linkend="guc-lo-compat-privileges">.
<literal>SELECT</literal> privileges are required to read a large
- object, and
+ object, and
<literal>UPDATE</literal> privileges are required to write to or
truncate it.
Only the large object owner (or the database superuser) can unlink, comment
Oid lo_creat(PGconn *conn, int mode);
</synopsis>
<indexterm><primary>lo_creat</></>
- creates a new large object.
+ creates a new large object.
The return value is the OID that was assigned to the new large object,
or <symbol>InvalidOid</symbol> (zero) on failure.
Oid lo_import(PGconn *conn, const char *filename);
</synopsis>
<indexterm><primary>lo_import</></>
- <replaceable class="parameter">filename</replaceable>
+ <replaceable class="parameter">filename</replaceable>
specifies the operating system name of
the file to be imported as a large object.
The return value is the OID that was assigned to the new large object,
descriptor for later use in <function>lo_read</function>,
<function>lo_write</function>, <function>lo_lseek</function>,
<function>lo_tell</function>, and <function>lo_close</function>.
- The descriptor is only valid for
+ The descriptor is only valid for
the duration of the current transaction.
On failure, -1 is returned.
</para>
<title>Example Program</title>
<para>
- <xref linkend="lo-example"> is a sample program which shows how the large object
+ <xref linkend="lo-example"> is a sample program which shows how the large object
interface
- in <application>libpq</> can be used. Parts of the program are
+ in <application>libpq</> can be used. Parts of the program are
commented out but are left in the source for the reader's
benefit. This program can also be found in
<filename>src/test/examples/testlo.c</filename> in the source distribution.
</indexterm>
<para>
- <productname>PostgreSQL</productname> provides a rich set of tools
+ <productname>PostgreSQL</productname> provides a rich set of tools
for developers to manage concurrent access to data. Internally,
- data consistency is maintained by using a multiversion
- model (Multiversion Concurrency Control, <acronym>MVCC</acronym>).
+ data consistency is maintained by using a multiversion
+ model (Multiversion Concurrency Control, <acronym>MVCC</acronym>).
This means that while querying a database each transaction sees
a snapshot of data (a <firstterm>database version</firstterm>)
as it was some
data rows, providing <firstterm>transaction isolation</firstterm>
for each database session. <acronym>MVCC</acronym>, by eschewing
explicit locking methodologies of traditional database systems,
- minimizes lock contention in order to allow for reasonable
+ minimizes lock contention in order to allow for reasonable
performance in multiuser environments.
</para>
<synopsis>
pg_archivecleanup <optional> <replaceable>option</> ... </optional> <replaceable>archivelocation</> <replaceable>restartwalfile</>
</synopsis>
- When used as a standalone program all WAL files logically preceding the
+ When used as a standalone program all WAL files logically preceding the
<literal>restartwalfile</> will be removed from <replaceable>archivelocation</>.
In this mode, if you specify a <filename>.backup</> file name, then only the file prefix
will be used as the <literal>restartwalfile</>. This allows you to remove
<procedure>
<step performance="optional">
<title>Optionally move the old cluster</title>
-
+
<para>
If you are using a version-specific installation directory, e.g.
<filename>/opt/PostgreSQL/8.4</>, you do not need to move the old cluster. The
one-click installers all use version-specific installation directories.
</para>
-
- <para>
+
+ <para>
If your installation directory is not version-specific, e.g.
<filename>/usr/local/pgsql</>, it is necessary to move the current PostgreSQL install
directory so it does not interfere with the new <productname>PostgreSQL</> installation.
Once the current <productname>PostgreSQL</> server is shut down, it is safe to rename the
PostgreSQL installation directory; assuming the old directory is
<filename>/usr/local/pgsql</>, you can do:
-
+
<programlisting>
mv /usr/local/pgsql /usr/local/pgsql.old
</programlisting>
to rename the directory.
</para>
</step>
-
+
<step>
<title>For source installs, build the new version</title>
-
+
<para>
Build the new PostgreSQL source with <command>configure</> flags that are compatible
with the old cluster. <application>pg_upgrade</> will check <command>pg_controldata</> to make
sure all settings are compatible before starting the upgrade.
</para>
</step>
-
+
<step>
<title>Install the new PostgreSQL binaries</title>
-
+
<para>
Install the new server's binaries and support files. You can use the
same port numbers for both clusters, typically 5432, because the old and
new clusters will not be running at the same time.
</para>
-
+
<para>
For source installs, if you wish to install the new server in a custom
location, use the <literal>prefix</literal> variable:
-
+
<programlisting>
gmake prefix=/usr/local/pgsql.new install
</programlisting>
</para>
</step>
-
+
<step>
<title>Install pg_upgrade and pg_upgrade_support</title>
<application>pg_upgrade_support</> in the new PostgreSQL cluster
</para>
</step>
-
+
<step>
<title>Initialize the new PostgreSQL cluster</title>
-
+
<para>
Initialize the new cluster using <command>initdb</command>.
Again, use compatible <command>initdb</command>
start the new cluster.
</para>
</step>
-
+
<step>
<title>Install custom shared object files</title>
-
+
<para>
Install any custom shared object files (or DLLs) used by the old cluster
into the new cluster, e.g. <filename>pgcrypto.so</filename>, whether they are from <filename>contrib</filename>
<filename>pgcrypto.sql</>, because these will be migrated from the old cluster.
</para>
</step>
-
+
<step>
<title>Adjust authentication</title>
-
+
<para>
<command>pg_upgrade</> will connect to the old and new servers several times,
so you might want to set authentication to <literal>trust</> in
to avoid being prompted repeatedly for a password.
</para>
</step>
-
+
<step>
<title>Stop both servers</title>
-
+
<para>
Make sure both database servers are stopped using, on Unix, e.g.:
-
+
<programlisting>
pg_ctl -D /opt/PostgreSQL/8.4 stop
pg_ctl -D /opt/PostgreSQL/9.0 stop
</programlisting>
-
+
or on Windows, using the proper service names:
-
+
<programlisting>
NET STOP postgresql-8.4
NET STOP postgresql-9.0
</programlisting>
-
+
or
-
+
<programlisting>
NET STOP pgsql-8.3 (<productname>PostgreSQL</> 8.3 and older used a different service name)
</programlisting>
</para>
</step>
-
+
<step>
<title>Run <application>pg_upgrade</></title>
-
+
<para>
Always run the <application>pg_upgrade</> binary of the new server, not the old one.
<application>pg_upgrade</> requires the specification of the old and new cluster's
old cluster once you start the new cluster after the upgrade. See
<literal>pg_upgrade --help</> for a full list of options.
</para>
-
+
<para>
For Windows users, you must be logged into an administrative account, and
then start a shell as the <literal>postgres</> user and set the proper path:
-
+
<programlisting>
RUNAS /USER:postgres "CMD.EXE"
SET PATH=%PATH%;C:\Program Files\PostgreSQL\9.0\bin;
</programlisting>
-
+
and then run <application>pg_upgrade</> with quoted directories, e.g.:
-
+
<programlisting>
pg_upgrade.exe
--old-datadir "C:/Program Files/PostgreSQL/8.4/data"
--old-bindir "C:/Program Files/PostgreSQL/8.4/bin"
--new-bindir "C:/Program Files/PostgreSQL/9.0/bin"
</programlisting>
-
+
Once started, <command>pg_upgrade</> will verify the two clusters are compatible
and then do the migration. You can use <command>pg_upgrade --check</>
to perform only the checks, even if the old server is still
manual adjustments you will need to make after the migration.
<command>pg_upgrade</> requires write permission in the current directory.
</para>
-
+
<para>
Obviously, no one should be accessing the clusters during the migration.
</para>
-
+
<para>
If an error occurs while restoring the database schema, <command>pg_upgrade</> will
exit and you will have to revert to the old cluster as outlined in <xref linkend="pgupgrade-step-revert">
assuming the module is not being used to store user data.
</para>
</step>
-
+
<step>
<title>Restore <filename>pg_hba.conf</></title>
-
+
<para>
If you modified <filename>pg_hba.conf</> to use <literal>trust</>,
restore its original authentication settings.
</para>
</step>
-
+
<step>
<title>Post-migration processing</title>
-
+
<para>
If any post-migration processing is required, pg_upgrade will issue
warnings as it completes. It will also generate script files that must
be run by the administrator. The script files will connect to each
database that needs post-migration processing. Each script should be
run using:
-
+
<programlisting>
psql --username postgres --file script.sql postgres
</programlisting>
-
+
The scripts can be run in any order and can be deleted once they have
been run.
</para>
- <caution>
+ <caution>
<para>
In general it is unsafe to access tables referenced in rebuild scripts
until the rebuild scripts have run to completion; doing so could yield
</para>
</caution>
</step>
-
+
<step>
<title>Statistics</title>
of the migration.
</para>
</step>
-
+
<step>
<title>Delete old cluster</title>
-
+
<para>
Once you are satisfied with the upgrade, you can delete the old
cluster's data directories by running the script mentioned when
(e.g. <filename>bin</>, <filename>share</>).
</para>
</step>
-
+
<step id="pgupgrade-step-revert" performance="optional">
<title>Reverting to old cluster</title>
-
+
<para>
If, after running <command>pg_upgrade</command>, you wish to revert to the old cluster,
there are several options:
</para>
</step>
</procedure>
-
+
</sect2>
-
+
<sect2>
<title>Limitations in Migrating <emphasis>from</> PostgreSQL 8.3</title>
-
+
<para>
Upgrading from PostgreSQL 8.3 has additional restrictions not present
when upgrading from later PostgreSQL releases. For example,
<para>
You must drop any such columns and migrate them manually.
</para>
-
+
<para>
pg_upgrade will require a table rebuild if:
<itemizedlist>
</listitem>
</itemizedlist>
</para>
-
+
<para>
pg_upgrade will require a reindex if:
<itemizedlist>
</listitem>
</itemizedlist>
</para>
-
+
<para>
Also, the default datetime storage format changed to integer after
<productname>PostgreSQL</> 8.3. pg_upgrade will check that the datetime storage format
used by the old and new clusters match. Make sure your new cluster is
built with the configure flag <option>--disable-integer-datetimes</>.
</para>
-
+
<para>
For Windows users, note that due to different integer datetimes settings
used by the one-click installer and the MSI installer, it is only
</para>
</sect2>
-
+
<sect2>
<title>Notes</title>
-
- <para>
+
+ <para>
<application>pg_upgrade</> does not support migration of databases
containing these <type>reg*</> OID-referencing system data types:
<type>regproc</>, <type>regprocedure</>, <type>regoper</>,
<type>regoperator</>, <type>regclass</>, <type>regconfig</>, and
<type>regdictionary</>. (<type>regtype</> can be migrated.)
</para>
-
- <para>
+
+ <para>
All failure, rebuild, and reindex cases will be reported by
<application>pg_upgrade</> if they affect your installation;
post-migration scripts to rebuild tables and indexes will be
generated automatically.
</para>
-
+
<para>
For deployment testing, create a schema-only copy of the old cluster,
insert dummy data, and migrate that.
</para>
-
- <para>
+
+ <para>
If you want to use link mode and you don't want your old cluster
to be modified when the new cluster is started, make a copy of the
old cluster and migrate that with link mode. To make a valid copy
the old server and run <command>rsync</> again to update the copy with any
changes to make it consistent.
</para>
-
+
</sect2>
-
+
</sect1>
</para>
<para>
- If you wish to use the <literal>strict</> pragma with your code you
- have a few options. For temporary global use you can <command>SET</>
+ If you wish to use the <literal>strict</> pragma with your code you
+ have a few options. For temporary global use you can <command>SET</>
<literal>plperl.use_strict</literal> to true.
This will affect subsequent compilations of <application>PL/Perl</>
functions, but not functions already compiled in the current session.
by <literal>spi_exec_query</literal>, or in <literal>spi_query_prepared</literal> which returns a cursor
exactly as <literal>spi_query</literal> does, which can be later passed to <literal>spi_fetchrow</literal>.
The optional second parameter to <literal>spi_exec_prepared</literal> is a hash reference of attributes;
- the only attribute currently supported is <literal>limit</literal>, which sets the maximum number of rows returned by a query.
+ the only attribute currently supported is <literal>limit</literal>, which sets the maximum number of rows returned by a query.
</para>
<para>
<para>
When a session ends normally, not due to a fatal error, any
<literal>END</> blocks that have been defined are executed.
- Currently no other actions are performed. Specifically,
- file handles are not automatically flushed and objects are
+ Currently no other actions are performed. Specifically,
+ file handles are not automatically flushed and objects are
not automatically destroyed.
</para>
</listitem>
If the command is a <command>SELECT</> statement and no <replaceable>loop-body</>
script is given, then only the first row of results is stored into
Tcl variables; remaining rows, if any, are ignored. No storing occurs
- if the
+ if the
query returns no rows. (This case can be detected by checking the
result of <function>spi_exec</function>.) For example:
<programlisting>
different function definitions as long as the number of arguments or their types
differ. Tcl, however, requires all procedure names to be distinct.
PL/Tcl deals with this by making the internal Tcl procedure names contain
- the object
+ the object
ID of the function from the system table <structname>pg_proc</> as part of their name. Thus,
<productname>PostgreSQL</productname> functions with the same name
and different argument types will be different Tcl procedures, too. This
hear about it. Your bug reports play an important part in making
<productname>PostgreSQL</productname> more reliable because even the utmost
care cannot guarantee that every part of
- <productname>PostgreSQL</productname>
+ <productname>PostgreSQL</productname>
will work on every platform under every circumstance.
</para>
message, perhaps parts of the error message.
</para>
- <para>
+ <para>
Another method is to fill in the bug report web-form available
at the project's
<ulink url="http://www.postgresql.org/">web site</ulink>.
</para>
- <para>
+ <para>
Do not send bug reports to any of the user mailing lists, such as
<para>
<command>ABORT</command> rolls back the current transaction and causes
- all the updates made by the transaction to be discarded.
+ all the updates made by the transaction to be discarded.
This command is identical
in behavior to the standard <acronym>SQL</acronym> command
<xref linkend="SQL-ROLLBACK">,
<refnamediv>
<refname>ALTER AGGREGATE</refname>
<refpurpose>change the definition of an aggregate function</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-alteraggregate">
<primary>ALTER AGGREGATE</primary>
</indexterm>
ALTER AGGREGATE <replaceable>name</replaceable> ( <replaceable>type</replaceable> [ , ... ] ) SET SCHEMA <replaceable>new_schema</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
aggregate function anyway.)
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<refnamediv>
<refname>ALTER CONVERSION</refname>
<refpurpose>change the definition of a conversion</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-alterconversion">
<primary>ALTER CONVERSION</primary>
</indexterm>
ALTER CONVERSION <replaceable>name</replaceable> OWNER TO <replaceable>new_owner</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
anyway.)
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<para>
The first form changes certain per-database settings. (See below for
details.) Only the database owner or a superuser can change these settings.
- </para>
+ </para>
<para>
The second form changes the name of the database. Only the database
to this database. -1 means no limit.
</para>
</listitem>
- </varlistentry>
+ </varlistentry>
<varlistentry>
<term><replaceable>new_name</replaceable></term>
The new default tablespace of the database.
</para>
</listitem>
- </varlistentry>
+ </varlistentry>
<varlistentry>
<term><replaceable>configuration_parameter</replaceable></term>
<refsect1>
<title>Compatibility</title>
-
+
<para>
The <command>ALTER DATABASE</command> statement is a
<productname>PostgreSQL</productname> extension.
ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
DROP CONSTRAINT <replaceable class="PARAMETER">constraint_name</replaceable> [ RESTRICT | CASCADE ]
ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
- OWNER TO <replaceable class="PARAMETER">new_owner</replaceable>
+ OWNER TO <replaceable class="PARAMETER">new_owner</replaceable>
ALTER DOMAIN <replaceable class="PARAMETER">name</replaceable>
- SET SCHEMA <replaceable class="PARAMETER">new_schema</replaceable>
+ SET SCHEMA <replaceable class="PARAMETER">new_schema</replaceable>
</synopsis>
</refsynopsisdiv>
<refsect1 id="SQL-ALTERDOMAIN-compatibility">
<title>Compatibility</title>
-
+
<para>
<command>ALTER DOMAIN</command> conforms to the <acronym>SQL</acronym>
standard,
<refnamediv>
<refname>ALTER FUNCTION</refname>
<refpurpose>change the definition of a function</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-alterfunction">
<primary>ALTER FUNCTION</primary>
</indexterm>
RESET ALL
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
However, a superuser can alter ownership of any function anyway.)
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<listitem>
<para>
- The data type(s) of the function's arguments (optionally
+ The data type(s) of the function's arguments (optionally
schema-qualified), if any.
</para>
</listitem>
<para>
The third variant changes the name of the group. This is exactly
- equivalent to renaming the role with
+ equivalent to renaming the role with
<xref linkend="sql-alterrole">.
</para>
</refsect1>
<refsect1>
<title>Compatibility</title>
-
+
<para>
There is no <command>ALTER GROUP</command> statement in the SQL
standard.
<term><literal>RENAME</literal></term>
<listitem>
<para>
- The <literal>RENAME</literal> form changes the name of the index.
+ The <literal>RENAME</literal> form changes the name of the index.
There is no effect on the stored data.
</para>
</listitem>
<para>
This form changes the index's tablespace to the specified tablespace and
moves the data file(s) associated with the index to the new tablespace.
- See also
+ See also
<xref linkend="SQL-CREATETABLESPACE">.
</para>
</listitem>
</programlisting>
</para>
- <para>
+ <para>
To move an index to a different tablespace:
<programlisting>
ALTER INDEX distributors SET TABLESPACE fasttablespace;
</programlisting>
</para>
- <para>
+ <para>
To change an index's fill factor (assuming that the index method
supports it):
<programlisting>
<refsect1>
<title>Compatibility</title>
-
+
<para>
There is no <command>ALTER LANGUAGE</command> statement in the SQL
standard.
<refsect1>
<title>Compatibility</title>
-
+
<para>
There is no <command>ALTER LARGE OBJECT</command> statement in the SQL
standard.
<refnamediv>
<refname>ALTER OPERATOR CLASS</refname>
<refpurpose>change the definition of an operator class</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-alteropclass">
<primary>ALTER OPERATOR CLASS</primary>
</indexterm>
ALTER OPERATOR CLASS <replaceable>name</replaceable> USING <replaceable class="parameter">index_method</replaceable> OWNER TO <replaceable>new_owner</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
class anyway.)
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<refnamediv>
<refname>ALTER OPERATOR</refname>
<refpurpose>change the definition of an operator</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-alteroperator">
<primary>ALTER OPERATOR</primary>
</indexterm>
ALTER OPERATOR <replaceable>name</replaceable> ( { <replaceable>left_type</replaceable> | NONE } , { <replaceable>right_type</replaceable> | NONE } ) OWNER TO <replaceable>new_owner</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
However, a superuser can alter ownership of any operator anyway.)
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<refnamediv>
<refname>ALTER OPERATOR FAMILY</refname>
<refpurpose>change the definition of an operator family</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-alteropfamily">
<primary>ALTER OPERATOR FAMILY</primary>
</indexterm>
ALTER OPERATOR FAMILY <replaceable>name</replaceable> USING <replaceable class="parameter">index_method</replaceable> OWNER TO <replaceable>new_owner</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
Refer to <xref linkend="xindex"> for further information.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
</para>
</refsect1>
-
+
<refsect1>
<title>Notes</title>
cases where an operator might or might not be lossy.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
OPERATOR 4 >= (int2, int4) ,
OPERATOR 5 > (int2, int4) ,
FUNCTION 1 btint24cmp(int2, int4) ;
-</programlisting>
+</programlisting>
<para>
To remove these entries again:
OPERATOR 4 (int2, int4) ,
OPERATOR 5 (int2, int4) ,
FUNCTION 1 (int2, int4) ;
-</programlisting>
+</programlisting>
</refsect1>
<refsect1>
<refsect1>
<title>Compatibility</title>
-
+
<para>
There is no <command>ALTER SCHEMA</command> statement in the SQL
standard.
<refpurpose>
change the definition of a sequence generator
</refpurpose>
- </refnamediv>
+ </refnamediv>
<indexterm zone="sql-altersequence">
<primary>ALTER SEQUENCE</primary>
<refnamediv>
<refname>ALTER TABLESPACE</refname>
<refpurpose>change the definition of a tablespace</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-altertablespace">
<primary>ALTER TABLESPACE</primary>
</indexterm>
ALTER TABLESPACE <replaceable>name</replaceable> RESET ( <replaceable class="PARAMETER">tablespace_option</replaceable> [, ... ] )
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
(Note that superusers have these privileges automatically.)
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<refnamediv>
<refname>ALTER TEXT SEARCH CONFIGURATION</refname>
<refpurpose>change the definition of a text search configuration</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-altertsconfig">
<primary>ALTER TEXT SEARCH CONFIGURATION</primary>
</indexterm>
ALTER TEXT SEARCH CONFIGURATION <replaceable>name</replaceable> OWNER TO <replaceable>new_owner</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<command>ALTER TEXT SEARCH CONFIGURATION</>.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<programlisting>
ALTER TEXT SEARCH CONFIGURATION my_config
ALTER MAPPING REPLACE english WITH swedish;
-</programlisting>
+</programlisting>
</refsect1>
<refsect1>
<refnamediv>
<refname>ALTER TEXT SEARCH DICTIONARY</refname>
<refpurpose>change the definition of a text search dictionary</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-altertsdictionary">
<primary>ALTER TEXT SEARCH DICTIONARY</primary>
</indexterm>
ALTER TEXT SEARCH DICTIONARY <replaceable>name</replaceable> OWNER TO <replaceable>new_owner</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<command>ALTER TEXT SEARCH DICTIONARY</>.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
Template-specific options can appear in any order.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
<programlisting>
ALTER TEXT SEARCH DICTIONARY my_dict ( StopWords = newrussian );
-</programlisting>
+</programlisting>
<para>
The following example command changes the language option to dutch,
<programlisting>
ALTER TEXT SEARCH DICTIONARY my_dict ( language = dutch, StopWords );
-</programlisting>
+</programlisting>
<para>
The following example command <quote>updates</> the dictionary's
<refnamediv>
<refname>ALTER TEXT SEARCH PARSER</refname>
<refpurpose>change the definition of a text search parser</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-altertsparser">
<primary>ALTER TEXT SEARCH PARSER</primary>
</indexterm>
ALTER TEXT SEARCH PARSER <replaceable>name</replaceable> RENAME TO <replaceable>new_name</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
You must be a superuser to use <command>ALTER TEXT SEARCH PARSER</>.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<refnamediv>
<refname>ALTER TEXT SEARCH TEMPLATE</refname>
<refpurpose>change the definition of a text search template</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-altertstemplate">
<primary>ALTER TEXT SEARCH TEMPLATE</primary>
</indexterm>
ALTER TEXT SEARCH TEMPLATE <replaceable>name</replaceable> RENAME TO <replaceable>new_name</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
You must be a superuser to use <command>ALTER TEXT SEARCH TEMPLATE</>.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
ALTER USER <replaceable class="PARAMETER">name</replaceable> [ [ WITH ] <replaceable class="PARAMETER">option</replaceable> [ ... ] ]
<phrase>where <replaceable class="PARAMETER">option</replaceable> can be:</phrase>
-
+
SUPERUSER | NOSUPERUSER
| CREATEDB | NOCREATEDB
| CREATEROLE | NOCREATEROLE
| LOGIN | NOLOGIN
| CONNECTION LIMIT <replaceable class="PARAMETER">connlimit</replaceable>
| [ ENCRYPTED | UNENCRYPTED ] PASSWORD '<replaceable class="PARAMETER">password</replaceable>'
- | VALID UNTIL '<replaceable class="PARAMETER">timestamp</replaceable>'
+ | VALID UNTIL '<replaceable class="PARAMETER">timestamp</replaceable>'
ALTER USER <replaceable class="PARAMETER">name</replaceable> RENAME TO <replaceable>new_name</replaceable>
<refsect1>
<title>Compatibility</title>
-
+
<para>
The <command>ALTER USER</command> statement is a
<productname>PostgreSQL</productname> extension. The SQL standard
<refnamediv>
<refname>ALTER VIEW</refname>
<refpurpose>change the definition of a view</refpurpose>
- </refnamediv>
-
+ </refnamediv>
+
<indexterm zone="sql-alterview">
<primary>ALTER VIEW</primary>
</indexterm>
ALTER VIEW <replaceable class="parameter">name</replaceable> SET SCHEMA <replaceable class="parameter">new_schema</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
However, a superuser can alter ownership of any view anyway.)
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
READ WRITE | READ ONLY
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<para>
If the isolation level or read/write mode is specified, the new
transaction has those characteristics, as if
- <xref linkend="sql-set-transaction">
+ <xref linkend="sql-set-transaction">
was executed.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
of the other parameters to this statement.
</para>
</refsect1>
-
+
<refsect1>
<title>Notes</title>
<xref linkend="sql-start-transaction"> has the same functionality
as <command>BEGIN</>.
</para>
-
+
<para>
Use <xref linkend="SQL-COMMIT"> or
<xref linkend="SQL-ROLLBACK">
<para>
Issuing <command>BEGIN</> when already inside a transaction block will
provoke a warning message. The state of the transaction is not affected.
- To nest transactions within a transaction block, use savepoints
+ To nest transactions within a transaction block, use savepoints
(see <xref linkend="sql-savepoint">).
</para>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
disconnects.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
-
+
<para>
<command>CLOSE</command> is fully conforming with the SQL
standard. <command>CLOSE ALL</> is a <productname>PostgreSQL</>
<group><arg>--all</arg><arg>-a</arg></group>
</cmdsynopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<para>
<application>clusterdb</application> accepts the following command-line arguments:
-
+
<variablelist>
<varlistentry>
<term><option>-a</></term>
</para>
<para>
- <application>clusterdb</application> also accepts
+ <application>clusterdb</application> also accepts
the following command-line arguments for connection parameters:
<variablelist>
<term><option>--port <replaceable class="parameter">port</replaceable></></term>
<listitem>
<para>
- Specifies the TCP port or local Unix domain socket file
+ Specifies the TCP port or local Unix domain socket file
extension on which the server
is listening for connections.
</para>
<listitem>
<para>
Force <application>clusterdb</application> to prompt for a
- password before connecting to a database.
+ password before connecting to a database.
</para>
<para>
</para>
<para>
- Comments can be viewed using <application>psql</application>'s
+ Comments can be viewed using <application>psql</application>'s
<command>\d</command> family of commands.
Other user interfaces to retrieve comments can be built atop
the same built-in functions that <application>psql</application> uses, namely
<function>obj_description</>, <function>col_description</>,
- and <function>shobj_description</>
+ and <function>shobj_description</>
(see <xref linkend="functions-info-comment-table">).
</para>
</refsect1>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term><replaceable>source_type</replaceable></term>
<listitem>
<listitem>
<para>
- The data type(s) of the function's arguments (optionally
+ The data type(s) of the function's arguments (optionally
schema-qualified), if any.
</para>
</listitem>
</para>
</listitem>
</varlistentry>
-
+
</variablelist>
</refsect1>
COMMIT [ WORK | TRANSACTION ]
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
and are guaranteed to be durable if a crash occurs.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<refsect1>
<title>Compatibility</title>
-
+
<para>
The SQL standard only specifies the two forms
<literal>COMMIT</literal> and <literal>COMMIT
<title>Description</title>
<para>
- <command>COMMIT PREPARED</command> commits a transaction that is in
+ <command>COMMIT PREPARED</command> commits a transaction that is in
prepared state.
</para>
</refsect1>
<para>
Commit the transaction identified by the transaction
identifier <literal>foobar</>:
-
+
<programlisting>
COMMIT PREPARED 'foobar';
</programlisting>
value. If there is no final function then the ending state value
is returned as-is.
</para>
-
+
<para>
An aggregate function can provide an initial condition,
that is, an initial value for the internal state value.
of a constant of the state value data type. If it is not supplied
then the state value starts out null.
</para>
-
+
<para>
If the state transition function is declared <quote>strict</quote>,
then it cannot be called with null inputs. With such a transition
When these types are different, you must supply a nonnull initial
condition or use a nonstrict transition function.
</para>
-
+
<para>
If the state transition function is not strict, then it will be called
unconditionally at each input row, and must deal with null inputs
and null transition values for itself. This allows the aggregate
author to have full control over the aggregate's handling of null values.
</para>
-
+
<para>
If the final function is declared <quote>strict</quote>, then it will not
be called when the ending state value is null; instead a null result
<function>avg</function> returns null when it sees there were zero
input rows.
</para>
-
+
<para>
Aggregates that behave like <function>MIN</> or <function>MAX</> can
sometimes be optimized by looking into an index instead of scanning every
written in any order, not just the order illustrated above.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
FOR <replaceable>source_encoding</replaceable> TO <replaceable>dest_encoding</replaceable> FROM <replaceable>function_name</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1 id="sql-createconversion-description">
<title>Description</title>
</para>
</refsect1>
-
+
<refsect1 id="sql-createconversion-compat">
<title>Compatibility</title>
LOCAL</> command: the effects of such a command will persist after
function exit, unless the current transaction is rolled back.
</para>
-
+
<para>
See <xref linkend="sql-set"> and
<xref linkend="runtime-config">
CREATE GROUP <replaceable class="PARAMETER">name</replaceable> [ [ WITH ] <replaceable class="PARAMETER">option</replaceable> [ ... ] ]
<phrase>where <replaceable class="PARAMETER">option</replaceable> can be:</phrase>
-
+
SUPERUSER | NOSUPERUSER
| CREATEDB | NOCREATEDB
| CREATEROLE | NOCREATEROLE
| INHERIT | NOINHERIT
| LOGIN | NOLOGIN
| [ ENCRYPTED | UNENCRYPTED ] PASSWORD '<replaceable class="PARAMETER">password</replaceable>'
- | VALID UNTIL '<replaceable class="PARAMETER">timestamp</replaceable>'
+ | VALID UNTIL '<replaceable class="PARAMETER">timestamp</replaceable>'
| IN ROLE <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| IN GROUP <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| ROLE <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| ADMIN <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| USER <replaceable class="PARAMETER">role_name</replaceable> [, ...]
- | SYSID <replaceable class="PARAMETER">uid</replaceable>
+ | SYSID <replaceable class="PARAMETER">uid</replaceable>
</synopsis>
</refsynopsisdiv>
<command>CREATE GROUP</command> is now an alias for
<xref linkend="sql-createrole">.
</para>
- </refsect1>
-
+ </refsect1>
+
<refsect1>
<title>Compatibility</title>
-
+
<para>
There is no <command>CREATE GROUP</command> statement in the SQL
standard.
an index. The operator class specifies that certain operators will fill
particular roles or <quote>strategies</> for this data type and this
index method. The operator class also specifies the support procedures to
- be used by
+ be used by
the index method when the operator class is selected for an
index column. All the operators and functions used by an operator
class must be defined before the operator class can be created.
Refer to <xref linkend="xindex"> for further information.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
clauses can appear in any order.
</para>
</refsect1>
-
+
<refsect1>
<title>Notes</title>
cases where an operator might or might not be lossy.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
FUNCTION 5 g_int_penalty (internal, internal, internal),
FUNCTION 6 g_int_picksplit (internal, internal),
FUNCTION 7 g_int_same (_int4, _int4, internal);
-</programlisting>
+</programlisting>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
arguments, use the <literal>OPERATOR()</> syntax, for example:
<programlisting>
COMMUTATOR = OPERATOR(myschema.===) ,
-</programlisting>
+</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Notes</title>
database.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
JOIN = area_join_procedure,
HASHES, MERGES
);
-</programlisting>
+</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
Refer to <xref linkend="xindex"> for further information.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
</varlistentry>
</variablelist>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
CREATE ROLE <replaceable class="PARAMETER">name</replaceable> [ [ WITH ] <replaceable class="PARAMETER">option</replaceable> [ ... ] ]
<phrase>where <replaceable class="PARAMETER">option</replaceable> can be:</phrase>
-
+
SUPERUSER | NOSUPERUSER
| CREATEDB | NOCREATEDB
| CREATEROLE | NOCREATEROLE
| LOGIN | NOLOGIN
| CONNECTION LIMIT <replaceable class="PARAMETER">connlimit</replaceable>
| [ ENCRYPTED | UNENCRYPTED ] PASSWORD '<replaceable class="PARAMETER">password</replaceable>'
- | VALID UNTIL '<replaceable class="PARAMETER">timestamp</replaceable>'
+ | VALID UNTIL '<replaceable class="PARAMETER">timestamp</replaceable>'
| IN ROLE <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| IN GROUP <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| ROLE <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| ADMIN <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| USER <replaceable class="PARAMETER">role_name</replaceable> [, ...]
- | SYSID <replaceable class="PARAMETER">uid</replaceable>
+ | SYSID <replaceable class="PARAMETER">uid</replaceable>
</synopsis>
</refsynopsisdiv>
</listitem>
</varlistentry>
</variablelist>
- </refsect1>
+ </refsect1>
<refsect1>
<title>Notes</title>
</programlisting>
</para>
- <para>
+ <para>
Create a role that can create databases and manage roles:
<programlisting>
CREATE ROLE admin WITH CREATEDB CREATEROLE;
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<command>CREATE ROLE</command>, are
<productname>PostgreSQL</productname> extensions.
</para>
-
+
<para>
The SQL standard defines the concepts of users and roles, but it
regards them as distinct concepts and leaves all commands defining
<listitem>
<para>
The <literal>OWNED BY</> clause is a <productname>PostgreSQL</>
- extension.
+ extension.
</para>
</listitem>
</itemizedlist>
[ WITH [ NO ] DATA ]
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<member><xref linkend="sql-values"></member>
</simplelist>
</refsect1>
-
+
</refentry>
Refer to <xref linkend="textsearch"> for further information.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
</varlistentry>
</variablelist>
</refsect1>
-
+
<refsect1>
<title>Notes</title>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
Refer to <xref linkend="textsearch"> for further information.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
The options can appear in any order.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
language = russian,
stopwords = myrussian
);
-</programlisting>
+</programlisting>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
Refer to <xref linkend="textsearch"> for further information.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
The arguments can appear in any order, not only the one shown above.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
Refer to <xref linkend="textsearch"> for further information.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
The arguments can appear in any order, not only the one shown above.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
CREATE USER <replaceable class="PARAMETER">name</replaceable> [ [ WITH ] <replaceable class="PARAMETER">option</replaceable> [ ... ] ]
<phrase>where <replaceable class="PARAMETER">option</replaceable> can be:</phrase>
-
+
SUPERUSER | NOSUPERUSER
| CREATEDB | NOCREATEDB
| CREATEROLE | NOCREATEROLE
| LOGIN | NOLOGIN
| CONNECTION LIMIT <replaceable class="PARAMETER">connlimit</replaceable>
| [ ENCRYPTED | UNENCRYPTED ] PASSWORD '<replaceable class="PARAMETER">password</replaceable>'
- | VALID UNTIL '<replaceable class="PARAMETER">timestamp</replaceable>'
+ | VALID UNTIL '<replaceable class="PARAMETER">timestamp</replaceable>'
| IN ROLE <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| IN GROUP <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| ROLE <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| ADMIN <replaceable class="PARAMETER">role_name</replaceable> [, ...]
| USER <replaceable class="PARAMETER">role_name</replaceable> [, ...]
- | SYSID <replaceable class="PARAMETER">uid</replaceable>
+ | SYSID <replaceable class="PARAMETER">uid</replaceable>
</synopsis>
</refsynopsisdiv>
<command>CREATE ROLE</command>.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
-
+
<para>
The <command>CREATE USER</command> statement is a
<productname>PostgreSQL</productname> extension. The SQL standard
</para>
<para>
- The options <option>-D</option>, <option>-l</option>, <option>-E</option>,
+ The options <option>-D</option>, <option>-l</option>, <option>-E</option>,
<option>-O</option>, and
<option>-T</option> correspond to options of the underlying
SQL command <xref linkend="SQL-CREATEDATABASE">; see there for more information
<term><option>--host <replaceable class="parameter">host</replaceable></></term>
<listitem>
<para>
- Specifies the host name of the machine on which the
- server is running. If the value begins with a slash, it is used
+ Specifies the host name of the machine on which the
+ server is running. If the value begins with a slash, it is used
as the directory for the Unix domain socket.
</para>
</listitem>
<term><option>--port <replaceable class="parameter">port</replaceable></></term>
<listitem>
<para>
- Specifies the TCP port or the local Unix domain socket file
+ Specifies the TCP port or the local Unix domain socket file
extension on which the server is listening for connections.
</para>
</listitem>
<listitem>
<para>
Force <application>createdb</application> to prompt for a
- password before connecting to a database.
+ password before connecting to a database.
</para>
<para>
</cmdsynopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<para>
- <application>createlang</application> is a utility for adding a new
+ <application>createlang</application> is a utility for adding a new
programming language to a <productname>PostgreSQL</productname> database.
<application>createlang</application> is just a wrapper around the
<xref linkend="sql-createlanguage">
<para>
<application>createlang</application> accepts the following command-line arguments:
-
+
<variablelist>
<varlistentry>
<term><replaceable class="parameter">langname</replaceable></term>
</para>
<para>
- <application>createlang</application> also accepts
+ <application>createlang</application> also accepts
the following command-line arguments for connection parameters:
-
+
<variablelist>
<varlistentry>
<term><option>-h <replaceable class="parameter">host</replaceable></></term>
<term><option>--host <replaceable class="parameter">host</replaceable></></term>
<listitem>
<para>
- Specifies the host name of the machine on which the
+ Specifies the host name of the machine on which the
server
- is running. If the value begins with a slash, it is used
+ is running. If the value begins with a slash, it is used
as the directory for the Unix domain socket.
</para>
</listitem>
<term><option>--port <replaceable class="parameter">port</replaceable></></term>
<listitem>
<para>
- Specifies the TCP port or local Unix domain socket file
+ Specifies the TCP port or local Unix domain socket file
extension on which the server
is listening for connections.
</para>
<listitem>
<para>
Force <application>createlang</application> to prompt for a
- password before connecting to a database.
+ password before connecting to a database.
</para>
<para>
Use <xref linkend="app-droplang"> to remove a language.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
<arg><replaceable>username</replaceable></arg>
</cmdsynopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<para>
- <application>createuser</application> creates a
+ <application>createuser</application> creates a
new <productname>PostgreSQL</productname> user (or more precisely, a role).
Only superusers and users with <literal>CREATEROLE</> privilege can create
new users, so <application>createuser</application> must be
<productname>PostgreSQL</productname> installation.
</para>
</listitem>
- </varlistentry>
+ </varlistentry>
<varlistentry>
<term><option>-c <replaceable class="parameter">number</replaceable></></term>
<para>
<application>createuser</application> also accepts the following
command-line arguments for connection parameters:
-
+
<variablelist>
<varlistentry>
<term><option>-h <replaceable class="parameter">host</replaceable></></term>
<term><option>--host <replaceable class="parameter">host</replaceable></></term>
<listitem>
<para>
- Specifies the host name of the machine on which the
+ Specifies the host name of the machine on which the
server
- is running. If the value begins with a slash, it is used
+ is running. If the value begins with a slash, it is used
as the directory for the Unix domain socket.
</para>
</listitem>
<term><option>--port <replaceable class="parameter">port</replaceable></></term>
<listitem>
<para>
- Specifies the TCP port or local Unix domain socket file
+ Specifies the TCP port or local Unix domain socket file
extension on which the server
is listening for connections.
</para>
DROP CAST [ IF EXISTS ] (<replaceable>source_type</replaceable> AS <replaceable>target_type</replaceable>) [ CASCADE | RESTRICT ]
</synopsis>
</refsynopsisdiv>
-
+
<refsect1 id="sql-dropcast-description">
<title>Description</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the cast does not exist. A notice is issued
+ Do not throw an error if the cast does not exist. A notice is issued
in this case.
</para>
</listitem>
</programlisting>
</para>
</refsect1>
-
+
<refsect1 id="sql-dropcast-compat">
<title>Compatibility</title>
DROP CONVERSION [ IF EXISTS ] <replaceable>name</replaceable> [ CASCADE | RESTRICT ]
</synopsis>
</refsynopsisdiv>
-
+
<refsect1 id="sql-dropconversion-description">
<title>Description</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the conversion does not exist.
+ Do not throw an error if the conversion does not exist.
A notice is issued in this case.
</para>
</listitem>
DROP DATABASE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
containing the data. It can only be executed by the database owner.
Also, it cannot be executed while you or anyone else are connected
to the target database. (Connect to <literal>postgres</literal> or any
- other database to issue this command.)
+ other database to issue this command.)
</para>
<para>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the database does not exist. A notice is issued
+ Do not throw an error if the database does not exist. A notice is issued
in this case.
</para>
</listitem>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the domain does not exist. A notice is issued
+ Do not throw an error if the domain does not exist. A notice is issued
in this case.
</para>
</listitem>
<refsect1 id="SQL-DROPDOMAIN-compatibility">
<title>Compatibility</title>
-
+
<para>
This command conforms to the SQL standard, except for the
- <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
+ <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
extension.
</para>
</refsect1>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the function does not exist. A notice is issued
+ Do not throw an error if the function does not exist. A notice is issued
in this case.
</para>
</listitem>
<listitem>
<para>
- The data type(s) of the function's arguments (optionally
+ The data type(s) of the function's arguments (optionally
schema-qualified), if any.
</para>
</listitem>
<refsect1 id="SQL-DROPFUNCTION-compatibility">
<title>Compatibility</title>
-
+
<para>
A <command>DROP FUNCTION</command> statement is defined in the SQL
standard, but it is not compatible with this command.
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the index does not exist. A notice is issued
+ Do not throw an error if the index does not exist. A notice is issued
in this case.
</para>
</listitem>
or the owner of the language to use <command>DROP LANGUAGE</>.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the language does not exist. A notice is issued
+ Do not throw an error if the language does not exist. A notice is issued
in this case.
</para>
</listitem>
</varlistentry>
</variablelist>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<refname>DROP OPERATOR CLASS</refname>
<refpurpose>remove an operator class</refpurpose>
</refnamediv>
-
+
<indexterm zone="sql-dropopclass">
<primary>DROP OPERATOR CLASS</primary>
</indexterm>
<literal>CASCADE</> for the drop to complete.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the operator class does not exist. A notice is issued
+ Do not throw an error if the operator class does not exist. A notice is issued
in this case.
</para>
</listitem>
</varlistentry>
</variablelist>
</refsect1>
-
+
<refsect1>
<title>Notes</title>
such indexes along with the operator class.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<refname>DROP OPERATOR</refname>
<refpurpose>remove an operator</refpurpose>
</refnamediv>
-
+
<indexterm zone="sql-dropoperator">
<primary>DROP OPERATOR</primary>
</indexterm>
of the operator.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the operator does not exist. A notice is issued
+ Do not throw an error if the operator does not exist. A notice is issued
in this case.
</para>
</listitem>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<refname>DROP OPERATOR FAMILY</refname>
<refpurpose>remove an operator family</refpurpose>
</refnamediv>
-
+
<indexterm zone="sql-dropopfamily">
<primary>DROP OPERATOR FAMILY</primary>
</indexterm>
<literal>CASCADE</> for the drop to complete.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
drop such indexes along with the operator family.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the role does not exist. A notice is issued
+ Do not throw an error if the role does not exist. A notice is issued
in this case.
</para>
</listitem>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
-
+
<para>
The SQL standard defines <command>DROP ROLE</command>, but it allows
only one role to be dropped at a time, and it specifies different
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the rule does not exist. A notice is issued
+ Do not throw an error if the rule does not exist. A notice is issued
in this case.
</para>
</listitem>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
even if he does not own some of the objects within the schema.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the schema does not exist. A notice is issued
+ Do not throw an error if the schema does not exist. A notice is issued
in this case.
</para>
</listitem>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
-
+
<para>
<command>DROP SCHEMA</command> is fully conforming with the SQL
standard, except that the standard only allows one schema to be
- dropped per command, and apart from the
- <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
+ dropped per command, and apart from the
+ <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
extension.
</para>
</refsect1>
generators. A sequence can only be dropped by its owner or a superuser.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the sequence does not exist. A notice is issued
+ Do not throw an error if the sequence does not exist. A notice is issued
in this case.
</para>
</listitem>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<para>
<command>DROP SEQUENCE</command> conforms to the <acronym>SQL</acronym>
standard, except that the standard only allows one
- sequence to be dropped per command, and apart from the
- <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
- extension.
+ sequence to be dropped per command, and apart from the
+ <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
+ extension.
</para>
</refsect1>
DROP TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable> [, ...] [ CASCADE | RESTRICT ]
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
constraint, not the other table entirely.)
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the table does not exist. A notice is issued
+ Do not throw an error if the table does not exist. A notice is issued
in this case.
</para>
</listitem>
<title>Examples</title>
<para>
- To destroy two tables, <literal>films</literal> and
+ To destroy two tables, <literal>films</literal> and
<literal>distributors</literal>:
<programlisting>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<para>
This command conforms to the SQL standard, except that the standard only
- allows one table to be dropped per command, and apart from the
- <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
+ allows one table to be dropped per command, and apart from the
+ <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
extension.
</para>
</refsect1>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the tablespace does not exist. A notice is issued
+ Do not throw an error if the tablespace does not exist. A notice is issued
in this case.
</para>
</listitem>
<refnamediv>
<refname>DROP TRIGGER</refname>
- <refpurpose>remove a trigger</refpurpose>
- </refnamediv>
+ <refpurpose>remove a trigger</refpurpose>
+ </refnamediv>
<indexterm zone="sql-droptrigger">
<primary>DROP TRIGGER</primary>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the trigger does not exist. A notice is issued
+ Do not throw an error if the trigger does not exist. A notice is issued
in this case.
</para>
</listitem>
</programlisting>
</para>
</refsect1>
-
+
<refsect1 id="SQL-DROPTRIGGER-compatibility">
<title>Compatibility</title>
-
+
<para>
The <command>DROP TRIGGER</command> statement in
<productname>PostgreSQL</productname> is incompatible with the SQL
<refname>DROP TEXT SEARCH CONFIGURATION</refname>
<refpurpose>remove a text search configuration</refpurpose>
</refnamediv>
-
+
<indexterm zone="sql-droptsconfig">
<primary>DROP TEXT SEARCH CONFIGURATION</primary>
</indexterm>
configuration.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
drop such indexes along with the text search configuration.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<refname>DROP TEXT SEARCH DICTIONARY</refname>
<refpurpose>remove a text search dictionary</refpurpose>
</refnamediv>
-
+
<indexterm zone="sql-droptsdictionary">
<primary>DROP TEXT SEARCH DICTIONARY</primary>
</indexterm>
dictionary.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
drop such configurations along with the dictionary.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<refname>DROP TEXT SEARCH PARSER</refname>
<refpurpose>remove a text search parser</refpurpose>
</refnamediv>
-
+
<indexterm zone="sql-droptsparser">
<primary>DROP TEXT SEARCH PARSER</primary>
</indexterm>
parser. You must be a superuser to use this command.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
drop such configurations along with the parser.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<refname>DROP TEXT SEARCH TEMPLATE</refname>
<refpurpose>remove a text search template</refpurpose>
</refnamediv>
-
+
<indexterm zone="sql-droptstemplate">
<primary>DROP TEXT SEARCH TEMPLATE</primary>
</indexterm>
template. You must be a superuser to use this command.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
drop such dictionaries along with the template.
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
Only the owner of a type can remove it.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the type does not exist. A notice is issued
+ Do not throw an error if the type does not exist. A notice is issued
in this case.
</para>
</listitem>
</varlistentry>
</variablelist>
</refsect1>
-
+
<refsect1 id="SQL-DROPTYPE-examples">
<title>Examples</title>
this command you must be the owner of the view.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<term><literal>IF EXISTS</literal></term>
<listitem>
<para>
- Do not throw an error if the view does not exist. A notice is issued
+ Do not throw an error if the view does not exist. A notice is issued
in this case.
</para>
</listitem>
</programlisting>
</para>
</refsect1>
-
+
<refsect1>
<title>Compatibility</title>
<para>
This command conforms to the SQL standard, except that the standard only
- allows one view to be dropped per command, and apart from the
- <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
- extension.
+ allows one view to be dropped per command, and apart from the
+ <literal>IF EXISTS</> option, which is a <productname>PostgreSQL</>
+ extension.
</para>
</refsect1>
<term><option>--host <replaceable class="parameter">host</replaceable></></term>
<listitem>
<para>
- Specifies the host name of the machine on which the
+ Specifies the host name of the machine on which the
server
- is running. If the value begins with a slash, it is used
+ is running. If the value begins with a slash, it is used
as the directory for the Unix domain socket.
</para>
</listitem>
<term><option>--port <replaceable class="parameter">port</replaceable></></term>
<listitem>
<para>
- Specifies the TCP port or local Unix domain socket file
+ Specifies the TCP port or local Unix domain socket file
extension on which the server
is listening for connections.
</para>
<listitem>
<para>
Force <application>dropdb</application> to prompt for a
- password before connecting to a database.
+ password before connecting to a database.
</para>
<para>
<arg choice="plain"><replaceable>dbname</replaceable></arg>
</cmdsynopsis>
</refsynopsisdiv>
-
+
<refsect1 id="R1-APP-DROPLANG-1">
<title>
Description
</title>
<para>
- <application>droplang</application> is a utility for removing an
+ <application>droplang</application> is a utility for removing an
existing programming language from a
<productname>PostgreSQL</productname> database.
<application>droplang</application> can drop any procedural language,
<para>
<application>droplang</application> accepts the following command line arguments:
-
+
<variablelist>
<varlistentry>
<term><replaceable class="parameter">langname</replaceable></term>
</para>
<para>
- <application>droplang</application> also accepts
+ <application>droplang</application> also accepts
the following command line arguments for connection parameters:
-
+
<variablelist>
<varlistentry>
<term><option>-h <replaceable class="parameter">host</replaceable></></term>
<term><option>--host <replaceable class="parameter">host</replaceable></></term>
<listitem>
<para>
- Specifies the host name of the machine on which the
+ Specifies the host name of the machine on which the
server
- is running. If host begins with a slash, it is used
+ is running. If host begins with a slash, it is used
as the directory for the Unix domain socket.
</para>
</listitem>
<term><option>--port <replaceable class="parameter">port</replaceable></></term>
<listitem>
<para>
- Specifies the Internet TCP/IP port or local Unix domain socket file
+ Specifies the Internet TCP/IP port or local Unix domain socket file
extension on which the server
is listening for connections.
</para>
<listitem>
<para>
Force <application>droplang</application> to prompt for a
- password before connecting to a database.
+ password before connecting to a database.
</para>
<para>
Use <xref linkend="app-createlang"> to add a language.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
<term><replaceable class="parameter">username</replaceable></term>
<listitem>
<para>
- Specifies the name of the <productname>PostgreSQL</productname> user to be removed.
+ Specifies the name of the <productname>PostgreSQL</productname> user to be removed.
You will be prompted for a name if none is specified on the command line.
</para>
</listitem>
<para>
<application>dropuser</application> also accepts the following
command-line arguments for connection parameters:
-
+
<variablelist>
<varlistentry>
<term><option>-h <replaceable class="parameter">host</replaceable></></term>
<term><option>--host <replaceable class="parameter">host</replaceable></></term>
<listitem>
<para>
- Specifies the host name of the machine on which the
+ Specifies the host name of the machine on which the
server
- is running. If the value begins with a slash, it is used
+ is running. If the value begins with a slash, it is used
as the directory for the Unix domain socket.
</para>
</listitem>
<term><option>--port <replaceable class="parameter">port</replaceable></></term>
<listitem>
<para>
- Specifies the TCP port or local Unix domain socket file
+ Specifies the TCP port or local Unix domain socket file
extension on which the server
is listening for connections.
</para>
<listitem>
<para>
Force <application>dropuser</application> to prompt for a
- password before connecting to a database.
+ password before connecting to a database.
</para>
<para>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term><option>-v</option></term>
<listitem>
END [ WORK | TRANSACTION ]
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
that is equivalent to <xref linkend="sql-commit">.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<refsect1>
<title>Compatibility</title>
-
+
<para>
<command>END</command> is a <productname>PostgreSQL</productname>
extension that provides functionality equivalent to <xref
CLOSE liahona;
COMMIT WORK;
</programlisting>
- </para>
+ </para>
</refsect1>
<refsect1>
which the database data will live, generating the shared catalog
tables (tables that belong to the whole cluster rather than to any
particular database), and creating the <literal>template1</literal>
- and <literal>postgres</literal> databases. When you later create a
- new database, everything in the <literal>template1</literal> database is
+ and <literal>postgres</literal> databases. When you later create a
+ new database, everything in the <literal>template1</literal> database is
copied. (Therefore, anything installed in <literal>template1</literal>
is automatically copied into each database created later.)
The <literal>postgres</literal> database is a default database meant
<option>--lc-collate</option> and <option>--lc-ctype</option> options.
Collation orders other than <literal>C</> or <literal>POSIX</> also have
a performance penalty. For these reasons it is important to choose the
- right locale when running <command>initdb</command>.
+ right locale when running <command>initdb</command>.
</para>
<para>
<para>
This option specifies the authentication method for local users
used in <filename>pg_hba.conf</>. Do not use <literal>trust</>
- unless you trust all local users on your system. <literal>Trust</>
+ unless you trust all local users on your system. <literal>Trust</>
is the default for ease of installation.
</para>
</listitem>
</para>
</listitem>
</varlistentry>
-
+
<varlistentry>
<term><option>-W</option></term>
<term><option>--pwprompt</option></term>
— but it won't ensure that what the transaction reads corresponds to
the latest committed values.
</para>
-
+
<para>
If a transaction of this sort is going to change the data in the
table, then it should use <literal>SHARE ROW EXCLUSIVE</> lock mode
reference documentation.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
<programlisting>
BEGIN WORK;
LOCK TABLE films IN SHARE MODE;
-SELECT id FROM films
+SELECT id FROM films
WHERE name = 'Star Wars: Episode I - The Phantom Menace';
-- Do ROLLBACK if record was not returned
-INSERT INTO films_user_comments VALUES
+INSERT INTO films_user_comments VALUES
(_id_, 'GREAT! I was waiting for it for so long!');
COMMIT WORK;
</programlisting>
</para>
<para>
- The parameters for the <command>MOVE</command> command are identical to
+ The parameters for the <command>MOVE</command> command are identical to
those of the <command>FETCH</command> command; refer to
<xref linkend="sql-fetch">
for details on syntax and usage.
<para>
Print the location of user executables. Use this, for example, to find
the <command>psql</> program. This is normally also the location
- where the <filename>pg_config</> program resides.
+ where the <filename>pg_config</> program resides.
</para>
</listitem>
</varlistentry>
<para>
<command>pg_controldata</command> prints information initialized during
<command>initdb</>, such as the catalog version.
- It also shows information about write-ahead logging and checkpoint
+ It also shows information about write-ahead logging and checkpoint
processing. This information is cluster-wide, and not specific to any one
database.
</para>
<para>
This utility can only be run by the user who initialized the cluster because
it requires read access to the data directory.
- You can specify the data directory on the command line, or use
+ You can specify the data directory on the command line, or use
the environment variable <envar>PGDATA</>. This utility supports the options
<literal>-V</> and <literal>--version</>, which print the
<application>pg_controldata</application> version and exit. It also
<para>
<application>pg_dumpall</application> requires all needed
- tablespace directories to exist before the restore; otherwise,
+ tablespace directories to exist before the restore; otherwise,
database creation will fail for databases in non-default
locations.
</para>
</para>
<para>
- The <literal>-V</> and <literal>--version</> options print
+ The <literal>-V</> and <literal>--version</> options print
the <application>pg_resetxlog</application> version and exit. The
options <literal>-?</> and <literal>--help</> show supported arguments,
and exit.
<title>Notes</title>
<para>
- This command must not be used when the server is
+ This command must not be used when the server is
running. <command>pg_resetxlog</command> will refuse to start up if
it finds a server lock file in the data directory. If the
server crashed then a lock file might have been left
<refsect2>
<title>General Purpose</title>
-
+
<variablelist>
<varlistentry>
<term><option>-A 0|1</option></term>
<term><option>--describe-config</option></term>
<listitem>
<para>
- This option dumps out the server's internal configuration variables,
+ This option dumps out the server's internal configuration variables,
descriptions, and defaults in tab-delimited <command>COPY</> format.
It is designed primarily for use by administration tools.
</para>
<literal>n</literal>, <literal>m</literal>, and <literal>h</literal>
disable nested-loop, merge and hash joins respectively.
</para>
-
+
<para>
Neither sequential scans nor nested-loop joins can be disabled
completely; the <literal>-fs</literal> and
start and shut down the <command>postgres</command> server
safely and comfortably.
</para>
-
+
<para>
If at all possible, <emphasis>do not</emphasis> use
<literal>SIGKILL</literal> to kill the main
<para>
Prepared statements only last for the duration of the current
database session. When the session ends, the prepared statement is
- forgotten, so it must be recreated before being used again. This
+ forgotten, so it must be recreated before being used again. This
also means that a single prepared statement cannot be used by
multiple simultaneous database clients; however, each client can create
their own prepared statement to use. The prepared statement can be
<arg><replaceable>dbname</replaceable></arg>
</cmdsynopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<para>
<application>reindexdb</application> accepts the following command-line arguments:
-
+
<variablelist>
<varlistentry>
<term><option>-a</></term>
</para>
<para>
- <application>reindexdb</application> also accepts
+ <application>reindexdb</application> also accepts
the following command-line arguments for connection parameters:
<variablelist>
<term><option>--port <replaceable class="parameter">port</replaceable></></term>
<listitem>
<para>
- Specifies the TCP port or local Unix domain socket file
+ Specifies the TCP port or local Unix domain socket file
extension on which the server
is listening for connections.
</para>
<listitem>
<para>
Force <application>reindexdb</application> to prompt for a
- password before connecting to a database.
+ password before connecting to a database.
</para>
<para>
RELEASE [ SAVEPOINT ] <replaceable>savepoint_name</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
<refsect1>
<title>Compatibility</title>
-
+
<para>
This command conforms to the <acronym>SQL</> standard. The standard
specifies that the key word <literal>SAVEPOINT</literal> is
<title>Description</title>
<para>
- <command>ROLLBACK PREPARED</command> rolls back a transaction that is in
+ <command>ROLLBACK PREPARED</command> rolls back a transaction that is in
prepared state.
</para>
</refsect1>
<para>
Roll back the transaction identified by the transaction
identifier <literal>foobar</>:
-
+
<programlisting>
ROLLBACK PREPARED 'foobar';
</programlisting>
SAVEPOINT <replaceable>savepoint_name</replaceable>
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
the transaction state to what it was at the time of the savepoint.
</para>
</refsect1>
-
+
<refsect1>
<title>Parameters</title>
<refsect1>
<title>Compatibility</title>
-
+
<para>
SQL requires a savepoint to be destroyed automatically when another
savepoint with the same name is established. In
<listitem>
<para>
- The data type(s) of the function's arguments (optionally
+ The data type(s) of the function's arguments (optionally
schema-qualified), if any.
</para>
</listitem>
SET [ SESSION | LOCAL ] TIME ZONE { <replaceable class="PARAMETER">timezone</replaceable> | LOCAL | DEFAULT }
</synopsis>
</refsynopsisdiv>
-
+
<refsect1>
<title>Description</title>
system view to perform the equivalent of <command>SET</>.
</para>
</refsect1>
-
+
<refsect1>
<title>Examples</title>
<listitem>
<para>
The number of rows to skip before starting to return rows.
- For details see
+ For details see
<xref linkend="sql-limit" endterm="sql-limit-title">.
</para>
</listitem>
</para>
<para>
- Some extreme behaviours can also be generated on the primary using the
+ Some extreme behaviours can also be generated on the primary using the
script: <filename>src/test/regress/sql/hs_primary_extremes.sql</filename>
to allow the behaviour of the standby to be tested.
</para>
</sect1>
<sect1 id="regress-evaluation">
- <title>Test Evaluation</title>
+ <title>Test Evaluation</title>
<para>
Some properly installed and fully functional
<sect2>
<title>Error message differences</title>
-
+
<para>
Some of the regression tests involve intentional invalid input
values. Error messages can come from either the
inspection.
</para>
</sect2>
-
+
<sect2>
<title>Locale differences</title>
applications.
</para>
</sect2>
-
+
<sect2>
<title>Date and time differences</title>
ensures proper results.
</para>
</sect2>
-
+
<sect2>
<title>Floating-point differences</title>
-
+
<para>
Some of the tests involve computing 64-bit floating-point numbers (<type>double
precision</type>) from table columns. Differences in
<sect2>
<title>Row ordering differences</title>
-
+
<para>
You might see differences in which the same rows are output in a
different order than what appears in the expected file. In most cases
<sect2>
<title>Insufficient stack depth</title>
-
+
<para>
If the <literal>errors</literal> test results in a server crash
at the <literal>select infinite_recurse()</> command, it means that
<![%standalone-include;[<literal>max_stack_depth</literal>]]>
parameter indicates. This
can be fixed by running the server under a higher stack
- size limit (4MB is recommended with the default value of
+ size limit (4MB is recommended with the default value of
<varname>max_stack_depth</>). If you are unable to do that, an
alternative is to reduce the value of <varname>max_stack_depth</>.
</para>
<sect2>
<title>The <quote>random</quote> test</title>
-
+
<para>
- The <literal>random</literal> test script is intended to produce
+ The <literal>random</literal> test script is intended to produce
random results. In rare cases, this causes the random regression
test to fail. Typing:
<programlisting>
testname:output:platformpattern=comparisonfilename
</synopsis>
The test name is just the name of the particular regression test
- module. The output value indicates which output file to check. For the
+ module. The output value indicates which output file to check. For the
standard regression tests, this is always <literal>out</literal>. The
value corresponds to the file extension of the output file.
The platform pattern is a pattern in the style of the Unix
mechanism only for variant results that you are willing to consider
equally valid in all contexts.
</para>
-
+
</sect1>
<sect1 id="regress-coverage">
);
</programlisting>
then the same <literal>inventory_item</> composite type shown above would
- come into being as a
+ come into being as a
byproduct, and could be used just as above. Note however an important
restriction of the current implementation: since no constraints are
associated with a composite type, the constraints shown in the table
<para>
The <literal>ROW</> constructor syntax is usually easier to work with
than the composite-literal syntax when writing composite values in SQL
- commands.
+ commands.
In <literal>ROW</>, individual field values are written the same way
they would be written when not members of a composite.
</para>
separate user account. This user account should only own the data
that is managed by the server, and should not be shared with other
daemons. (For example, using the user <literal>nobody</literal> is a bad
- idea.) It is not advisable to install executables owned by this
- user because compromised systems could then modify their own
+ idea.) It is not advisable to install executables owned by this
+ user because compromised systems could then modify their own
binaries.
</para>
them turned on or sufficiently sized by default, especially as
available RAM and the demands of database applications grow.
(On <systemitem class="osname">Windows</>,
- <productname>PostgreSQL</> provides its own replacement
+ <productname>PostgreSQL</> provides its own replacement
implementation of these facilities, so most of this section
can be disregarded.)
</para>
</para>
<para>
- <systemitem class="osname">FreeBSD</> versions before 4.0 work like
+ <systemitem class="osname">FreeBSD</> versions before 4.0 work like
<systemitem class="osname">NetBSD</> and <systemitem class="osname">
OpenBSD</> (see below).
</para>
<para>
Ancient distributions might not have the <command>sysctl</command> program,
- but equivalent changes can be made by manipulating the
+ but equivalent changes can be made by manipulating the
<filename>/proc</filename> file system:
<screen>
<prompt>$</prompt> <userinput>echo 17179869184 >/proc/sys/kernel/shmmax</userinput>
file installed (<xref linkend="libpq-connect">).
</para>
</sect1>
-
+
<sect1 id="encryption-options">
<title>Encryption Options</title>
insecure networks. Encryption might also be required to secure
sensitive data such as medical records or financial transactions.
</para>
-
+
<variablelist>
<varlistentry>
The client supplies the decryption key and the data is decrypted
on the server and then sent to the client.
</para>
-
+
<para>
The decrypted data and the decryption key are present on the
server for a brief time while it is being decrypted and
</abstract>
<para>
- <acronym>SQL</acronym> has become the most popular relational query
+ <acronym>SQL</acronym> has become the most popular relational query
language.
The name <quote><acronym>SQL</acronym></quote> is an abbreviation for
- <firstterm>Structured Query Language</firstterm>.
+ <firstterm>Structured Query Language</firstterm>.
In 1974 Donald Chamberlin and others defined the
language SEQUEL (<firstterm>Structured English Query
Language</firstterm>) at IBM
Database Committee X3H2 to
develop a proposal for a standard relational language. This proposal
was ratified in 1986 and consisted essentially of the IBM dialect of
- <acronym>SQL</acronym>. In 1987 this <acronym>ANSI</acronym>
+ <acronym>SQL</acronym>. In 1987 this <acronym>ANSI</acronym>
standard was also accepted as an international
standard by the International Organization for Standardization
(<acronym>ISO</acronym>).
<acronym>SQL/92</acronym> is the version
normally meant when people refer to <quote>the <acronym>SQL</acronym>
standard</quote>. A detailed
- description of <acronym>SQL/92</acronym> is given in
+ description of <acronym>SQL/92</acronym> is given in
<xref linkend="DATE97" endterm="DATE97">. At the time of
writing this document a new standard informally referred to
as <firstterm><acronym>SQL3</acronym></firstterm>
</para>
<para>
- A <firstterm>relational database</firstterm> is a database that is
+ A <firstterm>relational database</firstterm> is a database that is
perceived by its
users as a <firstterm>collection of tables</firstterm> (and
nothing else but tables).
<listitem>
<para>
SELLS stores information about which part (PNO) is sold by which
- supplier (SNO).
+ supplier (SNO).
It serves in a sense to connect the other two tables together.
</para>
</listitem>
<firstterm>entities</firstterm> and
SELLS can be regarded as a <firstterm>relationship</firstterm>
between a particular
- part and a particular supplier.
+ part and a particular supplier.
</para>
<para>
...
<parameter>v<subscript>k</subscript></parameter>,
such that
- <parameter>v<subscript>1</subscript></parameter> ∈
+ <parameter>v<subscript>1</subscript></parameter> ∈
<parameter>D<subscript>1</subscript></parameter>,
- <parameter>v<subscript>2</subscript></parameter> ∈
+ <parameter>v<subscript>2</subscript></parameter> ∈
<parameter>D<subscript>2</subscript></parameter>,
...
- <parameter>v<subscript>k</subscript></parameter> ∈
+ <parameter>v<subscript>k</subscript></parameter> ∈
<parameter>D<subscript>k</subscript></parameter>.
</para>
<xref linkend="supplier-fig" endterm="supplier-fig"> where
every tuple is represented by a row and every column corresponds to
one component of a tuple. Giving names (called attributes) to the
- columns leads to the definition of a
+ columns leads to the definition of a
<firstterm>relation scheme</firstterm>.
</para>
\mbox{$R(A_{1},A_{2},\ldots,A_{k})$}.
\end{definition}
-->
- A <firstterm>relation scheme</firstterm> <literal>R</literal> is a
+ A <firstterm>relation scheme</firstterm> <literal>R</literal> is a
finite set of attributes
<parameter>A<subscript>1</subscript></parameter>,
<parameter>A<subscript>2</subscript></parameter>,
<para>
SELECT (σ): extracts <firstterm>tuples</firstterm> from
a relation that
- satisfy a given restriction. Let <parameter>R</parameter> be a
+ satisfy a given restriction. Let <parameter>R</parameter> be a
table that contains an attribute
<parameter>A</parameter>.
σ<subscript>A=a</subscript>(R) = {t ∈ R ∣ t(A) = a}
<classname>S</classname> be a table with
arity <literal>k</literal><subscript>2</subscript>.
<classname>R</classname> × <classname>S</classname>
- is the set of all
+ is the set of all
<literal>k</literal><subscript>1</subscript>
+ <literal>k</literal><subscript>2</subscript>-tuples
whose first <literal>k</literal><subscript>1</subscript>
set of tuples
that are in <classname>R</classname> and in
<classname>S</classname>.
- We again require that <classname>R</classname> and
+ We again require that <classname>R</classname> and
<classname>S</classname> have the
same arity.
</para>
<listitem>
<para>
      JOIN (⋈): connects two tables by their common
- attributes. Let <classname>R</classname> be a table with the
- attributes <classname>A</classname>,<classname>B</classname>
+ attributes. Let <classname>R</classname> be a table with the
+ attributes <classname>A</classname>,<classname>B</classname>
and <classname>C</classname> and
let <classname>S</classname> be a table with the attributes
<classname>C</classname>,<classname>D</classname>
and <classname>E</classname>. There is one
attribute common to both relations,
- the attribute <classname>C</classname>.
+ the attribute <classname>C</classname>.
<!--
<classname>R</classname> ⋈ <classname>S</classname> =
π<subscript><classname>R</classname>.<classname>A</classname>,<classname>R</classname>.<classname>B</classname>,<classname>R</classname>.<classname>C</classname>,<classname>S</classname>.<classname>D</classname>,<classname>S</classname>.<classname>E</classname></subscript>(σ<subscript><classname>R</classname>.<classname>C</classname>=<classname>S</classname>.<classname>C</classname></subscript>(<classname>R</classname> × <classname>S</classname>)).
<title id="suppl-rel-alg">A Query Using Relational Algebra</title>
<para>
Recall that we formulated all those relational operators to be able to
- retrieve data from the database. Let's return to our example from
+ retrieve data from the database. Let's return to our example from
the previous
section (<xref linkend="operations" endterm="operations">)
where someone wanted to know the names of all
- suppliers that sell the part <literal>Screw</literal>.
+ suppliers that sell the part <literal>Screw</literal>.
This question can be answered
using relational algebra by the following operation:
<para>
The relational algebra and the relational calculus have the same
<firstterm>expressive power</firstterm>; i.e., all queries that
- can be formulated using relational algebra can also be formulated
+ can be formulated using relational algebra can also be formulated
using the relational calculus and vice versa.
This was first proved by E. F. Codd in
1972. This proof is based on an algorithm (<quote>Codd's reduction
<para>
Aggregate Functions: Operations such as
<firstterm>average</firstterm>, <firstterm>sum</firstterm>,
- <firstterm>max</firstterm>, etc. can be applied to columns of a
+ <firstterm>max</firstterm>, etc. can be applied to columns of a
relation to
obtain a single quantity.
</para>
statement:
<programlisting>
-SELECT PNAME, PRICE
+SELECT PNAME, PRICE
FROM PART
WHERE PRICE > 10;
</programlisting>
using the keywords OR, AND, and NOT:
<programlisting>
-SELECT PNAME, PRICE
+SELECT PNAME, PRICE
FROM PART
WHERE PNAME = 'Bolt' AND
(PRICE = 0 OR PRICE <= 15);
because there are common named attributes (SNO and PNO) among the
relations. Now we can distinguish between the common named attributes
by simply prefixing the attribute name with the alias name followed by
- a dot. The join is calculated in the same way as shown in
+ a dot. The join is calculated in the same way as shown in
<xref linkend="join-example" endterm="join-example">.
First the Cartesian product
is derived. Now only those tuples satisfying the
conditions given in the WHERE clause are selected (i.e., the common
named attributes have to be equal). Finally we project out all
- columns but S.SNAME and P.PNAME.
+ columns but S.SNAME and P.PNAME.
</para>
<para>
Blake | Bolt
Jones | Cam
Blake | Cam
-(8 rows)
+(8 rows)
</screen>
</para>
<listitem>
<para>
For each row R1 of T1, the joined table has a row for each row
- in T2 that satisfies the join condition with R1.
+ in T2 that satisfies the join condition with R1.
</para>
<tip>
<para>
The partitioning of the tuples into groups is done by using the
keywords <command>GROUP BY</command> followed by a list of
attributes that define the
- groups. If we have
+ groups. If we have
<command>GROUP BY A<subscript>1</subscript>, ⃛, A<subscript>k</subscript></command>
we partition
the relation into groups, such that two tuples are in the same group
- if and only if they agree on all the attributes
+ if and only if they agree on all the attributes
A<subscript>1</subscript>, ⃛, A<subscript>k</subscript>.
<example>
named 'Screw' we use the query:
<programlisting>
-SELECT *
- FROM PART
+SELECT *
+ FROM PART
WHERE PRICE > (SELECT PRICE FROM PART
WHERE PNAME='Screw');
</programlisting>
</para>
<para>
- If we want to know all suppliers that do not sell any part
+ If we want to know all suppliers that do not sell any part
(e.g., to be able to remove these suppliers from the database) we use:
<programlisting>
-SELECT *
+SELECT *
FROM SUPPLIER S
WHERE NOT EXISTS
(SELECT * FROM SELLS SE
UNION
SELECT S.SNO, S.SNAME, S.CITY
FROM SUPPLIER S
- WHERE S.SNAME = 'Adams';
+ WHERE S.SNAME = 'Adams';
</programlisting>
gives the result:
<para>
There is a set of commands used for data definition included in the
- <acronym>SQL</acronym> language.
+ <acronym>SQL</acronym> language.
</para>
<sect3 id="create">
<synopsis>
CREATE TABLE <replaceable class="parameter">table_name</replaceable>
(<replaceable class="parameter">name_of_attr_1</replaceable> <replaceable class="parameter">type_of_attr_1</replaceable>
- [, <replaceable class="parameter">name_of_attr_2</replaceable> <replaceable class="parameter">type_of_attr_2</replaceable>
+ [, <replaceable class="parameter">name_of_attr_2</replaceable> <replaceable class="parameter">type_of_attr_2</replaceable>
[, ...]]);
</synopsis>
<title>Data Types in <acronym>SQL</acronym></title>
<para>
- The following is a list of some data types that are supported by
+ The following is a list of some data types that are supported by
<acronym>SQL</acronym>:
<itemizedlist>
the <command>CREATE INDEX</command> command is used. The syntax is:
<programlisting>
-CREATE INDEX <replaceable class="parameter">index_name</replaceable>
+CREATE INDEX <replaceable class="parameter">index_name</replaceable>
ON <replaceable class="parameter">table_name</replaceable> ( <replaceable class="parameter">name_of_attribute</replaceable> );
</programlisting>
</para>
AS <replaceable class="parameter">select_stmt</replaceable>
</programlisting>
- where <replaceable class="parameter">select_stmt</replaceable>
+ where <replaceable class="parameter">select_stmt</replaceable>
is a valid select statement as defined
in <xref linkend="select-title" endterm="select-title">.
Note that <replaceable class="parameter">select_stmt</replaceable> is
- not executed when the view is created. It is just stored in the
+ not executed when the view is created. It is just stored in the
<firstterm>system catalogs</firstterm>
and is executed whenever a query against the view is made.
</para>
The syntax is:
<programlisting>
-INSERT INTO <replaceable class="parameter">table_name</replaceable> (<replaceable class="parameter">name_of_attr_1</replaceable>
+INSERT INTO <replaceable class="parameter">table_name</replaceable> (<replaceable class="parameter">name_of_attr_1</replaceable>
[, <replaceable class="parameter">name_of_attr_2</replaceable> [,...]])
VALUES (<replaceable class="parameter">val_attr_1</replaceable> [, <replaceable class="parameter">val_attr_2</replaceable> [, ...]]);
</programlisting>
<programlisting>
UPDATE <replaceable class="parameter">table_name</replaceable>
- SET <replaceable class="parameter">name_of_attr_1</replaceable> = <replaceable class="parameter">value_1</replaceable>
+ SET <replaceable class="parameter">name_of_attr_1</replaceable> = <replaceable class="parameter">value_1</replaceable>
[, ... [, <replaceable class="parameter">name_of_attr_k</replaceable> = <replaceable class="parameter">value_k</replaceable>]]
WHERE <replaceable class="parameter">condition</replaceable>;
</programlisting>
by a <firstterm>precompiler</firstterm>
(which usually inserts
calls to library routines that perform the various <acronym>SQL</acronym>
- commands).
+ commands).
</para>
<para>
<screen>
psql (&version;)
Type "help" for help.
-
+
mydb=>
</screen>
<indexterm><primary>superuser</primary></indexterm>
font-size: 1.2em;
margin: 1.2em 0em 1.2em 0em;
font-weight: bold;
- color: #666;
+ color: #666;
}
H3 {
<style-sheet>
<style-specification use="docbook">
- <style-specification-body>
+ <style-specification-body>
<!-- general customization ......................................... -->
;; Don't append period if run-in title ends with any of these
;; characters. We had to add the colon here. This is fixed in
;; stylesheets version 1.71, so it can be removed sometime.
-(define %content-title-end-punct%
+(define %content-title-end-punct%
'(#\. #\! #\? #\:))
;; No automatic punctuation after honorific name parts
(normalize "author")
(normalize "authorgroup")
(normalize "title")
- (normalize "subtitle")
+ (normalize "subtitle")
(normalize "volumenum")
(normalize "edition")
(normalize "othercredit")
(empty-sosofo)))
;; Add character encoding and time of creation into HTML header
-(define %html-header-tags%
+(define %html-header-tags%
(list (list "META" '("HTTP-EQUIV" "Content-Type") '("CONTENT" "text/html; charset=ISO-8859-1"))
(list "META" '("NAME" "creation") (list "CONTENT" (time->string (time) #t)))))
(make element gi: "A"
attributes: (list
(list "TITLE" (element-title-string nextsib))
- (list "HREF"
+ (list "HREF"
(href-to
nextsib)))
(gentext-nav-next-sibling nextsib))))
(make element gi: "A"
attributes: (list
(list "TITLE" (element-title-string next))
- (list "HREF"
+ (list "HREF"
(href-to
next))
(list "ACCESSKEY"
(my-simplelist-vert members))
((equal? type (normalize "horiz"))
(simplelist-table 'row cols members)))))
-
+
(element member
(let ((type (inherited-attribute-string (normalize "type"))))
(cond
(let ((table (ancestor-member nd ($table-element-list$))))
(if (node-list-empty? table)
nd
- table)))
+ table)))
;; (The function below overrides the one in print/dbindex.dsl.)
(define (part-titlepage elements #!optional (side 'recto))
- (let ((nodelist (titlepage-nodelist
+ (let ((nodelist (titlepage-nodelist
(if (equal? side 'recto)
(reference-titlepage-recto-elements)
(reference-titlepage-verso-elements))
page-number-restart?: (first-part?)
input-whitespace-treatment: 'collapse
use: default-text-style
-
+
;; This hack is required for the RTF backend. If an external-graphic
;; is the first thing on the page, RTF doesn't seem to do the right
;; thing (the graphic winds up on the baseline of the first line
(make paragraph
line-spacing: 1pt
(literal ""))
-
+
(let loop ((nl nodelist) (lastnode (empty-node-list)))
(if (node-list-empty? nl)
(empty-sosofo)
(define (reference-titlepage elements #!optional (side 'recto))
- (let ((nodelist (titlepage-nodelist
+ (let ((nodelist (titlepage-nodelist
(if (equal? side 'recto)
(reference-titlepage-recto-elements)
(reference-titlepage-verso-elements))
page-number-restart?: (first-reference?)
input-whitespace-treatment: 'collapse
use: default-text-style
-
+
;; This hack is required for the RTF backend. If an external-graphic
;; is the first thing on the page, RTF doesn't seem to do the right
;; thing (the graphic winds up on the baseline of the first line
(make paragraph
line-spacing: 1pt
(literal ""))
-
+
(let loop ((nl nodelist) (lastnode (empty-node-list)))
(if (node-list-empty? nl)
(empty-sosofo)
(literal "*")
sosofo
(literal "*")))
-
+
(define ($dquote-seq$ #!optional (sosofo (process-children)))
(make sequence
(literal (gentext-start-quote))
sosofo
(literal (gentext-end-quote))))
-
+
(element (para command) ($dquote-seq$))
(element (para emphasis) ($asterix-seq$))
(element (para filename) ($dquote-seq$))
<listitem>
<para>
Force <application>vacuumlo</application> to prompt for a
- password before connecting to a database.
+ password before connecting to a database.
</para>
<para>
are points in the sequence of transactions at which it is guaranteed
that the heap and index data files have been updated with all information written before
the checkpoint. At checkpoint time, all dirty data pages are flushed to
- disk and a special checkpoint record is written to the log file.
+ disk and a special checkpoint record is written to the log file.
(The changes were previously flushed to the <acronym>WAL</acronym> files.)
In the event of a crash, the crash recovery procedure looks at the latest
checkpoint record to determine the point in the log (known as the redo
</thead>
<tbody>
<row>
- <entry>consistent - determine whether key satisfies the
+ <entry>consistent - determine whether key satisfies the
query qualifier</entry>
<entry>1</entry>
</row>
<entry>3</entry>
</row>
<row>
- <entry>decompress - compute a decompressed representation of a
+ <entry>decompress - compute a decompressed representation of a
compressed key</entry>
<entry>4</entry>
</row>
<row>
- <entry>penalty - compute penalty for inserting new key into subtree
+ <entry>penalty - compute penalty for inserting new key into subtree
with given subtree's key</entry>
<entry>5</entry>
</row>
can be satisfied exactly by a B-tree index on the integer column.
But there are cases where an index is useful as an inexact guide to
the matching rows. For example, if a GiST index stores only bounding boxes
- for geometric objects, then it cannot exactly satisfy a <literal>WHERE</>
+ for geometric objects, then it cannot exactly satisfy a <literal>WHERE</>
condition that tests overlap between nonrectangular objects such as
polygons. Yet we could use the index to find objects whose bounding
box overlaps the bounding box of the target object, and then do the
<para>
Now we could execute a query like this:
-
+
<screen>
SELECT (a + b) AS c FROM test_complex;
Finally, we can provide the full definition of the data type:
<programlisting>
CREATE TYPE complex (
- internallength = 16,
+ internallength = 16,
input = complex_in,
output = complex_out,
receive = complex_recv,
WINDRES = @WINDRES@
X = @EXEEXT@
-# Perl
+# Perl
ifneq (@PERL@,)
# quoted to protect pathname with spaces
# This macro is for use by libraries linking to libpq. (Because libpgport
# isn't created with the same link flags as libpq, it can't be used.)
libpq = -L$(libpq_builddir) -lpq
-
+
# If doing static linking, shared library dependency info isn't available,
# so add in the libraries that libpq depends on.
ifeq ($(enable_shared), no)
endif
# This macro is for use by client executables (not libraries) that use libpq.
-# We force clients to pull symbols from the non-shared library libpgport
-# rather than pulling some libpgport symbols from libpq just because
-# libpq uses those functions too. This makes applications less
+# We force clients to pull symbols from the non-shared library libpgport
+# rather than pulling some libpgport symbols from libpq just because
+# libpq uses those functions too. This makes applications less
# dependent on changes in libpq's usage of pgport. To do this we link to
# pgport before libpq. This does cause duplicate -lpgport's to appear
# on client link lines.
$(top_builddir)/src/include/stamp-h: $(top_srcdir)/src/include/pg_config.h.in $(top_builddir)/config.status
cd $(top_builddir) && ./config.status src/include/pg_config.h
-# Also remake ecpg_config.h from ecpg_config.h.in if the latter changed, same
+# Also remake ecpg_config.h from ecpg_config.h.in if the latter changed, same
# logic as above.
$(top_builddir)/src/interfaces/ecpg/include/ecpg_config.h: $(top_builddir)/src/interfaces/ecpg/include/stamp-h
ifeq ($(PORTNAME), sunos4)
LINK.shared = $(LD) -assert pure-text -Bdynamic
endif
-
+
ifeq ($(PORTNAME), osf)
LINK.shared = $(LD) -shared -expect_unresolved '*'
endif
$(MAKE) -C parser gram.c gram.h scan.c
$(MAKE) -C bootstrap bootparse.c bootscanner.c
$(MAKE) -C catalog schemapg.h postgres.bki postgres.description postgres.shdescription
- $(MAKE) -C utils fmgrtab.c fmgroids.h
+ $(MAKE) -C utils fmgrtab.c fmgroids.h
$(MAKE) -C utils/misc guc-file.c
#
# Support for code development.
#
-# Use target "quick" to build "postgres" when you know all the subsystems
+# Use target "quick" to build "postgres" when you know all the subsystems
# are up to date. It saves the time of doing all the submakes.
.PHONY: quick
quick: $(OBJS)
not a drink.
Generalized means that the index does not know which operation it accelerates.
-It instead works with custom strategies, defined for specific data types (read
-"Index Method Strategies" in the PostgreSQL documentation). In that sense, Gin
+It instead works with custom strategies, defined for specific data types (read
+"Index Method Strategies" in the PostgreSQL documentation). In that sense, Gin
is similar to GiST and differs from btree indices, which have predefined,
comparison-based operations.
-An inverted index is an index structure storing a set of (key, posting list)
-pairs, where 'posting list' is a set of documents in which the key occurs.
-(A text document would usually contain many keys.) The primary goal of
+An inverted index is an index structure storing a set of (key, posting list)
+pairs, where 'posting list' is a set of documents in which the key occurs.
+(A text document would usually contain many keys.) The primary goal of
Gin indices is support for highly scalable, full-text search in PostgreSQL.
Gin consists of a B-tree index constructed over entries (ET, entries tree),
where each entry is an element of the indexed value (element of array, lexeme
-for tsvector) and where each tuple in a leaf page is either a pointer to a
-B-tree over item pointers (PT, posting tree), or a list of item pointers
+for tsvector) and where each tuple in a leaf page is either a pointer to a
+B-tree over item pointers (PT, posting tree), or a list of item pointers
(PL, posting list) if the tuple is small enough.
Note: There is no delete operation for ET. The reason for this is that in
our experience, the set of distinct words in a large corpus changes very
rarely. This greatly simplifies the code and concurrency algorithms.
-Gin comes with built-in support for one-dimensional arrays (eg. integer[],
+Gin comes with built-in support for one-dimensional arrays (eg. integer[],
text[]), but no support for NULL elements. The following operations are
available:
There are often situations when a full-text search returns a very large set of
results. Since reading tuples from the disk and sorting them could take a
-lot of time, this is unacceptable for production. (Note that the search
+lot of time, this is unacceptable for production. (Note that the search
itself is very fast.)
-Such queries usually contain very frequent lexemes, so the results are not
-very helpful. To facilitate execution of such queries Gin has a configurable
-soft upper limit on the size of the returned set, determined by the
-'gin_fuzzy_search_limit' GUC variable. This is set to 0 by default (no
+Such queries usually contain very frequent lexemes, so the results are not
+very helpful. To facilitate execution of such queries Gin has a configurable
+soft upper limit on the size of the returned set, determined by the
+'gin_fuzzy_search_limit' GUC variable. This is set to 0 by default (no
limit).
If a non-zero search limit is set, then the returned set is a subset of the
whole result set, chosen at random.
"Soft" means that the actual number of returned results could slightly differ
-from the specified limit, depending on the query and the quality of the
+from the specified limit, depending on the query and the quality of the
system's random number generator.
From experience, a value of 'gin_fuzzy_search_limit' in the thousands
(eg. 5000-20000) works well. This means that 'gin_fuzzy_search_limit' will
-have no effect for queries returning a result set with less tuples than this
+have no effect for queries returning a result set with less tuples than this
number.
Limitations
Authors
-------
* Concurrency
* Recovery support via WAL logging
-The support for concurrency implemented in PostgreSQL was developed based on
-the paper "Access Methods for Next-Generation Database Systems" by
+The support for concurrency implemented in PostgreSQL was developed based on
+the paper "Access Methods for Next-Generation Database Systems" by
Marcel Kornaker:
http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz
The original algorithms were modified in several ways:
-* They should be adapted to PostgreSQL conventions. For example, the SEARCH
- algorithm was considerably changed, because in PostgreSQL function search
- should return one tuple (next), not all tuples at once. Also, it should
+* They should be adapted to PostgreSQL conventions. For example, the SEARCH
+ algorithm was considerably changed, because in PostgreSQL function search
+ should return one tuple (next), not all tuples at once. Also, it should
release page locks between calls.
-* Since we added support for variable length keys, it's not possible to
- guarantee enough free space for all keys on pages after splitting. User
- defined function picksplit doesn't have information about size of tuples
+* Since we added support for variable length keys, it's not possible to
+ guarantee enough free space for all keys on pages after splitting. User
+ defined function picksplit doesn't have information about size of tuples
(each tuple may contain several keys as in multicolumn index while picksplit
could work with only one key) and pages.
* We modified original INSERT algorithm for performance reason. In particular,
ptr = top of stack
while(true)
latch( ptr->page, S-mode )
- if ( ptr->page->lsn != ptr->lsn )
+ if ( ptr->page->lsn != ptr->lsn )
ptr->lsn = ptr->page->lsn
currentposition=0
if ( ptr->parentlsn < ptr->page->nsn )
else if ( ptr->page is leaf )
unlatch( ptr->page )
return tuple
- else
+ else
add to stack child page
end
currentposition++
Insert Algorithm
----------------
-INSERT guarantees that the GiST tree remains balanced. User defined key method
-Penalty is used for choosing a subtree to insert; method PickSplit is used for
-the node splitting algorithm; method Union is used for propagating changes
+INSERT guarantees that the GiST tree remains balanced. User defined key method
+Penalty is used for choosing a subtree to insert; method PickSplit is used for
+the node splitting algorithm; method Union is used for propagating changes
upward to maintain the tree properties.
-NOTICE: We modified original INSERT algorithm for performance reason. In
+NOTICE: We modified original INSERT algorithm for performance reason. In
particularly, it is now a single-pass algorithm.
-Function findLeaf is used to identify subtree for insertion. Page, in which
-insertion is proceeded, is locked as well as its parent page. Functions
-findParent and findPath are used to find parent pages, which could be changed
-because of concurrent access. Function pageSplit is recurrent and could split
-page by more than 2 pages, which could be necessary if keys have different
-lengths or more than one key are inserted (in such situation, user defined
+Function findLeaf is used to identify subtree for insertion. Page, in which
+insertion is proceeded, is locked as well as its parent page. Functions
+findParent and findPath are used to find parent pages, which could be changed
+because of concurrent access. Function pageSplit is recurrent and could split
+page by more than 2 pages, which could be necessary if keys have different
+lengths or more than one key are inserted (in such situation, user defined
function pickSplit cannot guarantee free space on page).
findLeaf(new-key)
end
findPath( stack item )
- push stack, [root, 0, 0] // page, LSN, parent
+ push stack, [root, 0, 0] // page, LSN, parent
while( stack )
ptr = top of stack
latch( ptr->page, S-mode )
end
for( each tuple on page )
if ( tuple->pagepointer == item->page )
- return stack
+ return stack
else
add to stack at the end [tuple->pagepointer,0, ptr]
end
unlatch( ptr->page )
pop stack
end
-
+
findParent( stack item )
parent = item->parent
latch( parent->page, X-mode )
if ( parent->page->lsn != parent->lsn )
- while(true)
+ while(true)
search parent tuple on parent->page, if found the return
rightlink = parent->page->rightlink
unlatch( parent->page )
keysarray = [ union(keysarray) ]
end
end
-
+
insert(new-key)
stack = findLeaf(new-key)
keysarray = [new-key]
Authors:
the btbulkdelete call cannot return while any indexscan is still holding
a copy of a deleted index tuple. Note that this requirement does not say
that btbulkdelete must visit the pages in any particular order. (See also
-on-the-fly deletion, below.)
+on-the-fly deletion, below.)
There is no such interlocking for deletion of items in internal pages,
since backends keep no lock nor pin on a page they have descended past.
* Returns timestamp of latest processed commit/abort record.
*
* When the server has been started normally without recovery the function
- * returns NULL.
+ * returns NULL.
*/
Datum
pg_last_xact_replay_timestamp(PG_FUNCTION_ARGS)
override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS)
-OBJS= bootparse.o bootstrap.o
+OBJS= bootparse.o bootstrap.o
include $(top_srcdir)/src/backend/common.mk
-- not tracked by PostgreSQL
-/*
+/*
* 5.47
* ROUTINE_SEQUENCE_USAGE view
*/
CAST(null AS sql_identifier) AS result_cast_scope_schema,
CAST(null AS sql_identifier) AS result_cast_scope_name,
CAST(null AS cardinal_number) AS result_cast_maximum_cardinality,
- CAST(null AS sql_identifier) AS result_cast_dtd_identifier
+ CAST(null AS sql_identifier) AS result_cast_dtd_identifier
FROM pg_namespace n, pg_proc p, pg_language l,
pg_type t, pg_namespace nt
CAST(null AS cardinal_number) AS datetime_precision,
CAST(null AS character_data) AS interval_type,
CAST(null AS character_data) AS interval_precision,
-
+
CAST(null AS character_data) AS domain_default, -- XXX maybe a bug in the standard
CAST(current_database() AS sql_identifier) AS udt_catalog,
else
{
found = ((Form_pg_attribute) GETSTRUCT(atttup))->attisdropped;
- ReleaseSysCache(atttup);
+ ReleaseSysCache(atttup);
}
return found;
}
found = HeapTupleIsValid(systable_getnext(sd));
systable_endscan(sd);
heap_close(rel, AccessShareLock);
- return found;
+ return found;
}
* src/backend/catalog/system_views.sql
*/
-CREATE VIEW pg_roles AS
- SELECT
+CREATE VIEW pg_roles AS
+ SELECT
rolname,
rolsuper,
rolinherit,
FROM pg_authid
WHERE NOT rolcanlogin;
-CREATE VIEW pg_user AS
- SELECT
- usename,
- usesysid,
- usecreatedb,
- usesuper,
- usecatupd,
- '********'::text as passwd,
- valuntil,
- useconfig
+CREATE VIEW pg_user AS
+ SELECT
+ usename,
+ usesysid,
+ usecreatedb,
+ usesuper,
+ usecatupd,
+ '********'::text as passwd,
+ valuntil,
+ useconfig
FROM pg_shadow;
-CREATE VIEW pg_rules AS
- SELECT
- N.nspname AS schemaname,
- C.relname AS tablename,
- R.rulename AS rulename,
- pg_get_ruledef(R.oid) AS definition
- FROM (pg_rewrite R JOIN pg_class C ON (C.oid = R.ev_class))
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+CREATE VIEW pg_rules AS
+ SELECT
+ N.nspname AS schemaname,
+ C.relname AS tablename,
+ R.rulename AS rulename,
+ pg_get_ruledef(R.oid) AS definition
+ FROM (pg_rewrite R JOIN pg_class C ON (C.oid = R.ev_class))
+ LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE R.rulename != '_RETURN';
-CREATE VIEW pg_views AS
- SELECT
- N.nspname AS schemaname,
- C.relname AS viewname,
- pg_get_userbyid(C.relowner) AS viewowner,
- pg_get_viewdef(C.oid) AS definition
- FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+CREATE VIEW pg_views AS
+ SELECT
+ N.nspname AS schemaname,
+ C.relname AS viewname,
+ pg_get_userbyid(C.relowner) AS viewowner,
+ pg_get_viewdef(C.oid) AS definition
+ FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind = 'v';
-CREATE VIEW pg_tables AS
- SELECT
- N.nspname AS schemaname,
- C.relname AS tablename,
- pg_get_userbyid(C.relowner) AS tableowner,
+CREATE VIEW pg_tables AS
+ SELECT
+ N.nspname AS schemaname,
+ C.relname AS tablename,
+ pg_get_userbyid(C.relowner) AS tableowner,
T.spcname AS tablespace,
- C.relhasindex AS hasindexes,
- C.relhasrules AS hasrules,
- C.relhastriggers AS hastriggers
- FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+ C.relhasindex AS hasindexes,
+ C.relhasrules AS hasrules,
+ C.relhastriggers AS hastriggers
+ FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
LEFT JOIN pg_tablespace T ON (T.oid = C.reltablespace)
WHERE C.relkind = 'r';
-CREATE VIEW pg_indexes AS
- SELECT
- N.nspname AS schemaname,
- C.relname AS tablename,
- I.relname AS indexname,
+CREATE VIEW pg_indexes AS
+ SELECT
+ N.nspname AS schemaname,
+ C.relname AS tablename,
+ I.relname AS indexname,
T.spcname AS tablespace,
- pg_get_indexdef(I.oid) AS indexdef
- FROM pg_index X JOIN pg_class C ON (C.oid = X.indrelid)
- JOIN pg_class I ON (I.oid = X.indexrelid)
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+ pg_get_indexdef(I.oid) AS indexdef
+ FROM pg_index X JOIN pg_class C ON (C.oid = X.indrelid)
+ JOIN pg_class I ON (I.oid = X.indexrelid)
+ LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
LEFT JOIN pg_tablespace T ON (T.oid = I.reltablespace)
WHERE C.relkind = 'r' AND I.relkind = 'i';
-CREATE VIEW pg_stats AS
- SELECT
- nspname AS schemaname,
- relname AS tablename,
- attname AS attname,
- stainherit AS inherited,
- stanullfrac AS null_frac,
- stawidth AS avg_width,
- stadistinct AS n_distinct,
+CREATE VIEW pg_stats AS
+ SELECT
+ nspname AS schemaname,
+ relname AS tablename,
+ attname AS attname,
+ stainherit AS inherited,
+ stanullfrac AS null_frac,
+ stawidth AS avg_width,
+ stadistinct AS n_distinct,
CASE
WHEN stakind1 IN (1, 4) THEN stavalues1
WHEN stakind2 IN (1, 4) THEN stavalues2
WHEN stakind3 = 3 THEN stanumbers3[1]
WHEN stakind4 = 3 THEN stanumbers4[1]
END AS correlation
- FROM pg_statistic s JOIN pg_class c ON (c.oid = s.starelid)
- JOIN pg_attribute a ON (c.oid = attrelid AND attnum = s.staattnum)
- LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
+ FROM pg_statistic s JOIN pg_class c ON (c.oid = s.starelid)
+ JOIN pg_attribute a ON (c.oid = attrelid AND attnum = s.staattnum)
+ LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
WHERE NOT attisdropped AND has_column_privilege(c.oid, a.attnum, 'select');
REVOKE ALL on pg_statistic FROM public;
-CREATE VIEW pg_locks AS
+CREATE VIEW pg_locks AS
SELECT * FROM pg_lock_status() AS L;
CREATE VIEW pg_cursors AS
WHERE
l.objsubid = 0;
-CREATE VIEW pg_settings AS
- SELECT * FROM pg_show_all_settings() AS A;
+CREATE VIEW pg_settings AS
+ SELECT * FROM pg_show_all_settings() AS A;
-CREATE RULE pg_settings_u AS
- ON UPDATE TO pg_settings
- WHERE new.name = old.name DO
+CREATE RULE pg_settings_u AS
+ ON UPDATE TO pg_settings
+ WHERE new.name = old.name DO
SELECT set_config(old.name, new.setting, 'f');
-CREATE RULE pg_settings_n AS
- ON UPDATE TO pg_settings
+CREATE RULE pg_settings_n AS
+ ON UPDATE TO pg_settings
DO INSTEAD NOTHING;
GRANT SELECT, UPDATE ON pg_settings TO PUBLIC;
-- Statistics views
-CREATE VIEW pg_stat_all_tables AS
- SELECT
- C.oid AS relid,
- N.nspname AS schemaname,
- C.relname AS relname,
- pg_stat_get_numscans(C.oid) AS seq_scan,
- pg_stat_get_tuples_returned(C.oid) AS seq_tup_read,
- sum(pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan,
+CREATE VIEW pg_stat_all_tables AS
+ SELECT
+ C.oid AS relid,
+ N.nspname AS schemaname,
+ C.relname AS relname,
+ pg_stat_get_numscans(C.oid) AS seq_scan,
+ pg_stat_get_tuples_returned(C.oid) AS seq_tup_read,
+ sum(pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan,
sum(pg_stat_get_tuples_fetched(I.indexrelid))::bigint +
- pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch,
- pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins,
- pg_stat_get_tuples_updated(C.oid) AS n_tup_upd,
+ pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch,
+ pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins,
+ pg_stat_get_tuples_updated(C.oid) AS n_tup_upd,
pg_stat_get_tuples_deleted(C.oid) AS n_tup_del,
pg_stat_get_tuples_hot_updated(C.oid) AS n_tup_hot_upd,
- pg_stat_get_live_tuples(C.oid) AS n_live_tup,
+ pg_stat_get_live_tuples(C.oid) AS n_live_tup,
pg_stat_get_dead_tuples(C.oid) AS n_dead_tup,
pg_stat_get_last_vacuum_time(C.oid) as last_vacuum,
pg_stat_get_last_autovacuum_time(C.oid) as last_autovacuum,
pg_stat_get_autovacuum_count(C.oid) AS autovacuum_count,
pg_stat_get_analyze_count(C.oid) AS analyze_count,
pg_stat_get_autoanalyze_count(C.oid) AS autoanalyze_count
- FROM pg_class C LEFT JOIN
- pg_index I ON C.oid = I.indrelid
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+ FROM pg_class C LEFT JOIN
+ pg_index I ON C.oid = I.indrelid
+ LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind IN ('r', 't')
GROUP BY C.oid, N.nspname, C.relname;
WHERE C.relkind IN ('r', 't')
GROUP BY C.oid, N.nspname, C.relname;
-CREATE VIEW pg_stat_sys_tables AS
- SELECT * FROM pg_stat_all_tables
+CREATE VIEW pg_stat_sys_tables AS
+ SELECT * FROM pg_stat_all_tables
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
-CREATE VIEW pg_stat_user_tables AS
- SELECT * FROM pg_stat_all_tables
+CREATE VIEW pg_stat_user_tables AS
+ SELECT * FROM pg_stat_all_tables
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
-CREATE VIEW pg_statio_all_tables AS
- SELECT
- C.oid AS relid,
- N.nspname AS schemaname,
- C.relname AS relname,
- pg_stat_get_blocks_fetched(C.oid) -
- pg_stat_get_blocks_hit(C.oid) AS heap_blks_read,
- pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit,
- sum(pg_stat_get_blocks_fetched(I.indexrelid) -
- pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read,
- sum(pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit,
- pg_stat_get_blocks_fetched(T.oid) -
- pg_stat_get_blocks_hit(T.oid) AS toast_blks_read,
- pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit,
- pg_stat_get_blocks_fetched(X.oid) -
- pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read,
- pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit
- FROM pg_class C LEFT JOIN
- pg_index I ON C.oid = I.indrelid LEFT JOIN
- pg_class T ON C.reltoastrelid = T.oid LEFT JOIN
- pg_class X ON T.reltoastidxid = X.oid
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+CREATE VIEW pg_statio_all_tables AS
+ SELECT
+ C.oid AS relid,
+ N.nspname AS schemaname,
+ C.relname AS relname,
+ pg_stat_get_blocks_fetched(C.oid) -
+ pg_stat_get_blocks_hit(C.oid) AS heap_blks_read,
+ pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit,
+ sum(pg_stat_get_blocks_fetched(I.indexrelid) -
+ pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read,
+ sum(pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit,
+ pg_stat_get_blocks_fetched(T.oid) -
+ pg_stat_get_blocks_hit(T.oid) AS toast_blks_read,
+ pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit,
+ pg_stat_get_blocks_fetched(X.oid) -
+ pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read,
+ pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit
+ FROM pg_class C LEFT JOIN
+ pg_index I ON C.oid = I.indrelid LEFT JOIN
+ pg_class T ON C.reltoastrelid = T.oid LEFT JOIN
+ pg_class X ON T.reltoastidxid = X.oid
+ LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind IN ('r', 't')
GROUP BY C.oid, N.nspname, C.relname, T.oid, X.oid;
-CREATE VIEW pg_statio_sys_tables AS
- SELECT * FROM pg_statio_all_tables
+CREATE VIEW pg_statio_sys_tables AS
+ SELECT * FROM pg_statio_all_tables
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
-CREATE VIEW pg_statio_user_tables AS
- SELECT * FROM pg_statio_all_tables
+CREATE VIEW pg_statio_user_tables AS
+ SELECT * FROM pg_statio_all_tables
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
-CREATE VIEW pg_stat_all_indexes AS
- SELECT
- C.oid AS relid,
- I.oid AS indexrelid,
- N.nspname AS schemaname,
- C.relname AS relname,
- I.relname AS indexrelname,
- pg_stat_get_numscans(I.oid) AS idx_scan,
- pg_stat_get_tuples_returned(I.oid) AS idx_tup_read,
- pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch
- FROM pg_class C JOIN
- pg_index X ON C.oid = X.indrelid JOIN
- pg_class I ON I.oid = X.indexrelid
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+CREATE VIEW pg_stat_all_indexes AS
+ SELECT
+ C.oid AS relid,
+ I.oid AS indexrelid,
+ N.nspname AS schemaname,
+ C.relname AS relname,
+ I.relname AS indexrelname,
+ pg_stat_get_numscans(I.oid) AS idx_scan,
+ pg_stat_get_tuples_returned(I.oid) AS idx_tup_read,
+ pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch
+ FROM pg_class C JOIN
+ pg_index X ON C.oid = X.indrelid JOIN
+ pg_class I ON I.oid = X.indexrelid
+ LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind IN ('r', 't');
-CREATE VIEW pg_stat_sys_indexes AS
- SELECT * FROM pg_stat_all_indexes
+CREATE VIEW pg_stat_sys_indexes AS
+ SELECT * FROM pg_stat_all_indexes
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
-CREATE VIEW pg_stat_user_indexes AS
- SELECT * FROM pg_stat_all_indexes
+CREATE VIEW pg_stat_user_indexes AS
+ SELECT * FROM pg_stat_all_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
-CREATE VIEW pg_statio_all_indexes AS
- SELECT
- C.oid AS relid,
- I.oid AS indexrelid,
- N.nspname AS schemaname,
- C.relname AS relname,
- I.relname AS indexrelname,
- pg_stat_get_blocks_fetched(I.oid) -
- pg_stat_get_blocks_hit(I.oid) AS idx_blks_read,
- pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit
- FROM pg_class C JOIN
- pg_index X ON C.oid = X.indrelid JOIN
- pg_class I ON I.oid = X.indexrelid
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+CREATE VIEW pg_statio_all_indexes AS
+ SELECT
+ C.oid AS relid,
+ I.oid AS indexrelid,
+ N.nspname AS schemaname,
+ C.relname AS relname,
+ I.relname AS indexrelname,
+ pg_stat_get_blocks_fetched(I.oid) -
+ pg_stat_get_blocks_hit(I.oid) AS idx_blks_read,
+ pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit
+ FROM pg_class C JOIN
+ pg_index X ON C.oid = X.indrelid JOIN
+ pg_class I ON I.oid = X.indexrelid
+ LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind IN ('r', 't');
-CREATE VIEW pg_statio_sys_indexes AS
- SELECT * FROM pg_statio_all_indexes
+CREATE VIEW pg_statio_sys_indexes AS
+ SELECT * FROM pg_statio_all_indexes
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
-CREATE VIEW pg_statio_user_indexes AS
- SELECT * FROM pg_statio_all_indexes
+CREATE VIEW pg_statio_user_indexes AS
+ SELECT * FROM pg_statio_all_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
-CREATE VIEW pg_statio_all_sequences AS
- SELECT
- C.oid AS relid,
- N.nspname AS schemaname,
- C.relname AS relname,
- pg_stat_get_blocks_fetched(C.oid) -
- pg_stat_get_blocks_hit(C.oid) AS blks_read,
- pg_stat_get_blocks_hit(C.oid) AS blks_hit
- FROM pg_class C
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+CREATE VIEW pg_statio_all_sequences AS
+ SELECT
+ C.oid AS relid,
+ N.nspname AS schemaname,
+ C.relname AS relname,
+ pg_stat_get_blocks_fetched(C.oid) -
+ pg_stat_get_blocks_hit(C.oid) AS blks_read,
+ pg_stat_get_blocks_hit(C.oid) AS blks_hit
+ FROM pg_class C
+ LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind = 'S';
-CREATE VIEW pg_statio_sys_sequences AS
- SELECT * FROM pg_statio_all_sequences
+CREATE VIEW pg_statio_sys_sequences AS
+ SELECT * FROM pg_statio_all_sequences
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
-CREATE VIEW pg_statio_user_sequences AS
- SELECT * FROM pg_statio_all_sequences
+CREATE VIEW pg_statio_user_sequences AS
+ SELECT * FROM pg_statio_all_sequences
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
-CREATE VIEW pg_stat_activity AS
- SELECT
+CREATE VIEW pg_stat_activity AS
+ SELECT
S.datid AS datid,
D.datname AS datname,
S.procpid,
S.waiting,
S.current_query
FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U
- WHERE S.datid = D.oid AND
+ WHERE S.datid = D.oid AND
S.usesysid = U.oid;
-CREATE VIEW pg_stat_database AS
- SELECT
- D.oid AS datid,
- D.datname AS datname,
- pg_stat_get_db_numbackends(D.oid) AS numbackends,
- pg_stat_get_db_xact_commit(D.oid) AS xact_commit,
- pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback,
- pg_stat_get_db_blocks_fetched(D.oid) -
- pg_stat_get_db_blocks_hit(D.oid) AS blks_read,
+CREATE VIEW pg_stat_database AS
+ SELECT
+ D.oid AS datid,
+ D.datname AS datname,
+ pg_stat_get_db_numbackends(D.oid) AS numbackends,
+ pg_stat_get_db_xact_commit(D.oid) AS xact_commit,
+ pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback,
+ pg_stat_get_db_blocks_fetched(D.oid) -
+ pg_stat_get_db_blocks_hit(D.oid) AS blks_read,
pg_stat_get_db_blocks_hit(D.oid) AS blks_hit,
pg_stat_get_db_tuples_returned(D.oid) AS tup_returned,
pg_stat_get_db_tuples_fetched(D.oid) AS tup_fetched,
pg_stat_get_db_tuples_deleted(D.oid) AS tup_deleted
FROM pg_database D;
-CREATE VIEW pg_stat_user_functions AS
+CREATE VIEW pg_stat_user_functions AS
SELECT
- P.oid AS funcid,
+ P.oid AS funcid,
N.nspname AS schemaname,
P.proname AS funcname,
pg_stat_get_function_calls(P.oid) AS calls,
pg_stat_get_function_time(P.oid) / 1000 AS total_time,
pg_stat_get_function_self_time(P.oid) / 1000 AS self_time
FROM pg_proc P LEFT JOIN pg_namespace N ON (N.oid = P.pronamespace)
- WHERE P.prolang != 12 -- fast check to eliminate built-in functions
+ WHERE P.prolang != 12 -- fast check to eliminate built-in functions
AND pg_stat_get_function_calls(P.oid) IS NOT NULL;
CREATE VIEW pg_stat_xact_user_functions AS
OUT lexemes text[])
RETURNS SETOF record AS
$$
-SELECT
+SELECT
tt.alias AS alias,
tt.description AS description,
parse.token AS token,
LIMIT 1
) AS lexemes
FROM pg_catalog.ts_parse(
- (SELECT cfgparser FROM pg_catalog.pg_ts_config WHERE oid = $1 ), $2
+ (SELECT cfgparser FROM pg_catalog.pg_ts_config WHERE oid = $1 ), $2
) AS parse,
pg_catalog.ts_token_type(
(SELECT cfgparser FROM pg_catalog.pg_ts_config WHERE oid = $1 )
* catalog. Comments on all other objects are recorded in pg_description.
*/
if (stmt->objtype == OBJECT_DATABASE || stmt->objtype == OBJECT_TABLESPACE
- || stmt->objtype == OBJECT_ROLE)
+ || stmt->objtype == OBJECT_ROLE)
CreateSharedComments(address.objectId, address.classId, stmt->comment);
else
CreateComments(address.objectId, address.classId, address.objectSubId,
done = true;
break;
}
-
+
if (fld_count == -1)
{
/*
{
ExplainOpenGroup("Plans", "Plans", false, es);
/* Pass current PlanState as head of ancestors list for children */
- ancestors = lcons(planstate, ancestors);
+ ancestors = lcons(planstate, ancestors);
}
/* initPlan-s */
/* end of child plans */
if (haschildren)
{
- ancestors = list_delete_first(ancestors);
+ ancestors = list_delete_first(ancestors);
ExplainCloseGroup("Plans", "Plans", false, es);
}
errmsg("could not remove symbolic link \"%s\": %m",
linkloc)));
}
-
+
/*
* Create the symlink under PGDATA
*/
relation.h - planner internal nodes
execnodes.h - executor nodes
memnodes.h - memory nodes
- pg_list.h - generic list
+ pg_list.h - generic list
Steps to Add a Node
Historical Note
---------------
-Prior to the current simple C structure definitions, the Node structures
+Prior to the current simple C structure definitions, the Node structures
used a pseudo-inheritance system which automatically generated creator and
accessor functions. Since every node inherited from LispValue, the whole thing
was a mess. Here's a little anecdote:
First, implementation uses new type of parameters - PARAM_EXEC - to deal
with correlation Vars. When query_planner() is called, it first tries to
-replace all upper queries Var referenced in current query with Param of
-this type. Some global variables are used to keep mapping of Vars to
-Params and Params to Vars.
-
-After this, all current query' SubLinks are processed: for each SubLink
-found in query' qual union_planner() (old planner() function) will be
-called to plan corresponding subselect (union_planner() calls
-query_planner() for "simple" query and supports UNIONs). After subselect
-are planned, optimizer knows about is this correlated, un-correlated or
-_undirect_ correlated (references some grand-parent Vars but no parent
-ones: uncorrelated from the parent' point of view) query.
-
-For uncorrelated and undirect correlated subqueries of EXPRession or
+replace all upper queries Var referenced in current query with Param of
+this type. Some global variables are used to keep mapping of Vars to
+Params and Params to Vars.
+
+After this, all current query' SubLinks are processed: for each SubLink
+found in query' qual union_planner() (old planner() function) will be
+called to plan corresponding subselect (union_planner() calls
+query_planner() for "simple" query and supports UNIONs). After subselect
+are planned, optimizer knows about is this correlated, un-correlated or
+_undirect_ correlated (references some grand-parent Vars but no parent
+ones: uncorrelated from the parent' point of view) query.
+
+For uncorrelated and undirect correlated subqueries of EXPRession or
EXISTS type SubLinks will be replaced with "normal" clauses from
SubLink->Oper list (I changed this list to be list of EXPR nodes,
not just Oper ones). Right sides of these nodes are replaced with
Params (from the list of their "interests").
After all SubLinks are processed, query_planner() calls qual'
-canonificator and does "normal" work. By using Params optimizer
+canonificator and does "normal" work. By using Params optimizer
is mostly unchanged.
Well, Executor. To get subplans re-evaluated without ExecutorStart()
Explanation of EXPLAIN.
-vac=> explain select * from tmp where x >= (select max(x2) from test2
+vac=> explain select * from tmp where x >= (select max(x2) from test2
where y2 = y and exists (select * from tempx where tx = x));
NOTICE: QUERY PLAN:
for each parent tuple - very slow...
Results of some test. TMP is table with x,y (int4-s), x in 0-9,
-y = 100 - x, 1000 tuples (10 duplicates of each tuple). TEST2 is table
+y = 100 - x, 1000 tuples (10 duplicates of each tuple). TEST2 is table
with x2, y2 (int4-s), x2 in 1-99, y2 = 100 -x2, 10000 tuples (100 dups).
- Trying
+ Trying
select * from tmp where x >= (select max(x2) from test2 where y2 = y);
-
+
and
begin;
-select y as ty, max(x2) as mx into table tsub from test2, tmp
+select y as ty, max(x2) as mx into table tsub from test2, tmp
where y2 = y group by ty;
vacuum tsub;
select x, y from tmp, tsub where x >= mx and y = ty;
/* $foo$ style quotes ("dollar quoting")
* The quoted string starts with $foo$ where "foo" is an optional string
- * in the form of an identifier, except that it may not contain "$",
- * and extends to the first occurrence of an identical string.
+ * in the form of an identifier, except that it may not contain "$",
+ * and extends to the first occurrence of an identical string.
* There is *no* processing of the quoted text.
*
* {dolqfailed} is an error rule to avoid scanner backup when {dolqdelim}
op_chars [\~\!\@\#\^\&\|\`\?\+\-\*\/\%\<\>\=]
operator {op_chars}+
-/* we no longer allow unary minus in numbers.
+/* we no longer allow unary minus in numbers.
* instead we pass it separately to parser. there it gets
* coerced via doNegate() -- Leon aug 20 1999
*
# Makefile for the port-specific subsystem of the backend
#
# We have two different modes of operation: 1) put stuff specific to Port X
-# in subdirectory X and have that subdirectory's make file make it all, and
+# in subdirectory X and have that subdirectory's make file make it all, and
# 2) use conditional statements in the present make file to include what's
# necessary for a specific port in our own output. (1) came first, but (2)
# is superior for many things, like when the same thing needs to be done for
-# multiple ports and you don't want to duplicate files in multiple
+# multiple ports and you don't want to duplicate files in multiple
# subdirectories. Much of the stuff done via Method 1 today should probably
-# be converted to Method 2.
+# be converted to Method 2.
#
# IDENTIFICATION
# src/backend/port/Makefile
# mkldexport objectfile [location]
# where
# objectfile is the current location of the object file.
-# location is the eventual (installed) location of the
+# location is the eventual (installed) location of the
# object file (if different from the current
# working directory).
#
# [This file comes from the Postgres 4.2 distribution. - ay 7/95]
#
-# Header: /usr/local/devel/postgres/src/tools/mkldexport/RCS/mkldexport.sh,v 1.2 1994/03/13 04:59:12 aoki Exp
+# Header: /usr/local/devel/postgres/src/tools/mkldexport/RCS/mkldexport.sh,v 1.2 1994/03/13 04:59:12 aoki Exp
#
# setting this to nm -B might be better
love to know why there is a discrepancy between the published source and
the actual behavior --- tgl 7-Nov-2001.
-Appropriate bug reports have been filed with Apple --- see
+Appropriate bug reports have been filed with Apple --- see
Radar Bug#s 2767956, 2683531, 2805147. One hopes we can retire this
kluge in the not too distant future.
.global pg_atomic_cas
pg_atomic_cas:
-
+
! "cas" only works on sparcv9 and sparcv8plus chips, and
! requies a compiler targeting these CPUs. It will fail
! on a compiler targeting sparcv8, and of course will not
! be understood by a sparcv8 CPU. gcc continues to use
! "ldstub" because it targets sparcv7.
!
- ! There is actually a trick for embedding "cas" in a
+ ! There is actually a trick for embedding "cas" in a
! sparcv8-targeted compiler, but it can only be run
! on a sparcv8plus/v9 cpus:
!
$(SQLSCRIPT): Makefile snowball_func.sql.in snowball.sql.in
ifeq ($(enable_shared), yes)
echo '-- Language-specific snowball dictionaries' > $@
- cat $(srcdir)/snowball_func.sql.in >> $@
+ cat $(srcdir)/snowball_func.sql.in >> $@
@set -e; \
set $(LANGUAGES) ; \
while [ "$$#" -gt 0 ] ; \
the contention cost of the writer compared to PG 8.0.)
During a checkpoint, the writer's strategy must be to write every dirty
-buffer (pinned or not!). We may as well make it start this scan from
+buffer (pinned or not!). We may as well make it start this scan from
NextVictimBuffer, however, so that the first-to-be-written pages are the
ones that backends might otherwise have to write for themselves soon.
by having them insert into different pages. But it is also desirable to fill
up pages in sequential order, to get the benefit of OS prefetching and batched
writes. The FSM is responsible for making that happen, and the next slot
-pointer helps provide the desired behavior.
+pointer helps provide the desired behavior.
Higher-level structure
----------------------
The cache synchronization is done using a message queue. Every
backend can register a message which then has to be read by
-all backends. A message read by all backends is removed from the
+all backends. A message read by all backends is removed from the
queue automatically. If a message has been lost because the buffer
was full, all backends that haven't read this message will be
told that they have to reset their cache state. This is done
check: s_lock_test
./s_lock_test
-clean distclean maintainer-clean:
+clean distclean maintainer-clean:
rm -f s_lock_test
* Regular locks (a/k/a heavyweight locks). The regular lock manager
supports a variety of lock modes with table-driven semantics, and it has
-full deadlock detection and automatic release at transaction end.
+full deadlock detection and automatic release at transaction end.
Regular locks should be used for all user-driven lock requests.
Acquisition of either a spinlock or a lightweight lock causes query
(lock grant and release) run quickly when there is no deadlock, and
avoid the overhead of deadlock handling as much as possible. We do this
using an "optimistic waiting" approach: if a process cannot acquire the
-lock it wants immediately, it goes to sleep without any deadlock check.
+lock it wants immediately, it goes to sleep without any deadlock check.
But it also sets a delay timer, with a delay of DeadlockTimeout
milliseconds (typically set to one second). If the delay expires before
the process is granted the lock it wants, it runs the deadlock
* Character-type support functions, equivalent to is* macros, but
* working with any possible encodings and locales. Notes:
* - with multibyte encoding and C-locale isw* function may fail
- * or give wrong result.
- * - multibyte encoding and C-locale often are used for
+ * or give wrong result.
+ * - multibyte encoding and C-locale often are used for
* Asian languages.
* - if locale is C the we use pgwstr instead of wstr
*/
/*
* returns true if current character has zero display length or
* it's a special sign in several languages. Such characters
- * aren't a word-breaker although they aren't an isalpha.
- * In beginning of word they aren't a part of it.
+ * aren't a word-breaker although they aren't an isalpha.
+ * In beginning of word they aren't a part of it.
*/
static int
p_isspecial(TParser *prs)
return false;
}
-static void
+static void
mark_fragment(HeadlineParsedText *prs, int highlight, int startpos, int endpos)
{
int i;
}
}
-typedef struct
+typedef struct
{
int4 startpos;
int4 endpos;
int2 excluded;
} CoverPos;
-static void
+static void
get_next_fragment(HeadlineParsedText *prs, int *startpos, int *endpos,
int *curlen, int *poslen, int max_words)
{
int i;
- /* Objective: Generate a fragment of words between startpos and endpos
- * such that it has at most max_words and both ends has query words.
- * If the startpos and endpos are the endpoints of the cover and the
- * cover has fewer words than max_words, then this function should
- * just return the cover
+ /* Objective: Generate a fragment of words between startpos and endpos
+ * such that it has at most max_words and both ends has query words.
+ * If the startpos and endpos are the endpoints of the cover and the
+ * cover has fewer words than max_words, then this function should
+ * just return the cover
*/
/* first move startpos to an item */
for(i = *startpos; i <= *endpos; i++)
/* cut endpos to have only max_words */
*curlen = 0;
*poslen = 0;
- for(i = *startpos; i <= *endpos && *curlen < max_words; i++)
+ for(i = *startpos; i <= *endpos && *curlen < max_words; i++)
{
if (!NONWORDTOKEN(prs->words[i].type))
*curlen += 1;
if (prs->words[i].item && !prs->words[i].repeated)
*poslen += 1;
}
- /* if the cover was cut then move back endpos to a query item */
+ /* if the cover was cut then move back endpos to a query item */
if (*endpos > i)
{
*endpos = i;
break;
if (!NONWORDTOKEN(prs->words[i].type))
*curlen -= 1;
- }
- }
+ }
+ }
}
static void
mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
- int shortword, int min_words,
+ int shortword, int min_words,
int max_words, int max_fragments)
{
int4 poslen, curlen, i, f, num_f = 0;
int4 stretch, maxstretch, posmarker;
- int4 startpos = 0,
- endpos = 0,
+ int4 startpos = 0,
+ endpos = 0,
p = 0,
q = 0;
- int4 numcovers = 0,
+ int4 numcovers = 0,
maxcovers = 32;
int4 minI, minwords, maxitems;
CoverPos *covers;
covers = palloc(maxcovers * sizeof(CoverPos));
-
+
/* get all covers */
while (hlCover(prs, query, &p, &q))
{
/* Break the cover into smaller fragments such that each fragment
* has at most max_words. Also ensure that each end of the fragment
- * is a query word. This will allow us to stretch the fragment in
+ * is a query word. This will allow us to stretch the fragment in
* either direction
*/
numcovers ++;
startpos = endpos + 1;
endpos = q;
- }
+ }
/* move p to generate the next cover */
- p++;
+ p++;
}
/* choose best covers */
minwords = 0x7fffffff;
minI = -1;
/* Choose the cover that contains max items.
- * In case of tie choose the one with smaller
- * number of words.
+ * In case of tie choose the one with smaller
+ * number of words.
*/
for (i = 0; i < numcovers; i ++)
{
- if (!covers[i].in && !covers[i].excluded &&
- (maxitems < covers[i].poslen || (maxitems == covers[i].poslen
+ if (!covers[i].in && !covers[i].excluded &&
+ (maxitems < covers[i].poslen || (maxitems == covers[i].poslen
&& minwords > covers[i].curlen)))
{
maxitems = covers[i].poslen;
endpos = covers[minI].endpos;
curlen = covers[minI].curlen;
/* stretch the cover if cover size is lower than max_words */
- if (curlen < max_words)
+ if (curlen < max_words)
{
/* divide the stretch on both sides of cover */
maxstretch = (max_words - curlen)/2;
- /* first stretch the startpos
- * stop stretching if
- * 1. we hit the beginning of document
- * 2. exceed maxstretch
- * 3. we hit an already marked fragment
+ /* first stretch the startpos
+ * stop stretching if
+ * 1. we hit the beginning of document
+ * 2. exceed maxstretch
+ * 3. we hit an already marked fragment
*/
stretch = 0;
posmarker = startpos;
{
if (!NONWORDTOKEN(prs->words[i].type))
curlen ++;
- posmarker = i;
+ posmarker = i;
}
/* cut back endpos till we find a non-short token */
for ( i = posmarker; i > endpos && (NOENDTOKEN(prs->words[i].type) || prs->words[i].len <= shortword); i--)
/* exclude overlapping covers */
for (i = 0; i < numcovers; i ++)
{
- if (i != minI && ( (covers[i].startpos >= covers[minI].startpos && covers[i].startpos <= covers[minI].endpos) || (covers[i].endpos >= covers[minI].startpos && covers[i].endpos <= covers[minI].endpos)))
+ if (i != minI && ( (covers[i].startpos >= covers[minI].startpos && covers[i].startpos <= covers[minI].endpos) || (covers[i].endpos >= covers[minI].startpos && covers[i].endpos <= covers[minI].endpos)))
covers[i].excluded = 1;
}
}
}
static void
-mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight,
+mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight,
int shortword, int min_words, int max_words)
{
int p = 0,
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("MaxFragments should be >= 0")));
- }
+ }
if (max_fragments == 0)
/* call the default headline generator */
open H, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!";
open T, '>', $tabfile . $tmpext or die "Could not open $tabfile$tmpext: $!";
-print H
+print H
qq|/*-------------------------------------------------------------------------
*
* fmgroids.h
* remaining bits are never examined. Currently, we always initialize these
* to zero, but it might be possible to use them for some other purpose in
* the future.
- *
+ *
* In the NumericShort format, the remaining 14 bits of the header word
* (n_short.n_header) are allocated as follows: 1 for sign (positive or
* negative), 6 for dynamic scale, and 7 for weight. In practice, most
len = NUMERIC_HDRSZ_SHORT + n * sizeof(NumericDigit);
result = (Numeric) palloc(len);
SET_VARSIZE(result, len);
- result->choice.n_short.n_header =
+ result->choice.n_short.n_header =
(sign == NUMERIC_NEG ? (NUMERIC_SHORT | NUMERIC_SHORT_SIGN_MASK)
: NUMERIC_SHORT)
| (var->dscale << NUMERIC_SHORT_DSCALE_SHIFT)
int start_posn;
int end_posn;
int chunk_len;
-
+
text_position_setup(inputstring, fldsep, &state);
/*
PointerGetDatum(inputstring),
is_null, 1));
}
-
+
start_posn = 1;
/* start_ptr points to the start_posn'th character of inputstring */
start_ptr = VARDATA_ANY(inputstring);
/* must build a temp text datum to pass to accumArrayResult */
result_text = cstring_to_text_with_len(start_ptr, chunk_len);
is_null = null_string ? text_isequal(result_text, null_string) : false;
-
+
/* stash away this field */
astate = accumArrayResult(astate,
PointerGetDatum(result_text),
}
else
{
- /*
+ /*
* When fldsep is NULL, each character in the inputstring becomes an
* element in the result array. The separator is effectively the space
* between characters.
*/
inputstring_len = VARSIZE_ANY_EXHDR(inputstring);
-
+
/* return empty array for empty input string */
if (inputstring_len < 1)
PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID));
-
+
start_ptr = VARDATA_ANY(inputstring);
-
+
while (inputstring_len > 0)
{
int chunk_len = pg_mblen(start_ptr);
/* must build a temp text datum to pass to accumArrayResult */
result_text = cstring_to_text_with_len(start_ptr, chunk_len);
is_null = null_string ? text_isequal(result_text, null_string) : false;
-
+
/* stash away this field */
astate = accumArrayResult(astate,
PointerGetDatum(result_text),
/* returns NULL when first or second parameter is NULL */
if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
PG_RETURN_NULL();
-
+
v = PG_GETARG_ARRAYTYPE_P(0);
fldsep = text_to_cstring(PG_GETARG_TEXT_PP(1));
}
}
}
-
+
result = cstring_to_text_with_len(buf.data, buf.len);
pfree(buf.data);
{
#ifdef USE_LIBXML
text *data = PG_GETARG_TEXT_P(0);
-
+
PG_RETURN_BOOL(wellformed_xml(data, xmloption));
#else
NO_XML_SUPPORT();
{
#ifdef USE_LIBXML
text *data = PG_GETARG_TEXT_P(0);
-
+
PG_RETURN_BOOL(wellformed_xml(data, XMLOPTION_DOCUMENT));
#else
NO_XML_SUPPORT();
{
#ifdef USE_LIBXML
text *data = PG_GETARG_TEXT_P(0);
-
+
PG_RETURN_BOOL(wellformed_xml(data, XMLOPTION_CONTENT));
#else
NO_XML_SUPPORT();
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
-# you have to obtain GB2312.TXT from
+# you have to obtain GB2312.TXT from
# the organization's ftp site.
#
# GB2312.TXT format:
} else {
next;
}
-
+
$ucs = hex($u);
$code = hex($c);
$utf = &ucs2utf($ucs);
if( $count == 0 ){
printf FILE " {0x%08x, 0x%06x} /* %s */\n", $index, $code, $comment{ $code };
} else {
- printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code };
+ printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code };
}
}
($code >= 0x8ea1 && $code <= 0x8efe) ||
($code >= 0x8fa1a1 && $code <= 0x8ffefe) ||
($code >= 0xa1a1 && $code <= 0x8fefe))) {
-
+
$v1 = hex(substr($index, 0, 8));
$v2 = hex(substr($index, 8, 8));
} else {
next;
}
-
+
$ucs = hex($u);
$code = hex($c);
$utf = &ucs2utf($ucs);
if( $count == 0 ){
printf FILE " {0x%06x, 0x%08x} /* %s */\n", $index, $code, $comment{ $code };
} else {
- printf FILE " {0x%06x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code };
+ printf FILE " {0x%06x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code };
}
}
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
-# you have to obtain JIS0201.TXT, JIS0208.TXT, JIS0212.TXT from
+# you have to obtain JIS0201.TXT, JIS0208.TXT, JIS0212.TXT from
# the organization's ftp site.
#
# JIS0201.TXT format:
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
-# you have to obtain OLD5601.TXT from
+# you have to obtain OLD5601.TXT from
# the organization's ftp site.
#
# OLD5601.TXT format:
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
-# you have to obtain CNS11643.TXT from
+# you have to obtain CNS11643.TXT from
# the organization's ftp site.
#
# CNS11643.TXT format:
} else {
next;
}
-
+
$ucs = hex($u);
$code = hex($c);
$utf = &ucs2utf($ucs);
if( $count == 0 ){
printf FILE " {0x%08x, 0x%06x} /* %s */\n", $index, $code, $comment{ $code };
} else {
- printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code };
+ printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code };
}
}
} else {
next;
}
-
+
$ucs = hex($u);
$code = hex($c);
$utf = &ucs2utf($ucs);
if( $count == 0 ){
printf FILE " {0x%04x, 0x%08x} /* %s */\n", $index, $code, $comment{ $code };
} else {
- printf FILE " {0x%04x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code };
+ printf FILE " {0x%04x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code };
}
}
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
-# you have to obtain SHIFTJIS.TXT from
+# you have to obtain SHIFTJIS.TXT from
# the organization's ftp site.
#
# SHIFTJIS.TXT format:
} elsif ($ucs > 0x007f && $ucs <= 0x07ff) {
$utf = (($ucs & 0x003f) | 0x80) | ((($ucs >> 6) | 0xc0) << 8);
} elsif ($ucs > 0x07ff && $ucs <= 0xffff) {
- $utf = ((($ucs >> 12) | 0xe0) << 16) |
+ $utf = ((($ucs >> 12) | 0xe0) << 16) |
(((($ucs & 0x0fc0) >> 6) | 0x80) << 8) |
(($ucs & 0x003f) | 0x80);
} else {
$utf = ((($ucs >> 18) | 0xf0) << 24) |
- (((($ucs & 0x3ffff) >> 12) | 0x80) << 16) |
+ (((($ucs & 0x3ffff) >> 12) | 0x80) << 16) |
(((($ucs & 0x0fc0) >> 6) | 0x80) << 8) |
(($ucs & 0x003f) | 0x80);
}
# Note: guc-file.c is not deleted by 'make clean',
# since we want to ship it in distribution tarballs.
-clean:
+clean:
@rm -f lex.yy.c
## in postgresql.conf.sample:
## 1) the valid config settings may be preceded by a '#', but NOT '# '
## (we use this to skip comments)
-## 2) the valid config settings will be followed immediately by ' ='
+## 2) the valid config settings will be followed immediately by ' ='
## (at least one space preceding the '=')
## in guc.c:
## 3) the options have PGC_ on the same line as the option
## 1) Don't know what to do with TRANSACTION ISOLATION LEVEL
## if an option is valid but shows up in only one file (guc.c but not
-## postgresql.conf.sample), it should be listed here so that it
+## postgresql.conf.sample), it should be listed here so that it
## can be ignored
INTENTIONALLY_NOT_INCLUDED="autocommit debug_deadlocks \
is_superuser lc_collate lc_ctype lc_messages lc_monetary lc_numeric lc_time \
trace_notify trace_userlocks transaction_isolation transaction_read_only \
zero_damaged_pages"
-### What options are listed in postgresql.conf.sample, but don't appear
+### What options are listed in postgresql.conf.sample, but don't appear
### in guc.c?
# grab everything that looks like a setting and convert it to lower case
-SETTINGS=`grep ' =' postgresql.conf.sample |
+SETTINGS=`grep ' =' postgresql.conf.sample |
grep -v '^# ' | # strip comments
-sed -e 's/^#//' |
+sed -e 's/^#//' |
awk '{print $1}'`
SETTINGS=`echo "$SETTINGS" | tr 'A-Z' 'a-z'`
-for i in $SETTINGS ; do
+for i in $SETTINGS ; do
hidden=0
## it sure would be nice to replace this with an sql "not in" statement
## it doesn't seem to make sense to have things in .sample and not in guc.c
# for hidethis in $INTENTIONALLY_NOT_INCLUDED ; do
-# if [ "$hidethis" = "$i" ] ; then
+# if [ "$hidethis" = "$i" ] ; then
# hidden=1
# fi
# done
if [ "$hidden" -eq 0 ] ; then
grep -i '"'$i'"' guc.c > /dev/null
- if [ $? -ne 0 ] ; then
- echo "$i seems to be missing from guc.c";
- fi;
+ if [ $? -ne 0 ] ; then
+ echo "$i seems to be missing from guc.c";
+ fi;
fi
done
-### What options are listed in guc.c, but don't appear
+### What options are listed in guc.c, but don't appear
### in postgresql.conf.sample?
# grab everything that looks like a setting and convert it to lower case
/* now we must have the option value */
if (token != GUC_ID &&
- token != GUC_STRING &&
- token != GUC_INTEGER &&
- token != GUC_REAL &&
+ token != GUC_STRING &&
+ token != GUC_INTEGER &&
+ token != GUC_REAL &&
token != GUC_UNQUOTED_STRING)
goto parse_error;
if (token == GUC_STRING) /* strip quotes and escapes */
else
ereport(elevel,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("syntax error in file \"%s\" line %u, near token \"%s\"",
+ errmsg("syntax error in file \"%s\" line %u, near token \"%s\"",
config_file, ConfigFileLineno, yytext)));
OK = false;
# (change requires restart)
#port = 5432 # (change requires restart)
#max_connections = 100 # (change requires restart)
-# Note: Increasing max_connections costs ~400 bytes of shared memory per
+# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction).
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directory = '' # (change requires restart)
# (change requires restart)
#fsync = on # turns forced synchronization on or off
#synchronous_commit = on # immediate fsync at commit
-#wal_sync_method = fsync # the default is the first option
+#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
-#join_collapse_limit = 8 # 1 disables collapsing of explicit
+#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
-#log_rotation_size = 10MB # Automatic rotation of logfiles will
+#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
-#autovacuum = on # Enable autovacuum subprocess? 'on'
+#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
-#autovacuum_analyze_threshold = 50 # min number of row updates before
+#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
Given this, the pfree routine will look something like
- StandardChunkHeader * header =
+ StandardChunkHeader * header =
(StandardChunkHeader *) ((char *) p - sizeof(StandardChunkHeader));
(*header->mycontext->methods->free_p) (p);
!IF "$(OS)" == "Windows_NT"
NULL=
-!ELSE
+!ELSE
NULL=nul
-!ENDIF
+!ENDIF
-ALL:
+ALL:
cd include
if not exist pg_config.h copy pg_config.h.win32 pg_config.h
if not exist pg_config_os.h copy port\win32.h pg_config_os.h
cd ..
cd interfaces\libpq
- make -N -DCFG=$(CFG) /f bcc32.mak
+ make -N -DCFG=$(CFG) /f bcc32.mak
cd ..\..
echo All Win32 parts have been built!
pg_dump <db-name> -Ft > <backup-file>
To restore, try
-
+
To list contents:
pg_restore -l <backup-file> | less
TAR
===
-The TAR archive that pg_dump creates currently has a blank username & group for the files,
+The TAR archive that pg_dump creates currently has a blank username & group for the files,
but should be otherwise valid. It also includes a 'restore.sql' script which is there for
the benefit of humans. The script is never used by pg_restore.
Note: the TAR format archive can only be used as input into pg_restore if it is in TAR form.
-(ie. you should not extract the files then expect pg_restore to work).
+(ie. you should not extract the files then expect pg_restore to work).
You can extract, edit, and tar the files again, and it should work, but the 'toc'
file should go at the start, the data files be in the order they are used, and
}
/*
- * dumpSecLabel
+ * dumpSecLabel
*
* This routine is used to dump any security labels associated with the
* object handed to this routine. The routine takes a constant character
/* $foo$ style quotes ("dollar quoting")
* The quoted string starts with $foo$ where "foo" is an optional string
- * in the form of an identifier, except that it may not contain "$",
- * and extends to the first occurrence of an identical string.
+ * in the form of an identifier, except that it may not contain "$",
+ * and extends to the first occurrence of an identical string.
* There is *no* processing of the quoted text.
*
* {dolqfailed} is an error rule to avoid scanner backup when {dolqdelim}
op_chars [\~\!\@\#\^\&\|\`\?\+\-\*\/\%\<\>\=]
operator {op_chars}+
-/* we no longer allow unary minus in numbers.
+/* we no longer allow unary minus in numbers.
* instead we pass it separately to parser. there it gets
* coerced via doNegate() -- Leon aug 20 1999
*
*
*-------------------------------------------------------------------------
*/
-#ifndef OBJECTADDRESS_H
+#ifndef OBJECTADDRESS_H
#define OBJECTADDRESS_H
#include "nodes/parsenodes.h"
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the global variable 'int timezone'. */
-#define HAVE_INT_TIMEZONE
+#define HAVE_INT_TIMEZONE
/* Define to 1 if you have support for IPv6. */
#define HAVE_IPV6 1
/* Define to 1 if `long long int' works and is 64 bits. */
#if (_MSC_VER > 1200)
-#define HAVE_LONG_LONG_INT_64
+#define HAVE_LONG_LONG_INT_64
#endif
/* Define to 1 if you have the `memmove' function. */
#endif
-
+
#endif /* !defined(HAS_TEST_AND_SET) */
descriptor statements have the following shortcomings
- input descriptors (USING DESCRIPTOR <name>) are not supported
-
+
Reason: to fully support dynamic SQL the frontend/backend communication
should change to recognize input parameters.
Since this is not likely to happen in the near future and you
struct prepared_statement *this,
*prev;
- (void) questionmarks; /* quiet the compiler */
+ (void) questionmarks; /* quiet the compiler */
con = ecpg_get_connection(connection_name);
if (!ecpg_init(con, connection_name, lineno))
endif
preproc.y: ../../../backend/parser/gram.y parse.pl ecpg.addons ecpg.header ecpg.tokens ecpg.trailer ecpg.type
- $(PERL) $(srcdir)/parse.pl $(srcdir) < $< > $@
+ $(PERL) $(srcdir)/parse.pl $(srcdir) < $< > $@
$(PERL) $(srcdir)/check_rules.pl $(srcdir) $<
ecpg_keywords.o c_keywords.o keywords.o preproc.o parser.o: preproc.h
$block = $block . $arr[$fieldIndexer];
}
}
-}
+}
close GRAM;
@Fld = split(' ', $_, -1);
if (!/^ECPG:/) {
- next line;
+ next line;
}
if ($found{$Fld[2]} ne 'found') {
{
if ($1.type == NULL || strlen($1.type) == 0)
output_prepare_statement($1.name, $1.stmt);
- else
+ else
output_statement(cat_str(5, make_str("prepare"), $1.name, $1.type, make_str("as"), $1.stmt), 0, ECPGst_normal);
}
ECPG: stmtTransactionStmt block
if (!strcmp($1, "all"))
fprintf(yyout, "{ ECPGdeallocate_all(__LINE__, %d, %s);", compat, con);
- else if ($1[0] == ':')
+ else if ($1[0] == ':')
fprintf(yyout, "{ ECPGdeallocate(__LINE__, %d, %s, %s);", compat, con, $1+1);
else
fprintf(yyout, "{ ECPGdeallocate(__LINE__, %d, %s, \"%s\");", compat, con, $1);
fclose(yyin);
if (yyout)
fclose(yyout);
-
+
if (strcmp(output_filename, "-") != 0 && unlink(output_filename) != 0)
fprintf(stderr, _("could not remove output file \"%s\"\n"), output_filename);
exit(error_code);
/* special embedded SQL tokens */
%token SQL_ALLOCATE SQL_AUTOCOMMIT SQL_BOOL SQL_BREAK
SQL_CALL SQL_CARDINALITY SQL_CONNECT
- SQL_COUNT
+ SQL_COUNT
SQL_DATETIME_INTERVAL_CODE
SQL_DATETIME_INTERVAL_PRECISION SQL_DESCRIBE
SQL_DESCRIPTOR SQL_DISCONNECT SQL_FOUND
S_STATIC S_SUB S_VOLATILE
S_TYPEDEF
-%token CSTRING CVARIABLE CPP_LINE IP
+%token CSTRING CVARIABLE CPP_LINE IP
%token DOLCONST ECONST NCONST UCONST UIDENT
/* old style: dbname[@server][:port] */
if (strlen($2) > 0 && *($2) != '@')
mmerror(PARSE_ERROR, ET_ERROR, "expected \"@\", found \"%s\"", $2);
-
+
/* C strings need to be handled differently */
if ($1[0] == '\"')
$$ = $1;
| /*EMPTY*/ { $$ = EMPTY; }
;
-connect_options: ColId opt_opt_value
+connect_options: ColId opt_opt_value
{ $$ = make2_str($1, $2); }
| ColId opt_opt_value Op connect_options
{
;
ECPGExecuteImmediateStmt: EXECUTE IMMEDIATE execstring
- {
+ {
/* execute immediate means prepare the statement and
* immediately execute it */
$$ = $3;
$$.type_index = this->type->type_index;
if (this->type->type_sizeof && strlen(this->type->type_sizeof) != 0)
$$.type_sizeof = this->type->type_sizeof;
- else
+ else
$$.type_sizeof = cat_str(3, make_str("sizeof("), mm_strdup(this->name), make_str(")"));
struct_member_list[struct_level] = ECPGstruct_member_dup(this->struct_member_list);
type = ECPGmake_simple_type(actual_type[struct_level].type_enum, length, varchar_counter);
else
type = ECPGmake_array_type(ECPGmake_simple_type(actual_type[struct_level].type_enum, length, varchar_counter), dimension);
-
+
if (strcmp(dimension, "0") == 0 || abs(atoi(dimension)) == 1)
*dim = '\0';
else
}
| civar { $$ = EMPTY; }
| civarind { $$ = EMPTY; }
- ;
+ ;
UsingConst: Iconst { $$ = $1; }
| '+' Iconst { $$ = cat_str(2, make_str("+"), $2); }
| ecpg_into ecpg_using { $$ = EMPTY; }
| ecpg_using { $$ = EMPTY; }
| ecpg_into { $$ = EMPTY; }
- ;
+ ;
ecpg_into: INTO into_list { $$ = EMPTY; }
| into_descriptor { $$ = $1; }
%type <str> variable
%type <str> variable_declarations
%type <str> variable_list
-%type <str> vt_declarations
+%type <str> vt_declarations
%type <str> Op
%type <str> IntConstVar
chomp; # strip record separator
@Fld = split(' ', $_, -1);
- # Dump the action for a rule -
+ # Dump the action for a rule -
# mode indicates if we are processing the 'stmt:' rule (mode==0 means normal, mode==1 means stmt:)
# flds are the fields to use. These may start with a '$' - in which case they are the result of a previous non-terminal
# if they dont start with a '$' then they are token name
if ($replace_token{$arr[$fieldIndexer]}) {
$arr[$fieldIndexer] = $replace_token{$arr[$fieldIndexer]};
}
-
- # Are we looking at a declaration of a non-terminal ?
+
+ # Are we looking at a declaration of a non-terminal ?
if (($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:') || $arr[$fieldIndexer + 1] eq ':') {
$non_term_id = $arr[$fieldIndexer];
$s = ':', $non_term_id =~ s/$s//g;
$copymode = 'on';
}
$line = $line . ' ' . $arr[$fieldIndexer];
- # Do we have the : attached already ?
+ # Do we have the : attached already ?
# If yes, we'll have already printed the ':'
if (!($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:')) {
# Consume the ':' which is next...
$fieldIndexer++;
}
- # Special mode?
+ # Special mode?
if ($non_term_id eq 'stmt') {
$stmt_mode = 1;
}
sub dump_fields {
local($mode, *flds, $len, $ln) = @_;
if ($mode == 0) {
- #Normal
+ #Normal
&add_to_buffer('rules', $ln);
if ($feature_not_supported == 1) {
# we found an unsupported feature, but we have to
}
if ($len == 0) {
- # We have no fields ?
+ # We have no fields ?
&add_to_buffer('rules', " \$\$=EMPTY; }");
}
else {
}
}
- # So - how many fields did we end up with ?
+ # So - how many fields did we end up with ?
if ($cnt == 1) {
# Straight assignement
$str = " \$\$ = " . $flds_new{0} . ';';
char *token_start;
int state_before;
-struct _yy_buffer
-{
+struct _yy_buffer
+{
YY_BUFFER_STATE buffer;
long lineno;
char *filename;
#define MAX_NESTED_IF 128
static short preproc_tos;
static short ifcond;
-static struct _if_value
+static struct _if_value
{
short condition;
short else_branch;
%option yylineno
-%x C SQL incl def def_ident undef
+%x C SQL incl def def_ident undef
/*
* OK, here is a short description of lex/flex rules behavior.
/* throw back all but the initial "$" */
yyless(1);
/* and treat it as {other} */
- return yytext[0];
+ return yytext[0];
}
<SQL>{dolqdelim} {
token_start = yytext;
}
<SQL>{identifier} {
const ScanKeyword *keyword;
-
+
if (!isdefine())
{
/* Is it an SQL/ECPG keyword? */
}
<SQL>{other} { return yytext[0]; }
<C>{exec_sql} { BEGIN(SQL); return SQL_START; }
-<C>{informix_special} {
+<C>{informix_special} {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
yyterminate();
}
<C>{exec_sql}{include}{space}* { BEGIN(incl); }
-<C>{informix_special}{include}{space}* {
+<C>{informix_special}{include}{space}* {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
}
}
<C,xskip>{exec_sql}{ifdef}{space}* { ifcond = TRUE; BEGIN(xcond); }
-<C,xskip>{informix_special}{ifdef}{space}* {
+<C,xskip>{informix_special}{ifdef}{space}* {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
}
}
<C,xskip>{exec_sql}{ifndef}{space}* { ifcond = FALSE; BEGIN(xcond); }
-<C,xskip>{informix_special}{ifndef}{space}* {
+<C,xskip>{informix_special}{ifndef}{space}* {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
ifcond = TRUE; BEGIN(xcond);
}
-<C,xskip>{informix_special}{elif}{space}* {
+<C,xskip>{informix_special}{elif}{space}* {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
<xcond>{identifier}{space}*";" {
if (preproc_tos >= MAX_NESTED_IF-1)
mmerror(PARSE_ERROR, ET_FATAL, "too many nested EXEC SQL IFDEF conditions");
- else
+ else
{
struct _defines *defptr;
unsigned int i;
<def_ident>{other}|\n {
mmerror(PARSE_ERROR, ET_FATAL, "missing identifier in EXEC SQL DEFINE command");
yyterminate();
- }
+ }
<def>{space}*";" {
struct _defines *ptr, *this;
<<EOF>> {
if (yy_buffer == NULL)
{
- if ( preproc_tos > 0 )
+ if ( preproc_tos > 0 )
{
preproc_tos = 0;
mmerror(PARSE_ERROR, ET_FATAL, "missing \"EXEC SQL ENDIF;\"");
ptr->used = NULL;
break;
}
-
+
if (yyin != NULL)
fclose(yyin);
if (i != 0)
output_line_number();
-
+
}
}
<INITIAL>{other}|\n { mmerror(PARSE_ERROR, ET_FATAL, "internal error: unreachable state; please report this to <
[email protected]>"); }
/* enlarge buffer if needed */
if ((literallen+yleng) >= literalalloc)
{
- do
+ do
literalalloc *= 2;
while ((literallen+yleng) >= literalalloc);
literalbuf = (char *) realloc(literalbuf, literalalloc);
/*
* skip the ";" if there is one and trailing whitespace. Note that
- * yytext contains at least one non-space character plus the ";"
+ * yytext contains at least one non-space character plus the ";"
*/
for (i = strlen(yytext)-2;
i > 0 && ecpg_isspace(yytext[i]);
i--;
yytext[i+1] = '\0';
-
+
yyin = NULL;
/* If file name is enclosed in '"' remove these and look only in '.' */
{
yytext[i] = '\0';
memmove(yytext, yytext+1, strlen(yytext));
-
+
strncpy(inc_file, yytext, sizeof(inc_file));
yyin = fopen(inc_file, "r");
if (!yyin)
yyin = fopen(inc_file, "r");
}
}
-
+
}
else
{
yytext[i] = '\0';
memmove(yytext, yytext+1, strlen(yytext));
}
-
+
for (ip = include_paths; yyin == NULL && ip != NULL; ip = ip->next)
{
if (strlen(ip->path) + strlen(yytext) + 3 > MAXPGPATH)
override CPPFLAGS := -I../../include -I$(top_srcdir)/src/interfaces/ecpg/include \
- -I$(libpq_srcdir) $(CPPFLAGS)
-override CFLAGS += $(PTHREAD_CFLAGS)
+ -I$(libpq_srcdir) $(CPPFLAGS)
+override CFLAGS += $(PTHREAD_CFLAGS)
override LDFLAGS := -L../../ecpglib -L../../pgtypeslib $(filter-out -l%, $(libpq)) $(LDFLAGS)
override LIBS := -lecpg -lpgtypes $(filter -l%, $(libpq)) $(LIBS) $(PTHREAD_LIBS)
strcpy(msg, "commit");
exec sql commit;
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
exec sql disconnect;
return (0);
while (1)
{
strcpy(msg, "fetch");
- exec sql fetch 1 from mycur1 into descriptor outp_sqlda;
+ exec sql fetch 1 from mycur1 into descriptor outp_sqlda;
printf("FETCH RECORD %d\n", ++rec);
dump_sqlda(outp_sqlda);
int main(void)
{
- $int i = 14;
+ $int i = 14;
$decimal j, m, n;
$string c[10];
EXEC SQL create table history (customerid integer, timestamp timestamp without time zone, action_taken char(5), narrative varchar(100));
sql_check("main", "create", 0);
-
- EXEC SQL insert into history
+
+ EXEC SQL insert into history
(customerid, timestamp, action_taken, narrative)
values(1, '2003-05-07 13:28:34 CEST', 'test', 'test');
sql_check("main", "insert", 0);
(customerid, timestamp, action_taken, narrative)
values(:c, :e, 'test', 'test');
sql_check("main", "update", 0);
-
+
EXEC SQL commit;
EXEC SQL drop table history;
#line 193 "describe.pgc"
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 196 "describe.pgc"
if (sqlca.sqlcode < 0) exit (1);}
#line 109 "sqlda.pgc"
-
+
printf("FETCH RECORD %d\n", ++rec);
dump_sqlda(outp_sqlda);
int i = 14 ;
#line 14 "test_informix.pgc"
-
+
#line 15 "test_informix.pgc"
decimal j , m , n ;
#line 68 "test_informix2.pgc"
sql_check("main", "create", 0);
-
+
{ ECPGdo(__LINE__, 1, 1, NULL, 0, ECPGst_normal, "insert into history ( customerid , timestamp , action_taken , narrative ) values ( 1 , '2003-05-07 13:28:34 CEST' , 'test' , 'test' )", ECPGt_EOIT, ECPGt_EORT);
#line 73 "test_informix2.pgc"
#line 97 "test_informix2.pgc"
sql_check("main", "update", 0);
-
+
{ ECPGtrans(__LINE__, NULL, "commit");
#line 100 "test_informix2.pgc"
char *t1 = "2000-7-12 17:34:29";
int i;
- ECPGdebug(1, stderr);
- /* exec sql whenever sqlerror do sqlprint ( ) ; */
+ ECPGdebug(1, stderr);
+ /* exec sql whenever sqlerror do sqlprint ( ) ; */
#line 27 "dt_test.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0);
+ { ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0);
#line 28 "dt_test.pgc"
if (sqlca.sqlcode < 0) sqlprint ( );}
#line 28 "dt_test.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table date_test ( d date , ts timestamp )", ECPGt_EOIT, ECPGt_EORT);
+ { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table date_test ( d date , ts timestamp )", ECPGt_EOIT, ECPGt_EORT);
#line 29 "dt_test.pgc"
if (sqlca.sqlcode < 0) sqlprint ( );}
#line 31 "dt_test.pgc"
- date1 = PGTYPESdate_from_asc(d1, NULL);
- ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
+ date1 = PGTYPESdate_from_asc(d1, NULL);
+ ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into date_test ( d , ts ) values ( $1 , $2 )",
ECPGt_date,&(date1),(long)1,(long)1,sizeof(date),
ECPGdebug(1, stderr);
-
+
{ ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0);
#line 50 "array_of_struct.pgc"
#line 239 "cursor.pgc"
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 242 "cursor.pgc"
-
+
/* = 1L */
#line 60 "init.pgc"
/* exec sql whenever sqlerror do sqlnotice ( NULL , 0 ) ; */
#line 97 "init.pgc"
-
+
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select now ( )", ECPGt_EOIT, ECPGt_EORT);
#line 98 "init.pgc"
#line 118 "outofscope.pgc"
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 121 "outofscope.pgc"
#line 95 "variable.pgc"
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 98 "variable.pgc"
#line 65 "whenever.pgc"
exit (0);
-}
+}
#line 29 "array.pgc"
- { ECPGtrans(__LINE__, NULL, "begin work");
+ { ECPGtrans(__LINE__, NULL, "begin work");
#line 31 "array.pgc"
if (sqlca.sqlcode < 0) sqlprint();}
if (sqlca.sqlcode < 0) sqlprint();}
#line 43 "array.pgc"
-
+
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select f , text from test where i = 1", ECPGt_EOIT,
ECPGt_double,&(f),(long)1,(long)1,sizeof(double),
ECPGdebug(1,stderr);
-
+
{ ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0); }
#line 15 "code100.pgc"
#line 22 "code100.pgc"
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
+
for (index=0;index<10;++index)
{ { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into test ( payload , index ) values ( 0 , $1 )",
ECPGt_int,&(index),(long)1,(long)1,sizeof(int),
#line 31 "code100.pgc"
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
+
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "update test set payload = payload + 1 where index = - 1", ECPGt_EOIT, ECPGt_EORT);}
#line 35 "code100.pgc"
if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
+
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "delete from test where index = - 1", ECPGt_EOIT, ECPGt_EORT);}
#line 38 "code100.pgc"
#line 46 "code100.pgc"
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
+
{ ECPGdisconnect(__LINE__, "CURRENT");}
#line 49 "code100.pgc"
#line 193 "describe.pgc"
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 196 "describe.pgc"
for (i=0;i<sqlca.sqlerrd[2];++i)
{
if (i1[i]) printf("NULL, ");
- else printf("%d, ",d1[i]);
+ else printf("%d, ",d1[i]);
if (i2[i]) printf("NULL, ");
- else printf("%f, ",d2[i]);
+ else printf("%f, ",d2[i]);
if (i3[i]) printf("NULL, ");
- else printf("'%s', ",d3[i]);
+ else printf("'%s', ",d3[i]);
if (i4[i]) printf("NULL, ");
- else printf("'%s', ",d4[i]);
+ else printf("'%s', ",d4[i]);
if (i5[i]) printf("NULL, ");
- else printf("'%s', ",d5[i]);
+ else printf("'%s', ",d5[i]);
if (i6[i]) printf("NULL, ");
- else printf("'%s', ",d6[i]);
+ else printf("'%s', ",d6[i]);
if (i7[i]) printf("NULL, ");
- else printf("'%s', ",d7[i]);
+ else printf("'%s', ",d7[i]);
if (i9[i]) printf("NULL, ");
- else printf("'%s', ",d9[i]);
+ else printf("'%s', ",d9[i]);
printf("\n");
}
for (i=0;i < sqlca.sqlerrd[2];++i)
{
if (ipointer1[i]) printf("NULL, ");
- else printf("%d, ",ip1[i]);
+ else printf("%d, ",ip1[i]);
if (ipointer2[i]) printf("NULL, ");
- else printf("'%s', ",cp2[i]);
+ else printf("'%s', ",cp2[i]);
printf("\n");
}
ECPGfree_auto_mem();
int main() {
/* exec sql begin declare section */
-
+
#line 9 "fetch.pgc"
char str [ 25 ] ;
if (sqlca.sqlcode < 0) exit (1);}
#line 111 "sqlda.pgc"
-
+
printf("FETCH RECORD %d\n", ++rec);
dump_sqlda(outp_sqlda);
char *t1 = "2000-7-12 17:34:29";
int i;
- ECPGdebug(1, stderr);
- exec sql whenever sqlerror do sqlprint();
- exec sql connect to REGRESSDB1;
- exec sql create table date_test (d date, ts timestamp);
+ ECPGdebug(1, stderr);
+ exec sql whenever sqlerror do sqlprint();
+ exec sql connect to REGRESSDB1;
+ exec sql create table date_test (d date, ts timestamp);
exec sql set datestyle to iso;
exec sql set intervalstyle to postgres_verbose;
- date1 = PGTYPESdate_from_asc(d1, NULL);
- ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
+ date1 = PGTYPESdate_from_asc(d1, NULL);
+ ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
exec sql insert into date_test(d, ts) values (:date1, :ts1);
EXEC SQL end declare section;
ECPGdebug(1, stderr);
-
+
EXEC SQL connect to REGRESSDB1;
EXEC SQL create table customers (c varchar(50), p int);
strcpy(msg, "commit");
exec sql commit;
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
exec sql disconnect;
return (0);
int e=y->member;
int c=10>>2;
- bool h=2||1;
+ bool h=2||1;
long iay /* = 1L */ ;
exec sql end declare section;
exec sql select now();
exec sql whenever sqlerror do fe(ENUM0);
exec sql select now();
- exec sql whenever sqlerror do sqlnotice(NULL, NONO);
+ exec sql whenever sqlerror do sqlnotice(NULL, NONO);
exec sql select now();
return 0;
}
strcpy(msg, "commit");
exec sql commit;
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
exec sql disconnect;
return (0);
strcpy(msg, "commit");
exec sql commit;
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
exec sql disconnect;
return (0);
exec sql select 1 into :i;
exec sql rollback;
exit (0);
-}
+}
parser parser.c \
quote quote.c \
show show.c \
- insupd insupd.c
+ insupd insupd.c
all: $(TESTS)
EXEC SQL SET AUTOCOMMIT = ON;
- EXEC SQL BEGIN WORK;
+ EXEC SQL BEGIN WORK;
EXEC SQL CREATE TABLE test (f float, i int, a int[10], text char(10));
EXEC SQL COMMIT;
- EXEC SQL BEGIN WORK;
+ EXEC SQL BEGIN WORK;
EXEC SQL SELECT f,text
INTO :f,:text
ECPGdebug(1,stderr);
-
+
exec sql connect to REGRESSDB1;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
exec sql commit work;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
+
for (index=0;index<10;++index)
{ exec sql insert into test
(payload, index)
}
exec sql commit work;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
+
exec sql update test
- set payload=payload+1 where index=-1;
+ set payload=payload+1 where index=-1;
if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
+
exec sql delete from test where index=-1;
if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
exec sql commit work;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
+
exec sql disconnect;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
return 0;
strcpy(msg, "commit");
exec sql commit;
- strcpy(msg, "disconnect");
+ strcpy(msg, "disconnect");
exec sql disconnect;
return (0);
for (i=0;i<sqlca.sqlerrd[2];++i)
{
if (i1[i]) printf("NULL, ");
- else printf("%d, ",d1[i]);
+ else printf("%d, ",d1[i]);
if (i2[i]) printf("NULL, ");
- else printf("%f, ",d2[i]);
+ else printf("%f, ",d2[i]);
if (i3[i]) printf("NULL, ");
- else printf("'%s', ",d3[i]);
+ else printf("'%s', ",d3[i]);
if (i4[i]) printf("NULL, ");
- else printf("'%s', ",d4[i]);
+ else printf("'%s', ",d4[i]);
if (i5[i]) printf("NULL, ");
- else printf("'%s', ",d5[i]);
+ else printf("'%s', ",d5[i]);
if (i6[i]) printf("NULL, ");
- else printf("'%s', ",d6[i]);
+ else printf("'%s', ",d6[i]);
if (i7[i]) printf("NULL, ");
- else printf("'%s', ",d7[i]);
+ else printf("'%s', ",d7[i]);
if (i9[i]) printf("NULL, ");
- else printf("'%s', ",d9[i]);
+ else printf("'%s', ",d9[i]);
printf("\n");
}
for (i=0;i < sqlca.sqlerrd[2];++i)
{
if (ipointer1[i]) printf("NULL, ");
- else printf("%d, ",ip1[i]);
+ else printf("%d, ",ip1[i]);
if (ipointer2[i]) printf("NULL, ");
- else printf("'%s', ",cp2[i]);
+ else printf("'%s', ",cp2[i]);
printf("\n");
}
ECPGfree_auto_mem();
int main() {
EXEC SQL BEGIN DECLARE SECTION;
char str[25];
- int i, count=1;
+ int i, count=1;
EXEC SQL END DECLARE SECTION;
ECPGdebug(1, stderr);
while (1)
{
strcpy(msg, "fetch");
- exec sql fetch 1 from mycur1 into descriptor outp_sqlda;
+ exec sql fetch 1 from mycur1 into descriptor outp_sqlda;
printf("FETCH RECORD %d\n", ++rec);
dump_sqlda(outp_sqlda);
!IF "$(OS)" == "Windows_NT"
NULL=
-!ELSE
+!ELSE
NULL=nul
-!ENDIF
+!ENDIF
!IF "$(CFG)" == "Debug"
DEBUG=1
LIB32=tlib.exe
-LIB32_FLAGS=
+LIB32_FLAGS=
LIB32_OBJS= \
"$(INTDIR)\win32.obj" \
"$(INTDIR)\getaddrinfo.obj" \
# @<< is a Response file, http://www.opussoftware.com/tutorial/TutMakefile.htm
-"$(OUTDIR)\blibpq.dll": "$(OUTDIR)\blibpq.lib" "$(INTDIR)\libpq.res" blibpqdll.def
+"$(OUTDIR)\blibpq.dll": "$(OUTDIR)\blibpq.lib" "$(INTDIR)\libpq.res" blibpqdll.def
$(LINK32) @<<
$(LINK32_FLAGS) +
c0d32.obj , +
# Connection configuration file
#
# A service is a set of named connection parameters. You may specify
-# multiple services in this file. Each starts with a service name in
+# multiple services in this file. Each starts with a service name in
# brackets. Subsequent lines have connection configuration parameters of
# the pattern "param=value" or LDAP URLs starting with "ldap://"
-# to look up such parameters. A sample configuration for postgres is
+# to look up such parameters. A sample configuration for postgres is
# included in this file. Lines beginning with '#' are comments.
#
# Copy this to your sysconf directory (typically /usr/local/pgsql/etc) and
OUTFILENAME=libpq
!ENDIF
-!IF "$(SSL_INC)" == ""
+!IF "$(SSL_INC)" == ""
SSL_INC=C:\OpenSSL\include
!MESSAGE Using default OpenSSL Include directory: $(SSL_INC)
!ENDIF
!MESSAGE Using default OpenSSL Library directory: $(SSL_LIB_PATH)
!ENDIF
-!IF "$(KFW_INC)" == ""
+!IF "$(KFW_INC)" == ""
KFW_INC=C:\kfw-2.6.5\inc
!MESSAGE Using default Kerberos Include directory: $(KFW_INC)
!ENDIF
!IF "$(OS)" == "Windows_NT"
NULL=
-!ELSE
+!ELSE
NULL=nul
-!ENDIF
+!ENDIF
CPP=cl.exe
RSC=rc.exe
LIB32=link.exe -lib
-LIB32_FLAGS=$(LOPT) /nologo /out:"$(OUTDIR)\$(OUTFILENAME).lib"
+LIB32_FLAGS=$(LOPT) /nologo /out:"$(OUTDIR)\$(OUTFILENAME).lib"
LIB32_OBJS= \
"$(INTDIR)\win32.obj" \
"$(INTDIR)\getaddrinfo.obj" \
AROPT = crs
-
+
DLSUFFIX = .so
ifdef PGXS
# Rule for building a shared library from a single .o file
%.so: %.o
- $(CC) $(CFLAGS) $(LDFLAGS) $(LDFLAGS_SL) -shared -o $@ $<
+ $(CC) $(CFLAGS) $(LDFLAGS) $(LDFLAGS_SL) -shared -o $@ $<
sqlmansect = 5sql
# PGXS: PostgreSQL extensions makefile
-# src/makefiles/pgxs.mk
+# src/makefiles/pgxs.mk
# This file contains generic rules to build many kinds of simple
# extension modules. You only need to set a few variables and include
REGRESS_OPTS = --dbname=$(PL_TESTDB) --load-language=plperl --load-language=plperlu
REGRESS = plperl plperl_trigger plperl_shared plperl_elog plperl_util plperl_init plperlu
-# if Perl can support two interpreters in one backend,
+# if Perl can support two interpreters in one backend,
# test plperl-and-plperlu cases
ifneq ($(PERL),)
ifeq ($(shell $(PERL) -V:usemultiplicity), usemultiplicity='define';)
plperl_opmask.h: plperl_opmask.pl
$(PERL) $< $@
-perlchunks.h: $(PERLCHUNKS)
+perlchunks.h: $(PERLCHUNKS)
$(PERL) $(srcdir)/text2macro.pl --strip='^(\#.*|\s*)$$' $^ > $@
all: all-lib
/**********************************************************************
* PostgreSQL::InServer::SPI
*
- * SPI interface for plperl.
+ * SPI interface for plperl.
*
* src/pl/plperl/SPI.xs
*
CODE:
int i;
SV** argv;
- if (items < 1)
+ if (items < 1)
Perl_croak(aTHX_ "Usage: spi_prepare(query, ...)");
argv = ( SV**) palloc(( items - 1) * sizeof(SV*));
- for ( i = 1; i < items; i++)
+ for ( i = 1; i < items; i++)
argv[i - 1] = ST(i);
RETVAL = plperl_spi_prepare(query, items - 1, argv);
pfree( argv);
HV *attr = NULL;
int i, offset = 1, argc;
SV ** argv;
- if ( items < 1)
- Perl_croak(aTHX_ "Usage: spi_exec_prepared(query, [\\%%attr,] "
+ if ( items < 1)
+ Perl_croak(aTHX_ "Usage: spi_exec_prepared(query, [\\%%attr,] "
"[\\@bind_values])");
if ( items > 1 && SvROK( ST( 1)) && SvTYPE( SvRV( ST( 1))) == SVt_PVHV)
- {
+ {
attr = ( HV*) SvRV(ST(1));
offset++;
}
argc = items - offset;
argv = ( SV**) palloc( argc * sizeof(SV*));
- for ( i = 0; offset < items; offset++, i++)
+ for ( i = 0; offset < items; offset++, i++)
argv[i] = ST(offset);
ret_hash = plperl_spi_exec_prepared(query, attr, argc, argv);
RETVAL = newRV_noinc((SV*)ret_hash);
CODE:
int i;
SV ** argv;
- if ( items < 1)
+ if ( items < 1)
Perl_croak(aTHX_ "Usage: spi_query_prepared(query, "
"[\\@bind_values])");
argv = ( SV**) palloc(( items - 1) * sizeof(SV*));
- for ( i = 1; i < items; i++)
+ for ( i = 1; i < items; i++)
argv[i - 1] = ST(i);
RETVAL = plperl_spi_query_prepared(query, items - 1, argv);
pfree( argv);
util_quote_nullable(sv)
SV *sv
CODE:
- if (!sv || !SvOK(sv))
+ if (!sv || !SvOK(sv))
{
RETVAL = newSVstring_len("NULL", 4);
}
- else
+ else
{
text *arg = sv2text(sv);
text *ret = DatumGetTextP(DirectFunctionCall1(quote_nullable, PointerGetDatum(arg)));
---
--- Test arrary return
---
-CREATE OR REPLACE FUNCTION array_of_text() RETURNS TEXT[][]
-LANGUAGE plperl as $$
- return [['a"b',undef,'c,d'],['e\\f',undef,'g']];
+CREATE OR REPLACE FUNCTION array_of_text() RETURNS TEXT[][]
+LANGUAGE plperl as $$
+ return [['a"b',undef,'c,d'],['e\\f',undef,'g']];
$$;
SELECT array_of_text();
array_of_text
# alternative - causes server process to exit(255)
spi_exec_query("invalid sql statement");
$$ language plperl; -- compile plperl code
-
CREATE OR REPLACE FUNCTION foo() RETURNS integer AS $$
spi_exec_query("SELECT * FROM bar()");
return 1;
$$ LANGUAGE plperlu; -- compile plperlu code
-
SELECT * FROM bar(); -- throws exception normally (running plperl)
ERROR: syntax error at or near "invalid" at line 4.
CONTEXT: PL/Perl function "bar"
}
return undef; # allow statement to proceed;
$$;
-CREATE TRIGGER show_trigger_data_trig
+CREATE TRIGGER show_trigger_data_trig
BEFORE INSERT OR UPDATE OR DELETE ON trigger_test
FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
insert into trigger_test values(1,'insert');
CONTEXT: PL/Perl function "trigger_data"
NOTICE: $_TD->{when} = 'BEFORE'
CONTEXT: PL/Perl function "trigger_data"
-
DROP TRIGGER show_trigger_data_trig on trigger_test;
insert into trigger_test values(1,'insert');
CREATE VIEW trigger_test_view AS SELECT * FROM trigger_test;
CONTEXT: PL/Perl function "trigger_data"
DROP VIEW trigger_test_view;
delete from trigger_test;
-
DROP FUNCTION trigger_data();
CREATE OR REPLACE FUNCTION valid_id() RETURNS trigger AS $$
if (($_TD->{new}{i}>=100) || ($_TD->{new}{i}<=0))
{
return "SKIP"; # Skip INSERT/UPDATE command
- }
- elsif ($_TD->{new}{v} ne "immortal")
+ }
+ elsif ($_TD->{new}{v} ne "immortal")
{
$_TD->{new}{v} .= "(modified by trigger)";
return "MODIFY"; # Modify tuple and proceed INSERT/UPDATE command
- }
- else
+ }
+ else
{
return; # Proceed INSERT/UPDATE command
}
if ($_TD->{old}{v} eq $_TD->{args}[0])
{
return "SKIP"; # Skip DELETE command
- }
- else
- {
+ }
+ else
+ {
return; # Proceed DELETE command
};
$$ LANGUAGE plperl;
# src/pl/plperl/plc_trusted.pl
package PostgreSQL::InServer::safe;
-
+
# Load widely useful pragmas into plperl to make them available.
#
# SECURITY RISKS:
&argtypes, &argnames, &argmodes);
for (i = 0; i < numargs; i++)
{
- if (get_typtype(argtypes[i]) == TYPTYPE_PSEUDO &&
+ if (get_typtype(argtypes[i]) == TYPTYPE_PSEUDO &&
argtypes[i] != RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
typeStruct = (Form_pg_type) GETSTRUCT(typeTup);
/* Disallow pseudotype argument */
- if (typeStruct->typtype == TYPTYPE_PSEUDO &&
+ if (typeStruct->typtype == TYPTYPE_PSEUDO &&
procStruct->proargtypes.values[i] != RECORDOID)
{
free(prodesc->proname);
format_type_be(procStruct->proargtypes.values[i]))));
}
- if (typeStruct->typtype == TYPTYPE_COMPOSITE ||
+ if (typeStruct->typtype == TYPTYPE_COMPOSITE ||
procStruct->proargtypes.values[i] == RECORDOID)
prodesc->arg_is_rowtype[i] = true;
else
---
--- Test arrary return
---
-CREATE OR REPLACE FUNCTION array_of_text() RETURNS TEXT[][]
-LANGUAGE plperl as $$
- return [['a"b',undef,'c,d'],['e\\f',undef,'g']];
+CREATE OR REPLACE FUNCTION array_of_text() RETURNS TEXT[][]
+LANGUAGE plperl as $$
+ return [['a"b',undef,'c,d'],['e\\f',undef,'g']];
$$;
SELECT array_of_text();
# alternative - causes server process to exit(255)
spi_exec_query("invalid sql statement");
$$ language plperl; -- compile plperl code
-
+
CREATE OR REPLACE FUNCTION foo() RETURNS integer AS $$
spi_exec_query("SELECT * FROM bar()");
return 1;
$$ LANGUAGE plperlu; -- compile plperlu code
-
+
SELECT * FROM bar(); -- throws exception normally (running plperl)
SELECT * FROM foo(); -- used to cause backend crash (after switching to plperlu)
return undef; # allow statement to proceed;
$$;
-CREATE TRIGGER show_trigger_data_trig
+CREATE TRIGGER show_trigger_data_trig
BEFORE INSERT OR UPDATE OR DELETE ON trigger_test
FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
insert into trigger_test values(1,'insert');
update trigger_test set v = 'update' where i = 1;
delete from trigger_test;
-
+
DROP TRIGGER show_trigger_data_trig on trigger_test;
insert into trigger_test values(1,'insert');
DROP VIEW trigger_test_view;
delete from trigger_test;
-
+
DROP FUNCTION trigger_data();
CREATE OR REPLACE FUNCTION valid_id() RETURNS trigger AS $$
if (($_TD->{new}{i}>=100) || ($_TD->{new}{i}<=0))
{
return "SKIP"; # Skip INSERT/UPDATE command
- }
- elsif ($_TD->{new}{v} ne "immortal")
+ }
+ elsif ($_TD->{new}{v} ne "immortal")
{
$_TD->{new}{v} .= "(modified by trigger)";
return "MODIFY"; # Modify tuple and proceed INSERT/UPDATE command
- }
- else
+ }
+ else
{
return; # Proceed INSERT/UPDATE command
}
if ($_TD->{old}{v} eq $_TD->{args}[0])
{
return "SKIP"; # Skip DELETE command
- }
- else
- {
+ }
+ else
+ {
return; # Proceed DELETE command
};
$$ LANGUAGE plperl;
print $fh "int main() { puts(X); return 0; }\n";
close $fh;
system("cat -n $tmp.c");
-
+
system("make $tmp") == 0 or die;
open $fh, "./$tmp |" or die;
my $result = <$fh>;
if (endtoken == K_USING)
{
PLpgSQL_expr *expr;
-
+
do
{
expr = read_sql_expression2(',', ';',
--
-- Universal Newline Support
---
+--
CREATE OR REPLACE FUNCTION newline_lf() RETURNS integer AS
E'x = 100\ny = 23\nreturn x + y\n'
LANGUAGE plpythonu;
lname text not null,
username text,
userid serial,
- PRIMARY KEY(lname, fname)
+ PRIMARY KEY(lname, fname)
) ;
NOTICE: CREATE TABLE will create implicit sequence "users_userid_seq" for serial column "users.userid"
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "users_pkey" for table "users"
val = TD[key]
plpy.notice("TD[" + key + "] => " + str(val))
-return None
+return None
$$;
CREATE TRIGGER show_trigger_data_trig_before
--
-- Universal Newline Support
---
+--
CREATE OR REPLACE FUNCTION newline_lf() RETURNS integer AS
E'x = 100\ny = 23\nreturn x + y\n'
lname text not null,
username text,
userid serial,
- PRIMARY KEY(lname, fname)
+ PRIMARY KEY(lname, fname)
) ;
CREATE INDEX users_username_idx ON users(username);
val = TD[key]
plpy.notice("TD[" + key + "] => " + str(val))
-return None
+return None
$$;
where key1 = \\$1 and key2 = \\$2" \\
{int4 bpchar}]
}
-
+
set n [spi_execp -count 1 $GD(plan) [list $1 $2]]
if {$n > 0} {
set dnames [info locals {[a-zA-Z]*} ]
foreach key [lsort $dnames] {
-
- if { [array exists $key] } {
+
+ if { [array exists $key] } {
set str "{"
foreach akey [lsort [ array names $key ] ] {
if {[string length $str] > 1} { set str "$str, " }
}
- return OK
+ return OK
$_$;
-CREATE TRIGGER show_trigger_data_trig
+CREATE TRIGGER show_trigger_data_trig
BEFORE INSERT OR UPDATE OR DELETE ON trigger_test
FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
CREATE TRIGGER show_trigger_data_view_trig
where key1 = \\$1 and key2 = \\$2" \\
{int4 bpchar}]
}
-
+
set n [spi_execp -count 1 $GD(plan) [list $1 $2]]
if {$n > 0} {
set dnames [info locals {[a-zA-Z]*} ]
foreach key [lsort $dnames] {
-
- if { [array exists $key] } {
+
+ if { [array exists $key] } {
set str "{"
foreach akey [lsort [ array names $key ] ] {
if {[string length $str] > 1} { set str "$str, " }
}
- return OK
+ return OK
$_$;
-CREATE TRIGGER show_trigger_data_trig
+CREATE TRIGGER show_trigger_data_trig
BEFORE INSERT OR UPDATE OR DELETE ON trigger_test
FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
all: $(PROGS)
-clean:
+clean:
rm -f $(PROGS)
all: $(PROGS)
-clean:
+clean:
rm -f $(PROGS)
for d in $(DIRS); do \
$(MAKE) -C $$d clean || exit; \
the files.
Oleg.
-----
+----
-all:
+all:
-test:
+test:
./runall
-clean:
+clean:
rm -f *.out
-all:
+all:
-test:
+test:
./runall
-clean:
+clean:
rm -f *.out
-all:
+all:
-test:
+test:
./runall
-clean:
+clean:
rm -f *.out
-all:
+all:
-test:
+test:
./runall
-clean:
+clean:
rm -f *.out
else
EXPECTED="expected/${i}.out"
fi
-
+
if [ `diff ${EXPECTED} results/${i}.out | wc -l` -ne 0 ]
then
( diff -C3 ${EXPECTED} results/${i}.out; \
);
# Tests to run: test' script, test' description, ...
-# Test' script is in form
+# Test' script is in form
#
# script_name[.ntm][ T]
-#
+#
# script_name is name of file in ./sqls
# .ntm means that script will be used for some initialization
# and should not be timed: runtests.pl opens /dev/null as STDERR
# Script shouldn't notice either he is running for test or for
# initialization purposes.
# T means that all queries in this test (initialization ?) are to be
-# executed in SINGLE transaction. In this case global variable $XACTBLOCK
+# executed in SINGLE transaction. In this case global variable $XACTBLOCK
# is not empty string. Otherwise, each query in test is to be executed
-# in own transaction ($XACTBLOCK is empty string). In accordance with
-# $XACTBLOCK, script is to do DBMS specific preparation before execution
-# of queries. (Look at example in sqls/inssimple for MySQL - it gives
+# in own transaction ($XACTBLOCK is empty string). In accordance with
+# $XACTBLOCK, script is to do DBMS specific preparation before execution
+# of queries. (Look at example in sqls/inssimple for MySQL - it gives
# an idea of what can be done for features unsupported by an DBMS.)
#
@perftests = (
$runtest = $test;
if ( $test =~ /\.ntm/ )
{
- #
+ #
# No timing for this queries
- #
+ #
close (STDERR); # close $TmpFile
open (STDERR, ">/dev/null") or die;
$runtest =~ s/\.ntm//;
runtest-parallel: installcheck-parallel
bigtest: all tablespace-setup
- $(pg_regress_call) --psqldir=$(PSQLDIR) --schedule=$(srcdir)/serial_schedule numeric_big
+ $(pg_regress_call) --psqldir=$(PSQLDIR) --schedule=$(srcdir)/serial_schedule numeric_big
bigcheck: all tablespace-setup
$(pg_regress_call) --temp-install=./tmp_check --top-builddir=$(top_builddir) --schedule=$(srcdir)/parallel_schedule $(MAXCONNOPT) numeric_big
--
--
-- timezones may vary based not only on location but the operating
--- system. the main correctness issue is that the OS may not get
+-- system. the main correctness issue is that the OS may not get
-- daylight savings time right for times prior to Unix epoch (jan 1 1970).
--
CREATE TABLE ABSTIME_TBL (f1 abstime);
INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'infinity');
INSERT INTO ABSTIME_TBL (f1) VALUES (abstime '-infinity');
INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'May 10, 1947 23:59:12');
--- what happens if we specify slightly misformatted abstime?
+-- what happens if we specify slightly misformatted abstime?
INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00');
ERROR: date/time field value out of range: "Feb 35, 1946 10:00:00"
LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00'...
ERROR: date/time field value out of range: "Feb 28, 1984 25:08:10"
LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 28, 1984 25:08:10'...
^
--- badly formatted abstimes: these should result in invalid abstimes
+-- badly formatted abstimes: these should result in invalid abstimes
INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format');
ERROR: invalid input syntax for type abstime: "bad date format"
LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format');
y BIT(4)
);
-- empty case
-SELECT
+SELECT
BIT_AND(i2) AS "?",
BIT_OR(i4) AS "?"
FROM bitwise_test;
t | t | t | t | t | t | t | t | t
(1 row)
-CREATE TEMPORARY TABLE bool_test(
+CREATE TEMPORARY TABLE bool_test(
b1 BOOL,
b2 BOOL,
b3 BOOL,
ALTER TABLE tmp ADD COLUMN z int2[];
INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u,
v, w, x, y, z)
- VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
- 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}',
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}',
314159, '(1,1)', '512',
'1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
'(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]',
(1 row)
DROP TABLE tmp;
--- the wolf bug - schema mods caused inconsistent row descriptors
+-- the wolf bug - schema mods caused inconsistent row descriptors
CREATE TABLE tmp (
initial int4
);
ALTER TABLE tmp ADD COLUMN z int2[];
INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u,
v, w, x, y, z)
- VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
- 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}',
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}',
314159, '(1,1)', '512',
'1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
'(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]',
ANALYZE tenk1;
set enable_seqscan to off;
set enable_bitmapscan to off;
--- 5 values, sorted
+-- 5 values, sorted
SELECT unique1 FROM tenk1 WHERE unique1 < 5;
unique1
---------
(3 rows)
alter table anothertab alter column atcol2 type text
- using case when atcol2 is true then 'IT WAS TRUE'
+ using case when atcol2 is true then 'IT WAS TRUE'
when atcol2 is false then 'IT WAS FALSE'
else 'IT WAS NULL!' end;
select * from anothertab;
a int2[],
b int4[][][],
c name[],
- d text[][],
+ d text[][],
e float8[],
f char(5)[],
g varchar(5)[]
VALUES ('{"too long"}');
ERROR: value too long for type character(5)
INSERT INTO arrtest (a, b[1:2][1:2], c, d, e, f, g)
- VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}',
+ VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}',
'{{"elt1", "elt2"}}', '{"3.4", "6.7"}',
'{"abc","abcde"}', '{"abc","abcde"}');
INSERT INTO arrtest (a, b[1:2], c, d[1:2])
SELECT arrtest.a[1],
arrtest.b[1][1][1],
arrtest.c[1],
- arrtest.d[1][1],
+ arrtest.d[1][1],
arrtest.e[0]
FROM arrtest;
a | b | c | d | e
SELECT a[1:3],
b[1:1][1:2][1:2],
- c[1:2],
+ c[1:2],
d[1:1][1:2]
FROM arrtest;
a | b | c | d
| [1:2] | [1:2]
(3 rows)
--- returns nothing
+-- returns nothing
SELECT *
FROM arrtest
- WHERE a[1] < 5 and
+ WHERE a[1] < 5 and
c = '{"foobar"}'::_name;
a | b | c | d | e | f | g
---+---+---+---+---+---+---
SELECT a[1:3],
b[1:1][1:2][1:2],
- c[1:2],
+ c[1:2],
d[1:1][2:2]
FROM arrtest;
a | b | c | d
drop type _comptype;
drop table comptable;
drop type comptype;
-create or replace function unnest1(anyarray)
+create or replace function unnest1(anyarray)
returns setof anyelement as $$
select $1[s] from generate_subscripts($1,1) g(s);
$$ language sql immutable;
-create or replace function unnest2(anyarray)
+create or replace function unnest2(anyarray)
returns setof anyelement as $$
select $1[s1][s2] from generate_subscripts($1,1) g1(s1),
generate_subscripts($1,2) g2(s2);
ERROR: bit string length 12 does not match type bit(11)
--INSERT INTO BIT_TABLE VALUES ('X554');
--INSERT INTO BIT_TABLE VALUES ('X555');
-SELECT * FROM BIT_TABLE;
+SELECT * FROM BIT_TABLE;
b
-------------
00000000000
ERROR: bit string too long for type bit varying(11)
--INSERT INTO VARBIT_TABLE VALUES ('X554');
--INSERT INTO VARBIT_TABLE VALUES ('X555');
-SELECT * FROM VARBIT_TABLE;
+SELECT * FROM VARBIT_TABLE;
v
-------------
-- Concatenation
SELECT v, b, (v || b) AS concat
- FROM BIT_TABLE, VARBIT_TABLE
+ FROM BIT_TABLE, VARBIT_TABLE
ORDER BY 3;
v | b | concat
-------------+-------------+------------------------
DROP TABLE varbit_table;
CREATE TABLE varbit_table (a BIT VARYING(16), b BIT VARYING(16));
COPY varbit_table FROM stdin;
-SELECT a, b, ~a AS "~ a", a & b AS "a & b",
+SELECT a, b, ~a AS "~ a", a & b AS "a & b",
a | b AS "a | b", a # b AS "a # b" FROM varbit_table;
a | b | ~ a | a & b | a | b | a # b
------------------+------------------+------------------+------------------+------------------+------------------
DROP TABLE bit_table;
CREATE TABLE bit_table (a BIT(16), b BIT(16));
COPY bit_table FROM stdin;
-SELECT a,b,~a AS "~ a",a & b AS "a & b",
+SELECT a,b,~a AS "~ a",a & b AS "a & b",
a|b AS "a | b", a # b AS "a # b" FROM bit_table;
a | b | ~ a | a & b | a | b | a # b
------------------+------------------+------------------+------------------+------------------+------------------
INSERT INTO BIT_SHIFT_TABLE SELECT b>>8 FROM BIT_SHIFT_TABLE;
SELECT POSITION(B'1101' IN b),
POSITION(B'11011' IN b),
- b
+ b
FROM BIT_SHIFT_TABLE ;
position | position | b
----------+----------+------------------
INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00000000' AS BIT VARYING(20)) >>8 FROM VARBIT_SHIFT_TABLE;
SELECT POSITION(B'1101' IN v),
POSITION(B'11011' IN v),
- v
+ v
FROM VARBIT_SHIFT_TABLE ;
position | position | v
----------+----------+----------------------
-- That allows us to test all the different combinations of
-- lossy and non-lossy pages with the minimum amount of data
CREATE TABLE bmscantest (a int, b int, t text);
-INSERT INTO bmscantest
+INSERT INTO bmscantest
SELECT (r%53), (r%59), 'foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo'
FROM generate_series(1,70000) r;
CREATE INDEX i_bmtest_a ON bmscantest(a);
INSERT INTO BOOLTBL1 (f1) VALUES (bool 't');
INSERT INTO BOOLTBL1 (f1) VALUES (bool 'True');
INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true');
--- BOOLTBL1 should be full of true's at this point
+-- BOOLTBL1 should be full of true's at this point
SELECT '' AS t_3, BOOLTBL1.* FROM BOOLTBL1;
t_3 | f1
-----+----
| t
(3 rows)
-SELECT '' AS t_3, BOOLTBL1.*
+SELECT '' AS t_3, BOOLTBL1.*
FROM BOOLTBL1
WHERE f1 <> bool 'false';
t_3 | f1
(0 rows)
INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f');
-SELECT '' AS f_1, BOOLTBL1.*
+SELECT '' AS f_1, BOOLTBL1.*
FROM BOOLTBL1
WHERE f1 = bool 'false';
f_1 | f1
INSERT INTO BOOLTBL2 (f1) VALUES (bool 'FALSE');
-- This is now an invalid expression
-- For pre-v6.3 this evaluated to false - thomas 1997-10-23
-INSERT INTO BOOLTBL2 (f1)
- VALUES (bool 'XXX');
+INSERT INTO BOOLTBL2 (f1)
+ VALUES (bool 'XXX');
ERROR: invalid input syntax for type boolean: "XXX"
LINE 2: VALUES (bool 'XXX');
^
--- BOOLTBL2 should be full of false's at this point
+-- BOOLTBL2 should be full of false's at this point
SELECT '' AS f_4, BOOLTBL2.* FROM BOOLTBL2;
f_4 | f1
-----+----
CREATE TABLE BOX_TBL (f1 box);
INSERT INTO BOX_TBL (f1) VALUES ('(2.0,2.0,0.0,0.0)');
INSERT INTO BOX_TBL (f1) VALUES ('(1.0,1.0,3.0,3.0)');
--- degenerate cases where the box is a line or a point
--- note that lines and points boxes all have zero area
+-- degenerate cases where the box is a line or a point
+-- note that lines and points boxes all have zero area
INSERT INTO BOX_TBL (f1) VALUES ('(2.5, 2.5, 2.5,3.5)');
INSERT INTO BOX_TBL (f1) VALUES ('(3.0, 3.0,3.0,3.0)');
--- badly formatted box inputs
+-- badly formatted box inputs
INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)');
ERROR: invalid input syntax for type box: "(2.3, 4.5)"
LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)');
| (3,3),(3,3) | 0
(4 rows)
--- overlap
+-- overlap
SELECT '' AS three, b.f1
- FROM BOX_TBL b
+ FROM BOX_TBL b
WHERE b.f1 && box '(2.5,2.5,1.0,1.0)';
three | f1
-------+---------------------
| (2.5,3.5),(2.5,2.5)
(3 rows)
--- left-or-overlap (x only)
+-- left-or-overlap (x only)
SELECT '' AS two, b1.*
FROM BOX_TBL b1
WHERE b1.f1 &< box '(2.0,2.0,2.5,2.5)';
| (2.5,3.5),(2.5,2.5)
(2 rows)
--- right-or-overlap (x only)
+-- right-or-overlap (x only)
SELECT '' AS two, b1.*
FROM BOX_TBL b1
WHERE b1.f1 &> box '(2.0,2.0,2.5,2.5)';
| (3,3),(3,3)
(2 rows)
--- left of
+-- left of
SELECT '' AS two, b.f1
FROM BOX_TBL b
WHERE b.f1 << box '(3.0,3.0,5.0,5.0)';
| (2.5,3.5),(2.5,2.5)
(2 rows)
--- area <=
+-- area <=
SELECT '' AS four, b.f1
FROM BOX_TBL b
WHERE b.f1 <= box '(3.0,3.0,5.0,5.0)';
| (3,3),(3,3)
(4 rows)
--- area <
+-- area <
SELECT '' AS two, b.f1
FROM BOX_TBL b
WHERE b.f1 < box '(3.0,3.0,5.0,5.0)';
| (3,3),(3,3)
(2 rows)
--- area =
+-- area =
SELECT '' AS two, b.f1
FROM BOX_TBL b
WHERE b.f1 = box '(3.0,3.0,5.0,5.0)';
| (3,3),(1,1)
(2 rows)
--- area >
+-- area >
SELECT '' AS two, b.f1
- FROM BOX_TBL b -- zero area
- WHERE b.f1 > box '(3.5,3.0,4.5,3.0)';
+ FROM BOX_TBL b -- zero area
+ WHERE b.f1 > box '(3.5,3.0,4.5,3.0)';
two | f1
-----+-------------
| (2,2),(0,0)
| (3,3),(1,1)
(2 rows)
--- area >=
+-- area >=
SELECT '' AS four, b.f1
- FROM BOX_TBL b -- zero area
+ FROM BOX_TBL b -- zero area
WHERE b.f1 >= box '(3.5,3.0,4.5,3.0)';
four | f1
------+---------------------
| (3,3),(3,3)
(4 rows)
--- right of
+-- right of
SELECT '' AS two, b.f1
FROM BOX_TBL b
WHERE box '(3.0,3.0,5.0,5.0)' >> b.f1;
| (2.5,3.5),(2.5,2.5)
(2 rows)
--- contained in
+-- contained in
SELECT '' AS three, b.f1
FROM BOX_TBL b
WHERE b.f1 <@ box '(0,0,3,3)';
| (3,3),(3,3)
(3 rows)
--- contains
+-- contains
SELECT '' AS three, b.f1
FROM BOX_TBL b
WHERE box '(0,0,3,3)' @> b.f1;
| (3,3),(3,3)
(3 rows)
--- box equality
+-- box equality
SELECT '' AS one, b.f1
FROM BOX_TBL b
WHERE box '(1,1,3,3)' ~= b.f1;
| (3,3),(1,1)
(1 row)
--- center of box, left unary operator
+-- center of box, left unary operator
SELECT '' AS four, @@(b1.f1) AS p
FROM BOX_TBL b1;
four | p
| (3,3)
(4 rows)
--- wholly-contained
+-- wholly-contained
SELECT '' AS one, b1.*, b2.*
- FROM BOX_TBL b1, BOX_TBL b2
+ FROM BOX_TBL b1, BOX_TBL b2
WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1;
one | f1 | f1
-----+-------------+-------------
CREATE TABLE CHAR_TBL(f1 char);
INSERT INTO CHAR_TBL (f1) VALUES ('a');
INSERT INTO CHAR_TBL (f1) VALUES ('A');
--- any of the following three input formats are acceptable
+-- any of the following three input formats are acceptable
INSERT INTO CHAR_TBL (f1) VALUES ('1');
INSERT INTO CHAR_TBL (f1) VALUES (2);
INSERT INTO CHAR_TBL (f1) VALUES ('3');
--- zero-length char
+-- zero-length char
INSERT INTO CHAR_TBL (f1) VALUES ('');
--- try char's of greater than 1 length
+-- try char's of greater than 1 length
INSERT INTO CHAR_TBL (f1) VALUES ('cd');
ERROR: value too long for type character(1)
INSERT INTO CHAR_TBL (f1) VALUES ('c ');
CREATE TABLE CHAR_TBL(f1 char);
INSERT INTO CHAR_TBL (f1) VALUES ('a');
INSERT INTO CHAR_TBL (f1) VALUES ('A');
--- any of the following three input formats are acceptable
+-- any of the following three input formats are acceptable
INSERT INTO CHAR_TBL (f1) VALUES ('1');
INSERT INTO CHAR_TBL (f1) VALUES (2);
INSERT INTO CHAR_TBL (f1) VALUES ('3');
--- zero-length char
+-- zero-length char
INSERT INTO CHAR_TBL (f1) VALUES ('');
--- try char's of greater than 1 length
+-- try char's of greater than 1 length
INSERT INTO CHAR_TBL (f1) VALUES ('cd');
ERROR: value too long for type character(1)
INSERT INTO CHAR_TBL (f1) VALUES ('c ');
CREATE TABLE CHAR_TBL(f1 char);
INSERT INTO CHAR_TBL (f1) VALUES ('a');
INSERT INTO CHAR_TBL (f1) VALUES ('A');
--- any of the following three input formats are acceptable
+-- any of the following three input formats are acceptable
INSERT INTO CHAR_TBL (f1) VALUES ('1');
INSERT INTO CHAR_TBL (f1) VALUES (2);
INSERT INTO CHAR_TBL (f1) VALUES ('3');
--- zero-length char
+-- zero-length char
INSERT INTO CHAR_TBL (f1) VALUES ('');
--- try char's of greater than 1 length
+-- try char's of greater than 1 length
INSERT INTO CHAR_TBL (f1) VALUES ('cd');
ERROR: value too long for type character(1)
INSERT INTO CHAR_TBL (f1) VALUES ('c ');
UPDATE clustertest SET key = 100 WHERE key = 10;
-- Test update where the new row version is found first in the scan
UPDATE clustertest SET key = 35 WHERE key = 40;
--- Test longer update chain
+-- Test longer update chain
UPDATE clustertest SET key = 60 WHERE key = 50;
UPDATE clustertest SET key = 70 WHERE key = 60;
UPDATE clustertest SET key = 80 WHERE key = 70;
HINT: Try the COPY (SELECT ...) TO variant.
\copy: ERROR: cannot copy from view "v_test1"
HINT: Try the COPY (SELECT ...) TO variant.
---
+--
-- Test \copy (select ...)
--
\copy (select "id",'id','id""'||t,(id + 1)*id,t,"test1"."t" from test1 where id=3) to stdout
--
-- all functions CREATEd
CREATE AGGREGATE newavg (
- sfunc = int4_avg_accum, basetype = int4, stype = _int8,
+ sfunc = int4_avg_accum, basetype = int4, stype = _int8,
finalfunc = int8_avg,
initcond1 = '{0,0}'
);
COMMENT ON AGGREGATE newavg (int4) IS NULL;
-- without finalfunc; test obsolete spellings 'sfunc1' etc
CREATE AGGREGATE newsum (
- sfunc1 = int4pl, basetype = int4, stype1 = int4,
+ sfunc1 = int4pl, basetype = int4, stype1 = int4,
initcond1 = '0'
);
-- zero-argument aggregate
CREATE INDEX gpointind ON point_tbl USING gist (f1);
CREATE TEMP TABLE gpolygon_tbl AS
SELECT polygon(home_base) AS f1 FROM slow_emp4000;
-INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' );
-INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' );
+INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' );
+INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' );
CREATE TEMP TABLE gcircle_tbl AS
SELECT circle(home_base) AS f1 FROM slow_emp4000;
CREATE INDEX ggpolygonind ON gpolygon_tbl USING gist (f1);
RESET enable_seqscan;
RESET enable_indexscan;
RESET enable_bitmapscan;
-
DROP TABLE onek_with_null;
INTO TABLE ramp
FROM road
WHERE name ~ '.*Ramp';
-INSERT INTO ihighway
- SELECT *
- FROM road
+INSERT INTO ihighway
+ SELECT *
+ FROM road
WHERE name ~ 'I- .*';
-INSERT INTO shighway
- SELECT *
- FROM road
+INSERT INTO shighway
+ SELECT *
+ FROM road
WHERE name ~ 'State Hwy.*';
UPDATE shighway
SET surface = 'asphalt';
INSERT INTO f_star (class, a, e, f)
VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon);
INSERT INTO f_star (class, c, e, f)
- VALUES ('f', 'hi keith'::name, '-8'::int2,
+ VALUES ('f', 'hi keith'::name, '-8'::int2,
'(1111,3333),(2222,4444)'::polygon);
INSERT INTO f_star (class, a, c)
VALUES ('f', 24, 'hi marc'::name);
INSERT INTO f_star (class, a, e)
VALUES ('f', 25, '-9'::int2);
INSERT INTO f_star (class, a, f)
- VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon);
+ VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon);
INSERT INTO f_star (class, c, e)
VALUES ('f', 'hi allison'::name, '-10'::int2);
INSERT INTO f_star (class, c, f)
INSERT INTO f_star (class, a) VALUES ('f', 27);
INSERT INTO f_star (class, c) VALUES ('f', 'hi carl'::name);
INSERT INTO f_star (class, e) VALUES ('f', '-12'::int2);
-INSERT INTO f_star (class, f)
+INSERT INTO f_star (class, f)
VALUES ('f', '(11111111,33333333),(22222222,44444444)'::polygon);
INSERT INTO f_star (class) VALUES ('f');
--
-- for internal portal (cursor) tests
--
CREATE TABLE iportaltest (
- i int4,
- d float4,
+ i int4,
+ d float4,
p polygon
);
INSERT INTO iportaltest (i, d, p)
--
-- CREATE_OPERATOR
--
-CREATE OPERATOR ## (
+CREATE OPERATOR ## (
leftarg = path,
rightarg = path,
procedure = path_inter,
- commutator = ##
+ commutator = ##
);
CREATE OPERATOR <% (
leftarg = point,
rightarg = widget,
procedure = pt_in_widget,
commutator = >% ,
- negator = >=%
+ negator = >=%
);
CREATE OPERATOR @#@ (
- rightarg = int8, -- left unary
- procedure = numeric_fac
+ rightarg = int8, -- left unary
+ procedure = numeric_fac
);
CREATE OPERATOR #@# (
leftarg = int8, -- right unary
procedure = numeric_fac
);
-CREATE OPERATOR #%# (
- leftarg = int8, -- right unary
- procedure = numeric_fac
+CREATE OPERATOR #%# (
+ leftarg = int8, -- right unary
+ procedure = numeric_fac
);
-- Test comments
COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad right unary';
-- CLASS DEFINITIONS
--
CREATE TABLE hobbies_r (
- name text,
+ name text,
person text
);
CREATE TABLE equipment_r (
-- f inherits from e (three-level single inheritance)
--
CREATE TABLE a_star (
- class char,
+ class char,
a int4
);
CREATE TABLE b_star (
);
-- don't include the hash_ovfl_heap stuff in the distribution
-- the data set is too large for what it's worth
---
+--
-- CREATE TABLE hash_ovfl_heap (
-- x int4,
-- y int4
random int4
);
CREATE TABLE bt_f8_heap (
- seqno float8,
+ seqno float8,
random int4
);
CREATE TABLE array_op_test (
i int4[],
t text[]
);
-CREATE TABLE IF NOT EXISTS test_tsvector(
- t text,
- a tsvector
+CREATE TABLE IF NOT EXISTS test_tsvector(
+ t text,
+ a tsvector
);
-CREATE TABLE IF NOT EXISTS test_tsvector(
+CREATE TABLE IF NOT EXISTS test_tsvector(
t text
);
NOTICE: relation "test_tsvector" already exists, skipping
-- of the "old style" approach of making the functions first.
--
CREATE TYPE widget (
- internallength = 24,
+ internallength = 24,
input = widget_in,
output = widget_out,
typmod_in = numerictypmodin,
typmod_out = numerictypmodout,
alignment = double
);
-CREATE TYPE city_budget (
- internallength = 16,
- input = int44in,
- output = int44out,
+CREATE TYPE city_budget (
+ internallength = 16,
+ input = int44in,
+ output = int44out,
element = int4,
category = 'x', -- just to verify the system will take it
preferred = true -- ditto
-- (this also tests the query rewrite system)
--
CREATE VIEW street AS
- SELECT r.name, r.thepath, c.cname AS cname
+ SELECT r.name, r.thepath, c.cname AS cname
FROM ONLY road r, real_city c
WHERE c.outline ## r.thepath;
CREATE VIEW iexit AS
- SELECT ih.name, ih.thepath,
+ SELECT ih.name, ih.thepath,
interpt_pp(ih.thepath, r.thepath) AS exit
FROM ihighway ih, ramp r
WHERE ih.thepath ## r.thepath;
CREATE OR REPLACE VIEW viewtest AS
SELECT a, b::numeric FROM viewtest_tbl;
ERROR: cannot change data type of view column "b" from integer to numeric
--- should work
+-- should work
CREATE OR REPLACE VIEW viewtest AS
SELECT a, b, 0 AS c FROM viewtest_tbl;
DROP VIEW viewtest;
CREATE VIEW v13_temp AS SELECT seq1_temp.is_called FROM seq1_temp;
NOTICE: view "v13_temp" will be a temporary view
SELECT relname FROM pg_class
- WHERE relname LIKE 'v_'
+ WHERE relname LIKE 'v_'
AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'temp_view_test')
ORDER BY relname;
relname
(9 rows)
SELECT relname FROM pg_class
- WHERE relname LIKE 'v%'
+ WHERE relname LIKE 'v%'
AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%')
ORDER BY relname;
relname
(4 rows)
SELECT relname FROM pg_class
- WHERE relname LIKE 'temporal%'
+ WHERE relname LIKE 'temporal%'
AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%')
ORDER BY relname;
relname
---
+--
-- IF EXISTS tests
---
+--
-- table (will be really dropped at the end)
DROP TABLE test_exists;
ERROR: table "test_exists" does not exist
--
-- UNSUPPORTED STUFF
-
--- doesn't work
+-- doesn't work
-- notify pg_class
--
--
-- SELECT
-
--- missing relation name
+-- missing relation name
select;
ERROR: syntax error at or near ";"
LINE 1: select;
^
--- no such relation
+-- no such relation
select * from nonesuch;
ERROR: relation "nonesuch" does not exist
LINE 1: select * from nonesuch;
^
--
-- DELETE
-
--- missing relation name (this had better not wildcard!)
+-- missing relation name (this had better not wildcard!)
delete from;
ERROR: syntax error at or near ";"
LINE 1: delete from;
^
--- no such relation
+-- no such relation
delete from nonesuch;
ERROR: relation "nonesuch" does not exist
LINE 1: delete from nonesuch;
^
--
-- DROP
-
--- missing relation name (this had better not wildcard!)
+-- missing relation name (this had better not wildcard!)
drop table;
ERROR: syntax error at or near ";"
LINE 1: drop table;
^
--- no such relation
+-- no such relation
drop table nonesuch;
ERROR: table "nonesuch" does not exist
--
-- ALTER TABLE
-
--- relation renaming
--- missing relation name
+-- relation renaming
+-- missing relation name
alter table rename;
ERROR: syntax error at or near ";"
LINE 1: alter table rename;
^
--- no such relation
+-- no such relation
alter table nonesuch rename to newnonesuch;
ERROR: relation "nonesuch" does not exist
--- no such relation
+-- no such relation
alter table nonesuch rename to stud_emp;
ERROR: relation "nonesuch" does not exist
--- conflict
+-- conflict
alter table stud_emp rename to aggtest;
ERROR: relation "aggtest" already exists
--- self-conflict
+-- self-conflict
alter table stud_emp rename to stud_emp;
ERROR: relation "stud_emp" already exists
--- attribute renaming
--- no such relation
+-- attribute renaming
+-- no such relation
alter table nonesuchrel rename column nonesuchatt to newnonesuchatt;
ERROR: relation "nonesuchrel" does not exist
--- no such attribute
+-- no such attribute
alter table emp rename column nonesuchatt to newnonesuchatt;
ERROR: column "nonesuchatt" does not exist
--- conflict
+-- conflict
alter table emp rename column salary to manager;
ERROR: column "manager" of relation "stud_emp" already exists
--- conflict
+-- conflict
alter table emp rename column salary to oid;
ERROR: column "oid" of relation "stud_emp" already exists
--
-- TRANSACTION STUFF
-
--- not in a xact
+-- not in a xact
abort;
NOTICE: there is no transaction in progress
--- not in a xact
+-- not in a xact
end;
WARNING: there is no transaction in progress
--
-- CREATE AGGREGATE
--- sfunc/finalfunc type disagreement
+-- sfunc/finalfunc type disagreement
create aggregate newavg2 (sfunc = int4pl,
basetype = int4,
stype = int4,
ERROR: aggregate input type must be specified
--
-- DROP INDEX
-
--- missing index name
+-- missing index name
drop index;
ERROR: syntax error at or near ";"
LINE 1: drop index;
^
--- bad index name
+-- bad index name
drop index 314159;
ERROR: syntax error at or near "314159"
LINE 1: drop index 314159;
^
--- no such index
+-- no such index
drop index nonesuch;
ERROR: index "nonesuch" does not exist
--
-- DROP AGGREGATE
-
--- missing aggregate name
+-- missing aggregate name
drop aggregate;
ERROR: syntax error at or near ";"
LINE 1: drop aggregate;
ERROR: syntax error at or near ";"
LINE 1: drop aggregate newcnt1;
^
--- bad aggregate name
+-- bad aggregate name
drop aggregate 314159 (int);
ERROR: syntax error at or near "314159"
LINE 1: drop aggregate 314159 (int);
-- bad aggregate type
drop aggregate newcnt (nonesuch);
ERROR: type "nonesuch" does not exist
--- no such aggregate
+-- no such aggregate
drop aggregate nonesuch (int4);
ERROR: aggregate nonesuch(integer) does not exist
-- no such aggregate for type
ERROR: aggregate newcnt(real) does not exist
--
-- DROP FUNCTION
-
--- missing function name
+-- missing function name
drop function ();
ERROR: syntax error at or near "("
LINE 1: drop function ();
^
--- bad function name
+-- bad function name
drop function 314159();
ERROR: syntax error at or near "314159"
LINE 1: drop function 314159();
^
--- no such function
+-- no such function
drop function nonesuch();
ERROR: function nonesuch() does not exist
--
-- DROP TYPE
-
--- missing type name
+-- missing type name
drop type;
ERROR: syntax error at or near ";"
LINE 1: drop type;
^
--- bad type name
+-- bad type name
drop type 314159;
ERROR: syntax error at or near "314159"
LINE 1: drop type 314159;
^
--- no such type
+-- no such type
drop type nonesuch;
ERROR: type "nonesuch" does not exist
--
-- DROP OPERATOR
-
--- missing everything
+-- missing everything
drop operator;
ERROR: syntax error at or near ";"
LINE 1: drop operator;
^
--- bad operator name
+-- bad operator name
drop operator equals;
ERROR: syntax error at or near ";"
LINE 1: drop operator equals;
^
--- missing type list
+-- missing type list
drop operator ===;
ERROR: syntax error at or near ";"
LINE 1: drop operator ===;
^
--- missing parentheses
+-- missing parentheses
drop operator int4, int4;
ERROR: syntax error at or near ","
LINE 1: drop operator int4, int4;
^
--- missing operator name
+-- missing operator name
drop operator (int4, int4);
ERROR: syntax error at or near "("
LINE 1: drop operator (int4, int4);
^
--- missing type list contents
+-- missing type list contents
drop operator === ();
ERROR: syntax error at or near ")"
LINE 1: drop operator === ();
^
--- no such operator
+-- no such operator
drop operator === (int4);
ERROR: missing argument
LINE 1: drop operator === (int4);
^
HINT: Use NONE to denote the missing argument of a unary operator.
--- no such operator by that name
+-- no such operator by that name
drop operator === (int4, int4);
ERROR: operator does not exist: integer === integer
--- no such type1
+-- no such type1
drop operator = (nonesuch);
ERROR: missing argument
LINE 1: drop operator = (nonesuch);
^
HINT: Use NONE to denote the missing argument of a unary operator.
--- no such type1
+-- no such type1
drop operator = ( , int4);
ERROR: syntax error at or near ","
LINE 1: drop operator = ( , int4);
^
--- no such type1
+-- no such type1
drop operator = (nonesuch, int4);
ERROR: type "nonesuch" does not exist
--- no such type2
+-- no such type2
drop operator = (int4, nonesuch);
ERROR: type "nonesuch" does not exist
--- no such type2
+-- no such type2
drop operator = (int4, );
ERROR: syntax error at or near ")"
LINE 1: drop operator = (int4, );
^
--
-- DROP RULE
-
--- missing rule name
+-- missing rule name
drop rule;
ERROR: syntax error at or near ";"
LINE 1: drop rule;
^
--- bad rule name
+-- bad rule name
drop rule 314159;
ERROR: syntax error at or near "314159"
LINE 1: drop rule 314159;
^
--- no such rule
+-- no such rule
drop rule nonesuch on noplace;
ERROR: relation "noplace" does not exist
-- these postquel variants are no longer supported
ERROR: syntax error at or near "123"
LINE 1: INSERT INTO 123
^
-INSERT INTO foo
+INSERT INTO foo
VALUES(123) 123
;
ERROR: syntax error at or near "123"
LINE 3: id3 INTEGER NOT NUL,
^
-- long line to be truncated on the left
-CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
+CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL);
ERROR: syntax error at or near "NUL"
-LINE 1: ...T NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
- ^
+LINE 1: ...OT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
+ ^
-- long line to be truncated on the right
CREATE TABLE foo(
id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY);
-- long line to be truncated on the left, many lines
CREATE
TEMPORARY
-TABLE
-foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
-id4 INT4
-UNIQUE
-NOT
-NULL,
-id5 TEXT
-UNIQUE
-NOT
+TABLE
+foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
+id4 INT4
+UNIQUE
+NOT
+NULL,
+id5 TEXT
+UNIQUE
+NOT
NULL)
;
ERROR: syntax error at or near "NUL"
-LINE 4: ...T NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
- ^
+LINE 4: ...OT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
+ ^
-- long line to be truncated on the right, many lines
-CREATE
+CREATE
TEMPORARY
-TABLE
+TABLE
foo(
id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY)
;
LINE 5: id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQ...
^
-- long line to be truncated both ways, many lines
-CREATE
+CREATE
TEMPORARY
-TABLE
+TABLE
foo
-(id
-INT4
-UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL,
-idz INT4 UNIQUE NOT NULL,
+(id
+INT4
+UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL,
+idz INT4 UNIQUE NOT NULL,
idv INT4 UNIQUE NOT NULL);
ERROR: syntax error at or near "NUL"
LINE 7: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I...
^
-- more than 10 lines...
-CREATE
+CREATE
TEMPORARY
-TABLE
+TABLE
foo
-(id
-INT4
-UNIQUE
-NOT
+(id
+INT4
+UNIQUE
+NOT
NULL
-,
+,
idm
-INT4
-UNIQUE
-NOT
+INT4
+UNIQUE
+NOT
NULL,
-idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL,
-idz INT4 UNIQUE NOT NULL,
-idv
-INT4
-UNIQUE
-NOT
+idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL,
+idz INT4 UNIQUE NOT NULL,
+idv
+INT4
+UNIQUE
+NOT
NULL);
ERROR: syntax error at or near "NUL"
LINE 16: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I...
INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 ');
INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20');
INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20');
--- test for over and under flow
+-- test for over and under flow
INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
ERROR: value out of range: overflow
LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
| 1.23457e-020
(5 rows)
--- test the unary float4abs operator
+-- test the unary float4abs operator
SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f;
five | f1 | abs_f1
------+--------------+--------------
INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 ');
INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20');
INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20');
--- test for over and under flow
+-- test for over and under flow
INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
ERROR: value out of range: overflow
LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
| 1.23457e-20
(5 rows)
--- test the unary float4abs operator
+-- test the unary float4abs operator
SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f;
five | f1 | abs_f1
------+-------------+-------------
| 1.2345678901234e-200
(4 rows)
-SELECT '' AS three, f.f1, f.f1 * '-10' AS x
+SELECT '' AS three, f.f1, f.f1 * '-10' AS x
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
three | f1 | x
| 1008618.49
(1 row)
--- absolute value
-SELECT '' AS five, f.f1, @f.f1 AS abs_f1
+-- absolute value
+SELECT '' AS five, f.f1, @f.f1 AS abs_f1
FROM FLOAT8_TBL f;
five | f1 | abs_f1
------+----------------------+----------------------
| 1.2345678901234e-200 | 1.2345678901234e-200
(5 rows)
--- truncate
+-- truncate
SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1
FROM FLOAT8_TBL f;
five | f1 | trunc_f1
| 1.2345678901234e-200 | 0
(5 rows)
--- round
+-- round
SELECT '' AS five, f.f1, round(f.f1) AS round_f1
FROM FLOAT8_TBL f;
five | f1 | round_f1
1
(5 rows)
--- square root
+-- square root
SELECT sqrt(float8 '64') AS eight;
eight
-------
12
(1 row)
--- take exp of ln(f.f1)
+-- take exp of ln(f.f1)
SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
| 1.2345678901234e-200 | 1.23456789012339e-200
(3 rows)
--- cube root
+-- cube root
SELECT ||/ float8 '27' AS three;
three
-------
| -1.2345678901234e-200
(5 rows)
--- test for over- and underflow
+-- test for over- and underflow
INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
ERROR: "10e400" is out of range for type double precision
LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
| 1.2345678901234e-200
(4 rows)
-SELECT '' AS three, f.f1, f.f1 * '-10' AS x
+SELECT '' AS three, f.f1, f.f1 * '-10' AS x
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
three | f1 | x
| 1008618.49
(1 row)
--- absolute value
-SELECT '' AS five, f.f1, @f.f1 AS abs_f1
+-- absolute value
+SELECT '' AS five, f.f1, @f.f1 AS abs_f1
FROM FLOAT8_TBL f;
five | f1 | abs_f1
------+----------------------+----------------------
| 1.2345678901234e-200 | 1.2345678901234e-200
(5 rows)
--- truncate
+-- truncate
SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1
FROM FLOAT8_TBL f;
five | f1 | trunc_f1
| 1.2345678901234e-200 | 0
(5 rows)
--- round
+-- round
SELECT '' AS five, f.f1, round(f.f1) AS round_f1
FROM FLOAT8_TBL f;
five | f1 | round_f1
1
(5 rows)
--- square root
+-- square root
SELECT sqrt(float8 '64') AS eight;
eight
-------
12
(1 row)
--- take exp of ln(f.f1)
+-- take exp of ln(f.f1)
SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
| 1.2345678901234e-200 | 1.23456789012339e-200
(3 rows)
--- cube root
+-- cube root
SELECT ||/ float8 '27' AS three;
three
-------
| -1.2345678901234e-200
(5 rows)
--- test for over- and underflow
+-- test for over- and underflow
INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
ERROR: "10e400" is out of range for type double precision
LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
| 1.2345678901234e-200
(4 rows)
-SELECT '' AS three, f.f1, f.f1 * '-10' AS x
+SELECT '' AS three, f.f1, f.f1 * '-10' AS x
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
three | f1 | x
| 1008618.49
(1 row)
--- absolute value
-SELECT '' AS five, f.f1, @f.f1 AS abs_f1
+-- absolute value
+SELECT '' AS five, f.f1, @f.f1 AS abs_f1
FROM FLOAT8_TBL f;
five | f1 | abs_f1
------+----------------------+----------------------
| 1.2345678901234e-200 | 1.2345678901234e-200
(5 rows)
--- truncate
+-- truncate
SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1
FROM FLOAT8_TBL f;
five | f1 | trunc_f1
| 1.2345678901234e-200 | 0
(5 rows)
--- round
+-- round
SELECT '' AS five, f.f1, round(f.f1) AS round_f1
FROM FLOAT8_TBL f;
five | f1 | round_f1
1
(5 rows)
--- square root
+-- square root
SELECT sqrt(float8 '64') AS eight;
eight
-------
12
(1 row)
--- take exp of ln(f.f1)
+-- take exp of ln(f.f1)
SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
| 1.2345678901234e-200 | 1.23456789012339e-200
(3 rows)
--- cube root
+-- cube root
SELECT ||/ float8 '27' AS three;
three
-------
| -1.2345678901234e-200
(5 rows)
--- test for over- and underflow
+-- test for over- and underflow
INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
ERROR: "10e400" is out of range for type double precision
LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
| 1.2345678901234e-200
(4 rows)
-SELECT '' AS three, f.f1, f.f1 * '-10' AS x
+SELECT '' AS three, f.f1, f.f1 * '-10' AS x
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
three | f1 | x
| 1008618.49
(1 row)
--- absolute value
-SELECT '' AS five, f.f1, @f.f1 AS abs_f1
+-- absolute value
+SELECT '' AS five, f.f1, @f.f1 AS abs_f1
FROM FLOAT8_TBL f;
five | f1 | abs_f1
------+----------------------+----------------------
| 1.2345678901234e-200 | 1.2345678901234e-200
(5 rows)
--- truncate
+-- truncate
SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1
FROM FLOAT8_TBL f;
five | f1 | trunc_f1
| 1.2345678901234e-200 | 0
(5 rows)
--- round
+-- round
SELECT '' AS five, f.f1, round(f.f1) AS round_f1
FROM FLOAT8_TBL f;
five | f1 | round_f1
1
(5 rows)
--- square root
+-- square root
SELECT sqrt(float8 '64') AS eight;
eight
-------
12
(1 row)
--- take exp of ln(f.f1)
+-- take exp of ln(f.f1)
SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
| 1.2345678901234e-200 | 1.23456789012339e-200
(3 rows)
--- cube root
+-- cube root
SELECT ||/ float8 '27' AS three;
three
-------
| -1.2345678901234e-200
(5 rows)
--- test for over- and underflow
+-- test for over- and underflow
INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
ERROR: "10e400" is out of range for type double precision
LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
--
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable"
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2)
+CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2)
REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL);
-- Test comments
COMMENT ON CONSTRAINT constrname_wrong ON FKTABLE IS 'fk constraint comment';
--
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable"
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
+CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT);
-- Insert a value in PKTABLE for default
INSERT INTO PKTABLE VALUES (-1, -2, 'The Default!');
INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
+INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
+INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
+INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
+INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
INSERT INTO PKTABLE VALUES (2, -1, 5, 'test5');
-- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
+INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
+INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
INSERT INTO FKTABLE VALUES (2, 4, 5, 1);
INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
--
-- Tests for mismatched types
--
--- Basic one column, two table setup
+-- Basic one column, two table setup
CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable"
INSERT INTO PKTABLE VALUES(42);
ptest4) REFERENCES pktable(ptest1, ptest2));
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable"
DROP TABLE PKTABLE;
--- And this,
+-- And this,
CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable"
1492 | 1
(1 row)
-UPDATE hash_i4_heap
- SET seqno = 20000
+UPDATE hash_i4_heap
+ SET seqno = 20000
WHERE hash_i4_heap.random = 1492795354;
-SELECT h.seqno AS i20000
+SELECT h.seqno AS i20000
FROM hash_i4_heap h
WHERE h.random = 1492795354;
i20000
20000
(1 row)
-UPDATE hash_name_heap
+UPDATE hash_name_heap
SET random = '0123456789abcdef'::name
WHERE hash_name_heap.seqno = 6543;
SELECT h.seqno AS i6543, h.random AS c0_to_f
SET seqno = 20000
WHERE hash_name_heap.random = '76652222'::name;
--
--- this is the row we just replaced; index scan should return zero rows
+-- this is the row we just replaced; index scan should return zero rows
--
SELECT h.seqno AS emptyset
FROM hash_name_heap h
----------
(0 rows)
-UPDATE hash_txt_heap
+UPDATE hash_txt_heap
SET random = '0123456789abcdefghijklmnop'::text
WHERE hash_txt_heap.seqno = 4002;
SELECT h.seqno AS i4002, h.random AS c0_to_p
UPDATE hash_f8_heap
SET random = '-1234.1234'::float8
WHERE hash_f8_heap.seqno = 8906;
-SELECT h.seqno AS i8096, h.random AS f1234_1234
+SELECT h.seqno AS i8096, h.random AS f1234_1234
FROM hash_f8_heap h
WHERE h.random = '-1234.1234'::float8;
i8096 | f1234_1234
8906 | -1234.1234
(1 row)
-UPDATE hash_f8_heap
+UPDATE hash_f8_heap
SET seqno = 20000
WHERE hash_f8_heap.random = '488912369'::float8;
SELECT h.seqno AS f20000
(2 rows)
SELECT '' AS ten, i, c,
- i < c AS lt, i <= c AS le, i = c AS eq,
+ i < c AS lt, i <= c AS le, i = c AS eq,
i >= c AS ge, i > c AS gt, i <> c AS ne,
i << c AS sb, i <<= c AS sbe,
i >> c AS sup, i >>= c AS spe
* Test double inheritance
*
* Ensure that defaults are NOT included unless
- * INCLUDING DEFAULTS is specified
+ * INCLUDING DEFAULTS is specified
*/
CREATE TABLE inhe (ee text, LIKE inhx) inherits (b);
INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4');
| 32767
(3 rows)
--- positive odds
+-- positive odds
SELECT '' AS one, i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
one | f1
-----+-------
| 32767
(1 row)
--- any evens
+-- any evens
SELECT '' AS three, i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
three | f1
-------+-------
-- TO_CHAR()
--
-SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999')
+SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999')
FROM INT8_TBL;
to_char_1 | to_char | to_char
-----------+------------------------+------------------------
| 4,567,890,123,456,789 | -4,567,890,123,456,789
(5 rows)
-SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999')
- FROM INT8_TBL;
+SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999')
+ FROM INT8_TBL;
to_char_2 | to_char | to_char
-----------+--------------------------------+--------------------------------
| 123.000,000 | 456.000,000
| 4,567,890,123,456,789.000,000 | -4,567,890,123,456,789.000,000
(5 rows)
-SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR')
+SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR')
FROM INT8_TBL;
to_char_3 | to_char | to_char
-----------+--------------------+------------------------
| <4567890123456789> | 4567890123456789.000
(5 rows)
-SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999')
+SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999')
FROM INT8_TBL;
to_char_4 | to_char | to_char
-----------+-------------------+-------------------
| 4567890123456789- | +4567890123456789
(5 rows)
-SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL;
to_char_5 | to_char
-----------+-------------------
| 456
| <4567890123456789>
(5 rows)
-SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL;
+SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL;
to_char_8 | to_char
-----------+---------------------
| + 456th
| -4567890123456789
(5 rows)
-SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL;
to_char_9 | to_char
-----------+-------------------
| 0000000000000456
| -4567890123456789
(5 rows)
-SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL;
to_char_10 | to_char
------------+-------------------
| +0000000000000456
| -4567890123456789
(5 rows)
-SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL;
to_char_11 | to_char
------------+-------------------
| 0000000000000456
| -4567890123456789.000
(5 rows)
-SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL;
+SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL;
to_char_13 | to_char
------------+------------------------
| 456.000
-- TO_CHAR()
--
-SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999')
+SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999')
FROM INT8_TBL;
to_char_1 | to_char | to_char
-----------+------------------------+------------------------
| 4,567,890,123,456,789 | -4,567,890,123,456,789
(5 rows)
-SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999')
- FROM INT8_TBL;
+SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999')
+ FROM INT8_TBL;
to_char_2 | to_char | to_char
-----------+--------------------------------+--------------------------------
| 123.000,000 | 456.000,000
| 4,567,890,123,456,789.000,000 | -4,567,890,123,456,789.000,000
(5 rows)
-SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR')
+SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR')
FROM INT8_TBL;
to_char_3 | to_char | to_char
-----------+--------------------+------------------------
| <4567890123456789> | 4567890123456789.000
(5 rows)
-SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999')
+SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999')
FROM INT8_TBL;
to_char_4 | to_char | to_char
-----------+-------------------+-------------------
| 4567890123456789- | +4567890123456789
(5 rows)
-SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL;
to_char_5 | to_char
-----------+-------------------
| 456
| <4567890123456789>
(5 rows)
-SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL;
+SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL;
to_char_8 | to_char
-----------+---------------------
| + 456th
| -4567890123456789
(5 rows)
-SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL;
to_char_9 | to_char
-----------+-------------------
| 0000000000000456
| -4567890123456789
(5 rows)
-SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL;
to_char_10 | to_char
------------+-------------------
| +0000000000000456
| -4567890123456789
(5 rows)
-SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL;
to_char_11 | to_char
------------+-------------------
| 0000000000000456
| -4567890123456789.000
(5 rows)
-SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL;
+SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL;
to_char_13 | to_char
------------+------------------------
| 456.000
| 34 years
(1 row)
-SELECT '' AS five, * FROM INTERVAL_TBL
+SELECT '' AS five, * FROM INTERVAL_TBL
WHERE INTERVAL_TBL.f1 >= interval '@ 1 month';
five | f1
------+-----------------
(45 rows)
-- Test multiplication and division with intervals.
--- Floating point arithmetic rounding errors can lead to unexpected results,
--- though the code attempts to do the right thing and round up to days and
--- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'.
--- Note that it is expected for some day components to be greater than 29 and
--- some time components be greater than 23:59:59 due to how intervals are
+-- Floating point arithmetic rounding errors can lead to unexpected results,
+-- though the code attempts to do the right thing and round up to days and
+-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'.
+-- Note that it is expected for some day components to be greater than 29 and
+-- some time components be greater than 23:59:59 due to how intervals are
-- stored internally.
CREATE TABLE INTERVAL_MULDIV_TBL (span interval);
COPY INTERVAL_MULDIV_TBL FROM STDIN;
@ 1 year 2 mons 3 days 4 hours 5 mins 6.699999 secs
(1 row)
-select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds';
+select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds';
interval | interval | interval
------------+------------+------------
@ 0.7 secs | @ 0.7 secs | @ 0.7 secs
-- LIMIT
-- Check the LIMIT/OFFSET feature of SELECT
--
-SELECT ''::text AS two, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 50
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 50
ORDER BY unique1 LIMIT 2;
two | unique1 | unique2 | stringu1
-----+---------+---------+----------
| 52 | 985 | ACAAAA
(2 rows)
-SELECT ''::text AS five, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 60
+SELECT ''::text AS five, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 60
ORDER BY unique1 LIMIT 5;
five | unique1 | unique2 | stringu1
------+---------+---------+----------
| 65 | 64 | NCAAAA
(5 rows)
-SELECT ''::text AS two, unique1, unique2, stringu1
+SELECT ''::text AS two, unique1, unique2, stringu1
FROM onek WHERE unique1 > 60 AND unique1 < 63
ORDER BY unique1 LIMIT 5;
two | unique1 | unique2 | stringu1
| 62 | 633 | KCAAAA
(2 rows)
-SELECT ''::text AS three, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 100
+SELECT ''::text AS three, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 100
ORDER BY unique1 LIMIT 3 OFFSET 20;
three | unique1 | unique2 | stringu1
-------+---------+---------+----------
| 123 | 777 | TEAAAA
(3 rows)
-SELECT ''::text AS zero, unique1, unique2, stringu1
- FROM onek WHERE unique1 < 50
+SELECT ''::text AS zero, unique1, unique2, stringu1
+ FROM onek WHERE unique1 < 50
ORDER BY unique1 DESC LIMIT 8 OFFSET 99;
zero | unique1 | unique2 | stringu1
------+---------+---------+----------
(0 rows)
-SELECT ''::text AS eleven, unique1, unique2, stringu1
- FROM onek WHERE unique1 < 50
+SELECT ''::text AS eleven, unique1, unique2, stringu1
+ FROM onek WHERE unique1 < 50
ORDER BY unique1 DESC LIMIT 20 OFFSET 39;
eleven | unique1 | unique2 | stringu1
--------+---------+---------+----------
| 0 | 998 | AAAAAA
(11 rows)
-SELECT ''::text AS ten, unique1, unique2, stringu1
+SELECT ''::text AS ten, unique1, unique2, stringu1
FROM onek
ORDER BY unique1 OFFSET 990;
ten | unique1 | unique2 | stringu1
| 999 | 152 | LMAAAA
(10 rows)
-SELECT ''::text AS five, unique1, unique2, stringu1
+SELECT ''::text AS five, unique1, unique2, stringu1
FROM onek
ORDER BY unique1 OFFSET 990 LIMIT 5;
five | unique1 | unique2 | stringu1
| 994 | 695 | GMAAAA
(5 rows)
-SELECT ''::text AS five, unique1, unique2, stringu1
+SELECT ''::text AS five, unique1, unique2, stringu1
FROM onek
ORDER BY unique1 LIMIT 5 OFFSET 900;
five | unique1 | unique2 | stringu1
DROP TABLE width_bucket_test;
-- TO_CHAR()
--
-SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999')
+SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999')
FROM num_data;
to_char_1 | to_char
-----------+------------------------
| 24926804.045047420000000-
(10 rows)
-SELECT '' AS to_char_5, to_char(val, 'MI9999999999999999.999999999999999') FROM num_data;
+SELECT '' AS to_char_5, to_char(val, 'MI9999999999999999.999999999999999') FROM num_data;
to_char_5 | to_char
-----------+-----------------------------------
| .000000000000000
| <24926804.04504742>
(10 rows)
-SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data;
+SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data;
to_char_8 | to_char
-----------+-----------------------------------
| + .000000000000000
| - 24926804.045047420000000
(10 rows)
-SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') FROM num_data;
+SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') FROM num_data;
to_char_9 | to_char
-----------+-----------------------------------
| 0000000000000000.000000000000000
| -0000000024926804.045047420000000
(10 rows)
-SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') FROM num_data;
+SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') FROM num_data;
to_char_10 | to_char
------------+-----------------------------------
| +0000000000000000.000000000000000
| -0000000024926804.045047420000000
(10 rows)
-SELECT '' AS to_char_11, to_char(val, 'FM0999999999999999.999999999999999') FROM num_data;
+SELECT '' AS to_char_11, to_char(val, 'FM0999999999999999.999999999999999') FROM num_data;
to_char_11 | to_char
------------+-----------------------------
| 0000000000000000.
| -24926804.04504742
(10 rows)
-SELECT '' AS to_char_16, to_char(val, 'L9999999999999999.099999999999999') FROM num_data;
+SELECT '' AS to_char_16, to_char(val, 'L9999999999999999.099999999999999') FROM num_data;
to_char_16 | to_char
------------+------------------------------------
| .000000000000000
INSERT INTO OID_TBL(f1) VALUES (' 10 ');
-- leading/trailing hard tab is also allowed
INSERT INTO OID_TBL(f1) VALUES (' 15 ');
--- bad inputs
+-- bad inputs
INSERT INTO OID_TBL(f1) VALUES ('');
ERROR: invalid input syntax for type oid: ""
LINE 1: INSERT INTO OID_TBL(f1) VALUES ('');
--
-- This is created by pgsql/src/tools/findoidjoins/make_oidjoins_check
--
-SELECT ctid, aggfnoid
-FROM pg_catalog.pg_aggregate fk
-WHERE aggfnoid != 0 AND
+SELECT ctid, aggfnoid
+FROM pg_catalog.pg_aggregate fk
+WHERE aggfnoid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggfnoid);
ctid | aggfnoid
------+----------
(0 rows)
-SELECT ctid, aggtransfn
-FROM pg_catalog.pg_aggregate fk
-WHERE aggtransfn != 0 AND
+SELECT ctid, aggtransfn
+FROM pg_catalog.pg_aggregate fk
+WHERE aggtransfn != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggtransfn);
ctid | aggtransfn
------+------------
(0 rows)
-SELECT ctid, aggfinalfn
-FROM pg_catalog.pg_aggregate fk
-WHERE aggfinalfn != 0 AND
+SELECT ctid, aggfinalfn
+FROM pg_catalog.pg_aggregate fk
+WHERE aggfinalfn != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggfinalfn);
ctid | aggfinalfn
------+------------
(0 rows)
-SELECT ctid, aggsortop
-FROM pg_catalog.pg_aggregate fk
-WHERE aggsortop != 0 AND
+SELECT ctid, aggsortop
+FROM pg_catalog.pg_aggregate fk
+WHERE aggsortop != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.aggsortop);
ctid | aggsortop
------+-----------
(0 rows)
-SELECT ctid, aggtranstype
-FROM pg_catalog.pg_aggregate fk
-WHERE aggtranstype != 0 AND
+SELECT ctid, aggtranstype
+FROM pg_catalog.pg_aggregate fk
+WHERE aggtranstype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.aggtranstype);
ctid | aggtranstype
------+--------------
(0 rows)
-SELECT ctid, amkeytype
-FROM pg_catalog.pg_am fk
-WHERE amkeytype != 0 AND
+SELECT ctid, amkeytype
+FROM pg_catalog.pg_am fk
+WHERE amkeytype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amkeytype);
ctid | amkeytype
------+-----------
(0 rows)
-SELECT ctid, aminsert
-FROM pg_catalog.pg_am fk
-WHERE aminsert != 0 AND
+SELECT ctid, aminsert
+FROM pg_catalog.pg_am fk
+WHERE aminsert != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aminsert);
ctid | aminsert
------+----------
(0 rows)
-SELECT ctid, ambeginscan
-FROM pg_catalog.pg_am fk
-WHERE ambeginscan != 0 AND
+SELECT ctid, ambeginscan
+FROM pg_catalog.pg_am fk
+WHERE ambeginscan != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambeginscan);
ctid | ambeginscan
------+-------------
(0 rows)
-SELECT ctid, amgettuple
-FROM pg_catalog.pg_am fk
-WHERE amgettuple != 0 AND
+SELECT ctid, amgettuple
+FROM pg_catalog.pg_am fk
+WHERE amgettuple != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amgettuple);
ctid | amgettuple
------+------------
(0 rows)
-SELECT ctid, amgetbitmap
-FROM pg_catalog.pg_am fk
-WHERE amgetbitmap != 0 AND
+SELECT ctid, amgetbitmap
+FROM pg_catalog.pg_am fk
+WHERE amgetbitmap != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amgetbitmap);
ctid | amgetbitmap
------+-------------
(0 rows)
-SELECT ctid, amrescan
-FROM pg_catalog.pg_am fk
-WHERE amrescan != 0 AND
+SELECT ctid, amrescan
+FROM pg_catalog.pg_am fk
+WHERE amrescan != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amrescan);
ctid | amrescan
------+----------
(0 rows)
-SELECT ctid, amendscan
-FROM pg_catalog.pg_am fk
-WHERE amendscan != 0 AND
+SELECT ctid, amendscan
+FROM pg_catalog.pg_am fk
+WHERE amendscan != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amendscan);
ctid | amendscan
------+-----------
(0 rows)
-SELECT ctid, ammarkpos
-FROM pg_catalog.pg_am fk
-WHERE ammarkpos != 0 AND
+SELECT ctid, ammarkpos
+FROM pg_catalog.pg_am fk
+WHERE ammarkpos != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ammarkpos);
ctid | ammarkpos
------+-----------
(0 rows)
-SELECT ctid, amrestrpos
-FROM pg_catalog.pg_am fk
-WHERE amrestrpos != 0 AND
+SELECT ctid, amrestrpos
+FROM pg_catalog.pg_am fk
+WHERE amrestrpos != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amrestrpos);
ctid | amrestrpos
------+------------
(0 rows)
-SELECT ctid, ambuild
-FROM pg_catalog.pg_am fk
-WHERE ambuild != 0 AND
+SELECT ctid, ambuild
+FROM pg_catalog.pg_am fk
+WHERE ambuild != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambuild);
ctid | ambuild
------+---------
(0 rows)
-SELECT ctid, ambulkdelete
-FROM pg_catalog.pg_am fk
-WHERE ambulkdelete != 0 AND
+SELECT ctid, ambulkdelete
+FROM pg_catalog.pg_am fk
+WHERE ambulkdelete != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambulkdelete);
ctid | ambulkdelete
------+--------------
(0 rows)
-SELECT ctid, amvacuumcleanup
-FROM pg_catalog.pg_am fk
-WHERE amvacuumcleanup != 0 AND
+SELECT ctid, amvacuumcleanup
+FROM pg_catalog.pg_am fk
+WHERE amvacuumcleanup != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amvacuumcleanup);
ctid | amvacuumcleanup
------+-----------------
(0 rows)
-SELECT ctid, amcostestimate
-FROM pg_catalog.pg_am fk
-WHERE amcostestimate != 0 AND
+SELECT ctid, amcostestimate
+FROM pg_catalog.pg_am fk
+WHERE amcostestimate != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amcostestimate);
ctid | amcostestimate
------+----------------
(0 rows)
-SELECT ctid, amoptions
-FROM pg_catalog.pg_am fk
-WHERE amoptions != 0 AND
+SELECT ctid, amoptions
+FROM pg_catalog.pg_am fk
+WHERE amoptions != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amoptions);
ctid | amoptions
------+-----------
(0 rows)
-SELECT ctid, amopfamily
-FROM pg_catalog.pg_amop fk
-WHERE amopfamily != 0 AND
+SELECT ctid, amopfamily
+FROM pg_catalog.pg_amop fk
+WHERE amopfamily != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.amopfamily);
ctid | amopfamily
------+------------
(0 rows)
-SELECT ctid, amoplefttype
-FROM pg_catalog.pg_amop fk
-WHERE amoplefttype != 0 AND
+SELECT ctid, amoplefttype
+FROM pg_catalog.pg_amop fk
+WHERE amoplefttype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amoplefttype);
ctid | amoplefttype
------+--------------
(0 rows)
-SELECT ctid, amoprighttype
-FROM pg_catalog.pg_amop fk
-WHERE amoprighttype != 0 AND
+SELECT ctid, amoprighttype
+FROM pg_catalog.pg_amop fk
+WHERE amoprighttype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amoprighttype);
ctid | amoprighttype
------+---------------
(0 rows)
-SELECT ctid, amopopr
-FROM pg_catalog.pg_amop fk
-WHERE amopopr != 0 AND
+SELECT ctid, amopopr
+FROM pg_catalog.pg_amop fk
+WHERE amopopr != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.amopopr);
ctid | amopopr
------+---------
(0 rows)
-SELECT ctid, amopmethod
-FROM pg_catalog.pg_amop fk
-WHERE amopmethod != 0 AND
+SELECT ctid, amopmethod
+FROM pg_catalog.pg_amop fk
+WHERE amopmethod != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.amopmethod);
ctid | amopmethod
------+------------
(0 rows)
-SELECT ctid, amprocfamily
-FROM pg_catalog.pg_amproc fk
-WHERE amprocfamily != 0 AND
+SELECT ctid, amprocfamily
+FROM pg_catalog.pg_amproc fk
+WHERE amprocfamily != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.amprocfamily);
ctid | amprocfamily
------+--------------
(0 rows)
-SELECT ctid, amproclefttype
-FROM pg_catalog.pg_amproc fk
-WHERE amproclefttype != 0 AND
+SELECT ctid, amproclefttype
+FROM pg_catalog.pg_amproc fk
+WHERE amproclefttype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amproclefttype);
ctid | amproclefttype
------+----------------
(0 rows)
-SELECT ctid, amprocrighttype
-FROM pg_catalog.pg_amproc fk
-WHERE amprocrighttype != 0 AND
+SELECT ctid, amprocrighttype
+FROM pg_catalog.pg_amproc fk
+WHERE amprocrighttype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amprocrighttype);
ctid | amprocrighttype
------+-----------------
(0 rows)
-SELECT ctid, amproc
-FROM pg_catalog.pg_amproc fk
-WHERE amproc != 0 AND
+SELECT ctid, amproc
+FROM pg_catalog.pg_amproc fk
+WHERE amproc != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amproc);
ctid | amproc
------+--------
(0 rows)
-SELECT ctid, attrelid
-FROM pg_catalog.pg_attribute fk
-WHERE attrelid != 0 AND
+SELECT ctid, attrelid
+FROM pg_catalog.pg_attribute fk
+WHERE attrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.attrelid);
ctid | attrelid
------+----------
(0 rows)
-SELECT ctid, atttypid
-FROM pg_catalog.pg_attribute fk
-WHERE atttypid != 0 AND
+SELECT ctid, atttypid
+FROM pg_catalog.pg_attribute fk
+WHERE atttypid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.atttypid);
ctid | atttypid
------+----------
(0 rows)
-SELECT ctid, castsource
-FROM pg_catalog.pg_cast fk
-WHERE castsource != 0 AND
+SELECT ctid, castsource
+FROM pg_catalog.pg_cast fk
+WHERE castsource != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.castsource);
ctid | castsource
------+------------
(0 rows)
-SELECT ctid, casttarget
-FROM pg_catalog.pg_cast fk
-WHERE casttarget != 0 AND
+SELECT ctid, casttarget
+FROM pg_catalog.pg_cast fk
+WHERE casttarget != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.casttarget);
ctid | casttarget
------+------------
(0 rows)
-SELECT ctid, castfunc
-FROM pg_catalog.pg_cast fk
-WHERE castfunc != 0 AND
+SELECT ctid, castfunc
+FROM pg_catalog.pg_cast fk
+WHERE castfunc != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.castfunc);
ctid | castfunc
------+----------
(0 rows)
-SELECT ctid, relnamespace
-FROM pg_catalog.pg_class fk
-WHERE relnamespace != 0 AND
+SELECT ctid, relnamespace
+FROM pg_catalog.pg_class fk
+WHERE relnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.relnamespace);
ctid | relnamespace
------+--------------
(0 rows)
-SELECT ctid, reltype
-FROM pg_catalog.pg_class fk
-WHERE reltype != 0 AND
+SELECT ctid, reltype
+FROM pg_catalog.pg_class fk
+WHERE reltype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.reltype);
ctid | reltype
------+---------
(0 rows)
-SELECT ctid, relowner
-FROM pg_catalog.pg_class fk
-WHERE relowner != 0 AND
+SELECT ctid, relowner
+FROM pg_catalog.pg_class fk
+WHERE relowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.relowner);
ctid | relowner
------+----------
(0 rows)
-SELECT ctid, relam
-FROM pg_catalog.pg_class fk
-WHERE relam != 0 AND
+SELECT ctid, relam
+FROM pg_catalog.pg_class fk
+WHERE relam != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.relam);
ctid | relam
------+-------
(0 rows)
-SELECT ctid, reltablespace
-FROM pg_catalog.pg_class fk
-WHERE reltablespace != 0 AND
+SELECT ctid, reltablespace
+FROM pg_catalog.pg_class fk
+WHERE reltablespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_tablespace pk WHERE pk.oid = fk.reltablespace);
ctid | reltablespace
------+---------------
(0 rows)
-SELECT ctid, reltoastrelid
-FROM pg_catalog.pg_class fk
-WHERE reltoastrelid != 0 AND
+SELECT ctid, reltoastrelid
+FROM pg_catalog.pg_class fk
+WHERE reltoastrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.reltoastrelid);
ctid | reltoastrelid
------+---------------
(0 rows)
-SELECT ctid, reltoastidxid
-FROM pg_catalog.pg_class fk
-WHERE reltoastidxid != 0 AND
+SELECT ctid, reltoastidxid
+FROM pg_catalog.pg_class fk
+WHERE reltoastidxid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.reltoastidxid);
ctid | reltoastidxid
------+---------------
(0 rows)
-SELECT ctid, connamespace
-FROM pg_catalog.pg_constraint fk
-WHERE connamespace != 0 AND
+SELECT ctid, connamespace
+FROM pg_catalog.pg_constraint fk
+WHERE connamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.connamespace);
ctid | connamespace
------+--------------
(0 rows)
-SELECT ctid, contypid
-FROM pg_catalog.pg_constraint fk
-WHERE contypid != 0 AND
+SELECT ctid, contypid
+FROM pg_catalog.pg_constraint fk
+WHERE contypid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.contypid);
ctid | contypid
------+----------
(0 rows)
-SELECT ctid, connamespace
-FROM pg_catalog.pg_conversion fk
-WHERE connamespace != 0 AND
+SELECT ctid, connamespace
+FROM pg_catalog.pg_conversion fk
+WHERE connamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.connamespace);
ctid | connamespace
------+--------------
(0 rows)
-SELECT ctid, conowner
-FROM pg_catalog.pg_conversion fk
-WHERE conowner != 0 AND
+SELECT ctid, conowner
+FROM pg_catalog.pg_conversion fk
+WHERE conowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.conowner);
ctid | conowner
------+----------
(0 rows)
-SELECT ctid, conproc
-FROM pg_catalog.pg_conversion fk
-WHERE conproc != 0 AND
+SELECT ctid, conproc
+FROM pg_catalog.pg_conversion fk
+WHERE conproc != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.conproc);
ctid | conproc
------+---------
(0 rows)
-SELECT ctid, datdba
-FROM pg_catalog.pg_database fk
-WHERE datdba != 0 AND
+SELECT ctid, datdba
+FROM pg_catalog.pg_database fk
+WHERE datdba != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.datdba);
ctid | datdba
------+--------
(0 rows)
-SELECT ctid, dattablespace
-FROM pg_catalog.pg_database fk
-WHERE dattablespace != 0 AND
+SELECT ctid, dattablespace
+FROM pg_catalog.pg_database fk
+WHERE dattablespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_tablespace pk WHERE pk.oid = fk.dattablespace);
ctid | dattablespace
------+---------------
(0 rows)
-SELECT ctid, setdatabase
-FROM pg_catalog.pg_db_role_setting fk
-WHERE setdatabase != 0 AND
+SELECT ctid, setdatabase
+FROM pg_catalog.pg_db_role_setting fk
+WHERE setdatabase != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_database pk WHERE pk.oid = fk.setdatabase);
ctid | setdatabase
------+-------------
(0 rows)
-SELECT ctid, classid
-FROM pg_catalog.pg_depend fk
-WHERE classid != 0 AND
+SELECT ctid, classid
+FROM pg_catalog.pg_depend fk
+WHERE classid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classid);
ctid | classid
------+---------
(0 rows)
-SELECT ctid, refclassid
-FROM pg_catalog.pg_depend fk
-WHERE refclassid != 0 AND
+SELECT ctid, refclassid
+FROM pg_catalog.pg_depend fk
+WHERE refclassid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.refclassid);
ctid | refclassid
------+------------
(0 rows)
-SELECT ctid, classoid
-FROM pg_catalog.pg_description fk
-WHERE classoid != 0 AND
+SELECT ctid, classoid
+FROM pg_catalog.pg_description fk
+WHERE classoid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classoid);
ctid | classoid
------+----------
(0 rows)
-SELECT ctid, indexrelid
-FROM pg_catalog.pg_index fk
-WHERE indexrelid != 0 AND
+SELECT ctid, indexrelid
+FROM pg_catalog.pg_index fk
+WHERE indexrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.indexrelid);
ctid | indexrelid
------+------------
(0 rows)
-SELECT ctid, indrelid
-FROM pg_catalog.pg_index fk
-WHERE indrelid != 0 AND
+SELECT ctid, indrelid
+FROM pg_catalog.pg_index fk
+WHERE indrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.indrelid);
ctid | indrelid
------+----------
(0 rows)
-SELECT ctid, lanowner
-FROM pg_catalog.pg_language fk
-WHERE lanowner != 0 AND
+SELECT ctid, lanowner
+FROM pg_catalog.pg_language fk
+WHERE lanowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.lanowner);
ctid | lanowner
------+----------
(0 rows)
-SELECT ctid, lanplcallfoid
-FROM pg_catalog.pg_language fk
-WHERE lanplcallfoid != 0 AND
+SELECT ctid, lanplcallfoid
+FROM pg_catalog.pg_language fk
+WHERE lanplcallfoid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanplcallfoid);
ctid | lanplcallfoid
------+---------------
(0 rows)
-SELECT ctid, laninline
-FROM pg_catalog.pg_language fk
-WHERE laninline != 0 AND
+SELECT ctid, laninline
+FROM pg_catalog.pg_language fk
+WHERE laninline != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.laninline);
ctid | laninline
------+-----------
(0 rows)
-SELECT ctid, lanvalidator
-FROM pg_catalog.pg_language fk
-WHERE lanvalidator != 0 AND
+SELECT ctid, lanvalidator
+FROM pg_catalog.pg_language fk
+WHERE lanvalidator != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanvalidator);
ctid | lanvalidator
------+--------------
(0 rows)
-SELECT ctid, nspowner
-FROM pg_catalog.pg_namespace fk
-WHERE nspowner != 0 AND
+SELECT ctid, nspowner
+FROM pg_catalog.pg_namespace fk
+WHERE nspowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.nspowner);
ctid | nspowner
------+----------
(0 rows)
-SELECT ctid, opcmethod
-FROM pg_catalog.pg_opclass fk
-WHERE opcmethod != 0 AND
+SELECT ctid, opcmethod
+FROM pg_catalog.pg_opclass fk
+WHERE opcmethod != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.opcmethod);
ctid | opcmethod
------+-----------
(0 rows)
-SELECT ctid, opcnamespace
-FROM pg_catalog.pg_opclass fk
-WHERE opcnamespace != 0 AND
+SELECT ctid, opcnamespace
+FROM pg_catalog.pg_opclass fk
+WHERE opcnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.opcnamespace);
ctid | opcnamespace
------+--------------
(0 rows)
-SELECT ctid, opcowner
-FROM pg_catalog.pg_opclass fk
-WHERE opcowner != 0 AND
+SELECT ctid, opcowner
+FROM pg_catalog.pg_opclass fk
+WHERE opcowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.opcowner);
ctid | opcowner
------+----------
(0 rows)
-SELECT ctid, opcfamily
-FROM pg_catalog.pg_opclass fk
-WHERE opcfamily != 0 AND
+SELECT ctid, opcfamily
+FROM pg_catalog.pg_opclass fk
+WHERE opcfamily != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.opcfamily);
ctid | opcfamily
------+-----------
(0 rows)
-SELECT ctid, opcintype
-FROM pg_catalog.pg_opclass fk
-WHERE opcintype != 0 AND
+SELECT ctid, opcintype
+FROM pg_catalog.pg_opclass fk
+WHERE opcintype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.opcintype);
ctid | opcintype
------+-----------
(0 rows)
-SELECT ctid, opckeytype
-FROM pg_catalog.pg_opclass fk
-WHERE opckeytype != 0 AND
+SELECT ctid, opckeytype
+FROM pg_catalog.pg_opclass fk
+WHERE opckeytype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.opckeytype);
ctid | opckeytype
------+------------
(0 rows)
-SELECT ctid, oprnamespace
-FROM pg_catalog.pg_operator fk
-WHERE oprnamespace != 0 AND
+SELECT ctid, oprnamespace
+FROM pg_catalog.pg_operator fk
+WHERE oprnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.oprnamespace);
ctid | oprnamespace
------+--------------
(0 rows)
-SELECT ctid, oprowner
-FROM pg_catalog.pg_operator fk
-WHERE oprowner != 0 AND
+SELECT ctid, oprowner
+FROM pg_catalog.pg_operator fk
+WHERE oprowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.oprowner);
ctid | oprowner
------+----------
(0 rows)
-SELECT ctid, oprleft
-FROM pg_catalog.pg_operator fk
-WHERE oprleft != 0 AND
+SELECT ctid, oprleft
+FROM pg_catalog.pg_operator fk
+WHERE oprleft != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprleft);
ctid | oprleft
------+---------
(0 rows)
-SELECT ctid, oprright
-FROM pg_catalog.pg_operator fk
-WHERE oprright != 0 AND
+SELECT ctid, oprright
+FROM pg_catalog.pg_operator fk
+WHERE oprright != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprright);
ctid | oprright
------+----------
(0 rows)
-SELECT ctid, oprresult
-FROM pg_catalog.pg_operator fk
-WHERE oprresult != 0 AND
+SELECT ctid, oprresult
+FROM pg_catalog.pg_operator fk
+WHERE oprresult != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprresult);
ctid | oprresult
------+-----------
(0 rows)
-SELECT ctid, oprcom
-FROM pg_catalog.pg_operator fk
-WHERE oprcom != 0 AND
+SELECT ctid, oprcom
+FROM pg_catalog.pg_operator fk
+WHERE oprcom != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprcom);
ctid | oprcom
------+--------
(0 rows)
-SELECT ctid, oprnegate
-FROM pg_catalog.pg_operator fk
-WHERE oprnegate != 0 AND
+SELECT ctid, oprnegate
+FROM pg_catalog.pg_operator fk
+WHERE oprnegate != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprnegate);
ctid | oprnegate
------+-----------
(0 rows)
-SELECT ctid, oprcode
-FROM pg_catalog.pg_operator fk
-WHERE oprcode != 0 AND
+SELECT ctid, oprcode
+FROM pg_catalog.pg_operator fk
+WHERE oprcode != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprcode);
ctid | oprcode
------+---------
(0 rows)
-SELECT ctid, oprrest
-FROM pg_catalog.pg_operator fk
-WHERE oprrest != 0 AND
+SELECT ctid, oprrest
+FROM pg_catalog.pg_operator fk
+WHERE oprrest != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprrest);
ctid | oprrest
------+---------
(0 rows)
-SELECT ctid, oprjoin
-FROM pg_catalog.pg_operator fk
-WHERE oprjoin != 0 AND
+SELECT ctid, oprjoin
+FROM pg_catalog.pg_operator fk
+WHERE oprjoin != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprjoin);
ctid | oprjoin
------+---------
(0 rows)
-SELECT ctid, opfmethod
-FROM pg_catalog.pg_opfamily fk
-WHERE opfmethod != 0 AND
+SELECT ctid, opfmethod
+FROM pg_catalog.pg_opfamily fk
+WHERE opfmethod != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.opfmethod);
ctid | opfmethod
------+-----------
(0 rows)
-SELECT ctid, opfnamespace
-FROM pg_catalog.pg_opfamily fk
-WHERE opfnamespace != 0 AND
+SELECT ctid, opfnamespace
+FROM pg_catalog.pg_opfamily fk
+WHERE opfnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.opfnamespace);
ctid | opfnamespace
------+--------------
(0 rows)
-SELECT ctid, opfowner
-FROM pg_catalog.pg_opfamily fk
-WHERE opfowner != 0 AND
+SELECT ctid, opfowner
+FROM pg_catalog.pg_opfamily fk
+WHERE opfowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.opfowner);
ctid | opfowner
------+----------
(0 rows)
-SELECT ctid, pronamespace
-FROM pg_catalog.pg_proc fk
-WHERE pronamespace != 0 AND
+SELECT ctid, pronamespace
+FROM pg_catalog.pg_proc fk
+WHERE pronamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.pronamespace);
ctid | pronamespace
------+--------------
(0 rows)
-SELECT ctid, proowner
-FROM pg_catalog.pg_proc fk
-WHERE proowner != 0 AND
+SELECT ctid, proowner
+FROM pg_catalog.pg_proc fk
+WHERE proowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.proowner);
ctid | proowner
------+----------
(0 rows)
-SELECT ctid, prolang
-FROM pg_catalog.pg_proc fk
-WHERE prolang != 0 AND
+SELECT ctid, prolang
+FROM pg_catalog.pg_proc fk
+WHERE prolang != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_language pk WHERE pk.oid = fk.prolang);
ctid | prolang
------+---------
(0 rows)
-SELECT ctid, prorettype
-FROM pg_catalog.pg_proc fk
-WHERE prorettype != 0 AND
+SELECT ctid, prorettype
+FROM pg_catalog.pg_proc fk
+WHERE prorettype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.prorettype);
ctid | prorettype
------+------------
(0 rows)
-SELECT ctid, ev_class
-FROM pg_catalog.pg_rewrite fk
-WHERE ev_class != 0 AND
+SELECT ctid, ev_class
+FROM pg_catalog.pg_rewrite fk
+WHERE ev_class != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.ev_class);
ctid | ev_class
------+----------
(0 rows)
-SELECT ctid, refclassid
-FROM pg_catalog.pg_shdepend fk
-WHERE refclassid != 0 AND
+SELECT ctid, refclassid
+FROM pg_catalog.pg_shdepend fk
+WHERE refclassid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.refclassid);
ctid | refclassid
------+------------
(0 rows)
-SELECT ctid, classoid
-FROM pg_catalog.pg_shdescription fk
-WHERE classoid != 0 AND
+SELECT ctid, classoid
+FROM pg_catalog.pg_shdescription fk
+WHERE classoid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classoid);
ctid | classoid
------+----------
(0 rows)
-SELECT ctid, starelid
-FROM pg_catalog.pg_statistic fk
-WHERE starelid != 0 AND
+SELECT ctid, starelid
+FROM pg_catalog.pg_statistic fk
+WHERE starelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.starelid);
ctid | starelid
------+----------
(0 rows)
-SELECT ctid, staop1
-FROM pg_catalog.pg_statistic fk
-WHERE staop1 != 0 AND
+SELECT ctid, staop1
+FROM pg_catalog.pg_statistic fk
+WHERE staop1 != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop1);
ctid | staop1
------+--------
(0 rows)
-SELECT ctid, staop2
-FROM pg_catalog.pg_statistic fk
-WHERE staop2 != 0 AND
+SELECT ctid, staop2
+FROM pg_catalog.pg_statistic fk
+WHERE staop2 != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop2);
ctid | staop2
------+--------
(0 rows)
-SELECT ctid, staop3
-FROM pg_catalog.pg_statistic fk
-WHERE staop3 != 0 AND
+SELECT ctid, staop3
+FROM pg_catalog.pg_statistic fk
+WHERE staop3 != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop3);
ctid | staop3
------+--------
(0 rows)
-SELECT ctid, spcowner
-FROM pg_catalog.pg_tablespace fk
-WHERE spcowner != 0 AND
+SELECT ctid, spcowner
+FROM pg_catalog.pg_tablespace fk
+WHERE spcowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.spcowner);
ctid | spcowner
------+----------
(0 rows)
-SELECT ctid, cfgnamespace
-FROM pg_catalog.pg_ts_config fk
-WHERE cfgnamespace != 0 AND
+SELECT ctid, cfgnamespace
+FROM pg_catalog.pg_ts_config fk
+WHERE cfgnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.cfgnamespace);
ctid | cfgnamespace
------+--------------
(0 rows)
-SELECT ctid, cfgowner
-FROM pg_catalog.pg_ts_config fk
-WHERE cfgowner != 0 AND
+SELECT ctid, cfgowner
+FROM pg_catalog.pg_ts_config fk
+WHERE cfgowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.cfgowner);
ctid | cfgowner
------+----------
(0 rows)
-SELECT ctid, cfgparser
-FROM pg_catalog.pg_ts_config fk
-WHERE cfgparser != 0 AND
+SELECT ctid, cfgparser
+FROM pg_catalog.pg_ts_config fk
+WHERE cfgparser != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_parser pk WHERE pk.oid = fk.cfgparser);
ctid | cfgparser
------+-----------
(0 rows)
-SELECT ctid, mapcfg
-FROM pg_catalog.pg_ts_config_map fk
-WHERE mapcfg != 0 AND
+SELECT ctid, mapcfg
+FROM pg_catalog.pg_ts_config_map fk
+WHERE mapcfg != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_config pk WHERE pk.oid = fk.mapcfg);
ctid | mapcfg
------+--------
(0 rows)
-SELECT ctid, mapdict
-FROM pg_catalog.pg_ts_config_map fk
-WHERE mapdict != 0 AND
+SELECT ctid, mapdict
+FROM pg_catalog.pg_ts_config_map fk
+WHERE mapdict != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_dict pk WHERE pk.oid = fk.mapdict);
ctid | mapdict
------+---------
(0 rows)
-SELECT ctid, dictnamespace
-FROM pg_catalog.pg_ts_dict fk
-WHERE dictnamespace != 0 AND
+SELECT ctid, dictnamespace
+FROM pg_catalog.pg_ts_dict fk
+WHERE dictnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.dictnamespace);
ctid | dictnamespace
------+---------------
(0 rows)
-SELECT ctid, dictowner
-FROM pg_catalog.pg_ts_dict fk
-WHERE dictowner != 0 AND
+SELECT ctid, dictowner
+FROM pg_catalog.pg_ts_dict fk
+WHERE dictowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.dictowner);
ctid | dictowner
------+-----------
(0 rows)
-SELECT ctid, dicttemplate
-FROM pg_catalog.pg_ts_dict fk
-WHERE dicttemplate != 0 AND
+SELECT ctid, dicttemplate
+FROM pg_catalog.pg_ts_dict fk
+WHERE dicttemplate != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_template pk WHERE pk.oid = fk.dicttemplate);
ctid | dicttemplate
------+--------------
(0 rows)
-SELECT ctid, prsnamespace
-FROM pg_catalog.pg_ts_parser fk
-WHERE prsnamespace != 0 AND
+SELECT ctid, prsnamespace
+FROM pg_catalog.pg_ts_parser fk
+WHERE prsnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.prsnamespace);
ctid | prsnamespace
------+--------------
(0 rows)
-SELECT ctid, prsstart
-FROM pg_catalog.pg_ts_parser fk
-WHERE prsstart != 0 AND
+SELECT ctid, prsstart
+FROM pg_catalog.pg_ts_parser fk
+WHERE prsstart != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsstart);
ctid | prsstart
------+----------
(0 rows)
-SELECT ctid, prstoken
-FROM pg_catalog.pg_ts_parser fk
-WHERE prstoken != 0 AND
+SELECT ctid, prstoken
+FROM pg_catalog.pg_ts_parser fk
+WHERE prstoken != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prstoken);
ctid | prstoken
------+----------
(0 rows)
-SELECT ctid, prsend
-FROM pg_catalog.pg_ts_parser fk
-WHERE prsend != 0 AND
+SELECT ctid, prsend
+FROM pg_catalog.pg_ts_parser fk
+WHERE prsend != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsend);
ctid | prsend
------+--------
(0 rows)
-SELECT ctid, prsheadline
-FROM pg_catalog.pg_ts_parser fk
-WHERE prsheadline != 0 AND
+SELECT ctid, prsheadline
+FROM pg_catalog.pg_ts_parser fk
+WHERE prsheadline != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsheadline);
ctid | prsheadline
------+-------------
(0 rows)
-SELECT ctid, prslextype
-FROM pg_catalog.pg_ts_parser fk
-WHERE prslextype != 0 AND
+SELECT ctid, prslextype
+FROM pg_catalog.pg_ts_parser fk
+WHERE prslextype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prslextype);
ctid | prslextype
------+------------
(0 rows)
-SELECT ctid, tmplnamespace
-FROM pg_catalog.pg_ts_template fk
-WHERE tmplnamespace != 0 AND
+SELECT ctid, tmplnamespace
+FROM pg_catalog.pg_ts_template fk
+WHERE tmplnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.tmplnamespace);
ctid | tmplnamespace
------+---------------
(0 rows)
-SELECT ctid, tmplinit
-FROM pg_catalog.pg_ts_template fk
-WHERE tmplinit != 0 AND
+SELECT ctid, tmplinit
+FROM pg_catalog.pg_ts_template fk
+WHERE tmplinit != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.tmplinit);
ctid | tmplinit
------+----------
(0 rows)
-SELECT ctid, tmpllexize
-FROM pg_catalog.pg_ts_template fk
-WHERE tmpllexize != 0 AND
+SELECT ctid, tmpllexize
+FROM pg_catalog.pg_ts_template fk
+WHERE tmpllexize != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.tmpllexize);
ctid | tmpllexize
------+------------
(0 rows)
-SELECT ctid, typnamespace
-FROM pg_catalog.pg_type fk
-WHERE typnamespace != 0 AND
+SELECT ctid, typnamespace
+FROM pg_catalog.pg_type fk
+WHERE typnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.typnamespace);
ctid | typnamespace
------+--------------
(0 rows)
-SELECT ctid, typowner
-FROM pg_catalog.pg_type fk
-WHERE typowner != 0 AND
+SELECT ctid, typowner
+FROM pg_catalog.pg_type fk
+WHERE typowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.typowner);
ctid | typowner
------+----------
(0 rows)
-SELECT ctid, typrelid
-FROM pg_catalog.pg_type fk
-WHERE typrelid != 0 AND
+SELECT ctid, typrelid
+FROM pg_catalog.pg_type fk
+WHERE typrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.typrelid);
ctid | typrelid
------+----------
(0 rows)
-SELECT ctid, typelem
-FROM pg_catalog.pg_type fk
-WHERE typelem != 0 AND
+SELECT ctid, typelem
+FROM pg_catalog.pg_type fk
+WHERE typelem != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typelem);
ctid | typelem
------+---------
(0 rows)
-SELECT ctid, typarray
-FROM pg_catalog.pg_type fk
-WHERE typarray != 0 AND
+SELECT ctid, typarray
+FROM pg_catalog.pg_type fk
+WHERE typarray != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typarray);
ctid | typarray
------+----------
(0 rows)
-SELECT ctid, typinput
-FROM pg_catalog.pg_type fk
-WHERE typinput != 0 AND
+SELECT ctid, typinput
+FROM pg_catalog.pg_type fk
+WHERE typinput != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typinput);
ctid | typinput
------+----------
(0 rows)
-SELECT ctid, typoutput
-FROM pg_catalog.pg_type fk
-WHERE typoutput != 0 AND
+SELECT ctid, typoutput
+FROM pg_catalog.pg_type fk
+WHERE typoutput != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typoutput);
ctid | typoutput
------+-----------
(0 rows)
-SELECT ctid, typreceive
-FROM pg_catalog.pg_type fk
-WHERE typreceive != 0 AND
+SELECT ctid, typreceive
+FROM pg_catalog.pg_type fk
+WHERE typreceive != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typreceive);
ctid | typreceive
------+------------
(0 rows)
-SELECT ctid, typsend
-FROM pg_catalog.pg_type fk
-WHERE typsend != 0 AND
+SELECT ctid, typsend
+FROM pg_catalog.pg_type fk
+WHERE typsend != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typsend);
ctid | typsend
------+---------
(0 rows)
-SELECT ctid, typmodin
-FROM pg_catalog.pg_type fk
-WHERE typmodin != 0 AND
+SELECT ctid, typmodin
+FROM pg_catalog.pg_type fk
+WHERE typmodin != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typmodin);
ctid | typmodin
------+----------
(0 rows)
-SELECT ctid, typmodout
-FROM pg_catalog.pg_type fk
-WHERE typmodout != 0 AND
+SELECT ctid, typmodout
+FROM pg_catalog.pg_type fk
+WHERE typmodout != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typmodout);
ctid | typmodout
------+-----------
(0 rows)
-SELECT ctid, typanalyze
-FROM pg_catalog.pg_type fk
-WHERE typanalyze != 0 AND
+SELECT ctid, typanalyze
+FROM pg_catalog.pg_type fk
+WHERE typanalyze != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typanalyze);
ctid | typanalyze
------+------------
(0 rows)
-SELECT ctid, typbasetype
-FROM pg_catalog.pg_type fk
-WHERE typbasetype != 0 AND
+SELECT ctid, typbasetype
+FROM pg_catalog.pg_type fk
+WHERE typbasetype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typbasetype);
ctid | typbasetype
------+-------------
drop function exc_using(int, text);
create or replace function exc_using(int) returns void as $$
-declare
+declare
c refcursor;
i int;
begin
raise notice '%', i;
end loop;
close c;
- return;
+ return;
end;
$$ language plpgsql;
select exc_using(5);
INSERT INTO POINT_TBL(f1) VALUES ('(-3.0,4.0)');
INSERT INTO POINT_TBL(f1) VALUES ('(5.1, 34.5)');
INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)');
--- bad format points
+-- bad format points
INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf');
ERROR: invalid input syntax for type point: "asdfasdf"
LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf');
| (10,10)
(6 rows)
--- left of
+-- left of
SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 << '(0.0, 0.0)';
three | f1
-------+----------
| (-5,-12)
(3 rows)
--- right of
+-- right of
SELECT '' AS three, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >> p.f1;
three | f1
-------+----------
| (-5,-12)
(3 rows)
--- above
+-- above
SELECT '' AS one, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >^ p.f1;
one | f1
-----+----------
| (-5,-12)
(1 row)
--- below
+-- below
SELECT '' AS one, p.* FROM POINT_TBL p WHERE p.f1 <^ '(0.0, 0.0)';
one | f1
-----+----------
| (-5,-12)
(1 row)
--- equal
+-- equal
SELECT '' AS one, p.* FROM POINT_TBL p WHERE p.f1 ~= '(5.1, 34.5)';
one | f1
-----+------------
| (5.1,34.5)
(1 row)
--- point in box
+-- point in box
SELECT '' AS three, p.* FROM POINT_TBL p
WHERE p.f1 <@ box '(0,0,100,100)';
three | f1
-- put distance result into output to allow sorting with GEQ optimizer - tgl 97/05/10
SELECT '' AS three, p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS distance
- FROM POINT_TBL p1, POINT_TBL p2
+ FROM POINT_TBL p1, POINT_TBL p2
WHERE (p1.f1 <-> p2.f1) > 3 and p1.f1 << p2.f1 and p1.f1 >^ p2.f1
ORDER BY distance;
three | point1 | point2 | distance
CREATE TABLE POLYGON_TBL(f1 polygon);
INSERT INTO POLYGON_TBL(f1) VALUES ('(2.0,0.0),(2.0,4.0),(0.0,0.0)');
INSERT INTO POLYGON_TBL(f1) VALUES ('(3.0,1.0),(3.0,3.0),(1.0,0.0)');
--- degenerate polygons
+-- degenerate polygons
INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,0.0)');
INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,1.0),(0.0,1.0)');
--- bad polygon input strings
+-- bad polygon input strings
INSERT INTO POLYGON_TBL(f1) VALUES ('0.0');
ERROR: invalid input syntax for type polygon: "0.0"
LINE 1: INSERT INTO POLYGON_TBL(f1) VALUES ('0.0');
| ((0,1),(0,1))
(4 rows)
--- overlap
+-- overlap
SELECT '' AS three, p.*
FROM POLYGON_TBL p
WHERE p.f1 && '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
| ((3,1),(3,3),(1,0))
(2 rows)
--- left overlap
-SELECT '' AS four, p.*
+-- left overlap
+SELECT '' AS four, p.*
FROM POLYGON_TBL p
WHERE p.f1 &< '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
four | f1
| ((0,1),(0,1))
(4 rows)
--- right overlap
-SELECT '' AS two, p.*
+-- right overlap
+SELECT '' AS two, p.*
FROM POLYGON_TBL p
WHERE p.f1 &> '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
two | f1
| ((3,1),(3,3),(1,0))
(1 row)
--- left of
+-- left of
SELECT '' AS one, p.*
FROM POLYGON_TBL p
WHERE p.f1 << '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
| ((0,1),(0,1))
(2 rows)
--- right of
+-- right of
SELECT '' AS zero, p.*
FROM POLYGON_TBL p
WHERE p.f1 >> '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
------+----
(0 rows)
--- contained
-SELECT '' AS one, p.*
+-- contained
+SELECT '' AS one, p.*
FROM POLYGON_TBL p
WHERE p.f1 <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
one | f1
| ((3,1),(3,3),(1,0))
(1 row)
--- same
+-- same
SELECT '' AS one, p.*
FROM POLYGON_TBL p
WHERE p.f1 ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
| ((3,1),(3,3),(1,0))
(1 row)
--- contains
+-- contains
SELECT '' AS one, p.*
FROM POLYGON_TBL p
WHERE p.f1 @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--
-- 0 1 2 3 4
--
--- left of
+-- left of
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
false
-------
f
(1 row)
--- left overlap
+-- left overlap
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true;
true
------
f
(1 row)
--- right overlap
+-- right overlap
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' &> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
false
-------
f
(1 row)
--- right of
+-- right of
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' >> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
false
-------
f
(1 row)
--- contained in
+-- contained in
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
false
-------
f
(1 row)
--- contains
+-- contains
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
false
-------
-- +------------------------+
-- | *---* 1
--- | + | |
+-- | + | |
-- | 2 *---*
-- +------------------------+
-- 3
(1 row)
-- +-----------+
--- | *---* /
--- | | |/
--- | | +
--- | | |\
+-- | *---* /
+-- | | |/
+-- | | +
+-- | | |\
-- | *---* \
-- +-----------+
SELECT '((0,4),(6,4),(3,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "true";
t
(1 row)
--- same
+-- same
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
false
-------
f
(1 row)
--- overlap
+-- overlap
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' && polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true;
true
------
-- +--------------------+
-- | *---* 1
--- | + | |
+-- | + | |
-- | 2 *---*
-- +--------------------+
-- 3
ROLLBACK;
-- Make sure snapshot management works okay, per bug report in
-BEGIN;
-SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
-CREATE TABLE cursor (a int);
-INSERT INTO cursor VALUES (1);
-DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE;
-UPDATE cursor SET a = 2;
-FETCH ALL FROM c1;
+BEGIN;
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE cursor (a int);
+INSERT INTO cursor VALUES (1);
+DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE;
+UPDATE cursor SET a = 2;
+FETCH ALL FROM c1;
a
---
(0 rows)
-COMMIT;
+COMMIT;
DROP TABLE cursor;
-- PORTALS_P2
--
BEGIN;
-DECLARE foo13 CURSOR FOR
+DECLARE foo13 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 50;
-DECLARE foo14 CURSOR FOR
+DECLARE foo14 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 51;
-DECLARE foo15 CURSOR FOR
+DECLARE foo15 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 52;
-DECLARE foo16 CURSOR FOR
+DECLARE foo16 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 53;
-DECLARE foo17 CURSOR FOR
+DECLARE foo17 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 54;
-DECLARE foo18 CURSOR FOR
+DECLARE foo18 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 55;
-DECLARE foo19 CURSOR FOR
+DECLARE foo19 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 56;
-DECLARE foo20 CURSOR FOR
+DECLARE foo20 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 57;
-DECLARE foo21 CURSOR FOR
+DECLARE foo21 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 58;
-DECLARE foo22 CURSOR FOR
+DECLARE foo22 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 59;
-DECLARE foo23 CURSOR FOR
+DECLARE foo23 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 60;
-DECLARE foo24 CURSOR FOR
+DECLARE foo24 CURSOR FOR
SELECT * FROM onek2 WHERE unique1 = 50;
-DECLARE foo25 CURSOR FOR
+DECLARE foo25 CURSOR FOR
SELECT * FROM onek2 WHERE unique1 = 60;
FETCH all in foo13;
unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
create table rtest_person (pname text, pdesc text);
create table rtest_admin (pname text, sysname text);
create rule rtest_sys_upd as on update to rtest_system do also (
- update rtest_interface set sysname = new.sysname
+ update rtest_interface set sysname = new.sysname
where sysname = old.sysname;
- update rtest_admin set sysname = new.sysname
+ update rtest_admin set sysname = new.sysname
where sysname = old.sysname
);
create rule rtest_sys_del as on delete to rtest_system do also (
'fired', '0.00', old.salary);
--
-- Tables and rules for the multiple cascaded qualified instead
--- rule test
+-- rule test
--
create table rtest_t4 (a int4, b text);
create table rtest_t5 (a int4, b text);
create table rtest_view2 (a int4);
create table rtest_view3 (a int4, b text);
create table rtest_view4 (a int4, b text, c int4);
-create view rtest_vview1 as select a, b from rtest_view1 X
+create view rtest_vview1 as select a, b from rtest_view1 X
where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a);
create view rtest_vview2 as select a, b from rtest_view1 where v;
create view rtest_vview3 as select a, b from rtest_vview2 X
unit char(4),
factor float
);
-create view rtest_vcomp as
+create view rtest_vcomp as
select X.part, (X.size * Y.factor) as size_in_cm
from rtest_comp X, rtest_unitfact Y
where X.unit = Y.unit;
on update to vview do instead
(
insert into cchild (pid, descrip)
- select old.pid, new.descrip where old.descrip isnull;
+ select old.pid, new.descrip where old.descrip isnull;
update cchild set descrip = new.descrip where cchild.pid = old.pid;
);
select * from vview;
toyemp | SELECT emp.name, emp.age, emp.location, (12 * emp.salary) AS annualsal FROM emp;
(56 rows)
-SELECT tablename, rulename, definition FROM pg_rules
+SELECT tablename, rulename, definition FROM pg_rules
ORDER BY tablename, rulename;
tablename | rulename | definition
---------------+-----------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
create table rule_and_refint_t1 (
id1a integer,
id1b integer,
-
primary key (id1a, id1b)
);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "rule_and_refint_t1_pkey" for table "rule_and_refint_t1"
create table rule_and_refint_t2 (
id2a integer,
id2c integer,
-
primary key (id2a, id2c)
);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "rule_and_refint_t2_pkey" for table "rule_and_refint_t2"
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1);
create temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1);
-create rule t1_ins_1 as on insert to t1
+create rule t1_ins_1 as on insert to t1
where new.a >= 0 and new.a < 10
do instead
insert into t1_1 values (new.a);
-create rule t1_ins_2 as on insert to t1
+create rule t1_ins_2 as on insert to t1
where new.a >= 10 and new.a < 20
do instead
insert into t1_2 values (new.a);
-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
--
SELECT onek.unique1, onek.stringu1 FROM onek
- WHERE onek.unique1 < 20
+ WHERE onek.unique1 < 20
ORDER BY unique1 using >;
unique1 | stringu1
---------+----------
-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2
--
SELECT onek.unique1, onek.stringu1 FROM onek
- WHERE onek.unique1 > 980
+ WHERE onek.unique1 > 980
ORDER BY stringu1 using <;
unique1 | stringu1
---------+----------
987 | ZLAAAA
(19 rows)
-
--
-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data |
-- sort +1d -2 +0nr -1
--
SELECT onek.unique1, onek.string4 FROM onek
- WHERE onek.unique1 > 980
+ WHERE onek.unique1 > 980
ORDER BY string4 using <, unique1 using >;
unique1 | string4
---------+---------
984 | VVVVxx
(19 rows)
-
--
-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data |
-- sort +1dr -2 +0n -1
999 | AAAAxx
(19 rows)
-
--
-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data |
-- sort +0nr -1 +1d -2
-- sort +0n -1 +1dr -2
--
SELECT onek.unique1, onek.string4 FROM onek
- WHERE onek.unique1 < 20
+ WHERE onek.unique1 < 20
ORDER BY unique1 using <, string4 using >;
unique1 | string4
---------+---------
-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
--
SELECT onek2.unique1, onek2.stringu1 FROM onek2
- WHERE onek2.unique1 < 20
+ WHERE onek2.unique1 < 20
ORDER BY unique1 using >;
unique1 | stringu1
---------+----------
^
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
-- failure expected
-SELECT count(*) FROM test_missing_target x, test_missing_target y
+SELECT count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY b ORDER BY b;
ERROR: column reference "b" is ambiguous
(5 rows)
-- group w/ existing GROUP BY target under ambiguous condition
-SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
+SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
b | count
(4 rows)
-- group w/o existing GROUP BY target under ambiguous condition
-SELECT count(*) FROM test_missing_target x, test_missing_target y
+SELECT count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
count
-- group w/o existing GROUP BY target under ambiguous condition
-- into a table
-SELECT count(*) INTO TABLE test_missing_target2
-FROM test_missing_target x, test_missing_target y
+SELECT count(*) INTO TABLE test_missing_target2
+FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
SELECT * FROM test_missing_target2;
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
-- failure expected
-SELECT count(x.a) FROM test_missing_target x, test_missing_target y
+SELECT count(x.a) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY b/2 ORDER BY b/2;
ERROR: column reference "b" is ambiguous
LINE 3: GROUP BY b/2 ORDER BY b/2;
^
-- group w/ existing GROUP BY target under ambiguous condition
-SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
+SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2 ORDER BY x.b/2;
?column? | count
-- group w/o existing GROUP BY target under ambiguous condition
-- failure expected due to ambiguous b in count(b)
-SELECT count(b) FROM test_missing_target x, test_missing_target y
+SELECT count(b) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2;
ERROR: column reference "b" is ambiguous
^
-- group w/o existing GROUP BY target under ambiguous condition
-- into a table
-SELECT count(x.b) INTO TABLE test_missing_target3
-FROM test_missing_target x, test_missing_target y
+SELECT count(x.b) INTO TABLE test_missing_target3
+FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2 ORDER BY x.b/2;
SELECT * FROM test_missing_target3;
^
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
-- failure expected
-SELECT count(*) FROM test_missing_target x, test_missing_target y
+SELECT count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY b ORDER BY b;
ERROR: column reference "b" is ambiguous
(5 rows)
-- group w/ existing GROUP BY target under ambiguous condition
-SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
+SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
b | count
(4 rows)
-- group w/o existing GROUP BY target under ambiguous condition
-SELECT count(*) FROM test_missing_target x, test_missing_target y
+SELECT count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
count
-- group w/o existing GROUP BY target under ambiguous condition
-- into a table
-SELECT count(*) INTO TABLE test_missing_target2
-FROM test_missing_target x, test_missing_target y
+SELECT count(*) INTO TABLE test_missing_target2
+FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
SELECT * FROM test_missing_target2;
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
-- failure expected
-SELECT count(x.a) FROM test_missing_target x, test_missing_target y
+SELECT count(x.a) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY b/2 ORDER BY b/2;
ERROR: column reference "b" is ambiguous
LINE 3: GROUP BY b/2 ORDER BY b/2;
^
-- group w/ existing GROUP BY target under ambiguous condition
-SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
+SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2 ORDER BY x.b/2;
?column? | count
-- group w/o existing GROUP BY target under ambiguous condition
-- failure expected due to ambiguous b in count(b)
-SELECT count(b) FROM test_missing_target x, test_missing_target y
+SELECT count(b) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2;
ERROR: column reference "b" is ambiguous
^
-- group w/o existing GROUP BY target under ambiguous condition
-- into a table
-SELECT count(x.b) INTO TABLE test_missing_target3
-FROM test_missing_target x, test_missing_target y
+SELECT count(x.b) INTO TABLE test_missing_target3
+FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2 ORDER BY x.b/2;
SELECT * FROM test_missing_target3;
^
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
-- failure expected
-SELECT count(*) FROM test_missing_target x, test_missing_target y
+SELECT count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY b ORDER BY b;
ERROR: column reference "b" is ambiguous
(5 rows)
-- group w/ existing GROUP BY target under ambiguous condition
-SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
+SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
b | count
(4 rows)
-- group w/o existing GROUP BY target under ambiguous condition
-SELECT count(*) FROM test_missing_target x, test_missing_target y
+SELECT count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
count
-- group w/o existing GROUP BY target under ambiguous condition
-- into a table
-SELECT count(*) INTO TABLE test_missing_target2
-FROM test_missing_target x, test_missing_target y
+SELECT count(*) INTO TABLE test_missing_target2
+FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
SELECT * FROM test_missing_target2;
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
-- failure expected
-SELECT count(x.a) FROM test_missing_target x, test_missing_target y
+SELECT count(x.a) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY b/2 ORDER BY b/2;
ERROR: column reference "b" is ambiguous
LINE 3: GROUP BY b/2 ORDER BY b/2;
^
-- group w/ existing GROUP BY target under ambiguous condition
-SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
+SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2 ORDER BY x.b/2;
?column? | count
-- group w/o existing GROUP BY target under ambiguous condition
-- failure expected due to ambiguous b in count(b)
-SELECT count(b) FROM test_missing_target x, test_missing_target y
+SELECT count(b) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2;
ERROR: column reference "b" is ambiguous
^
-- group w/o existing GROUP BY target under ambiguous condition
-- into a table
-SELECT count(x.b) INTO TABLE test_missing_target3
-FROM test_missing_target x, test_missing_target y
+SELECT count(x.b) INTO TABLE test_missing_target3
+FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2 ORDER BY x.b/2;
SELECT * FROM test_missing_target3;
---
--- test creation of SERIAL column
---
-
CREATE TABLE serialTest (f1 text, f2 serial);
NOTICE: CREATE TABLE will create implicit sequence "serialtest_f2_seq" for serial column "serialtest.f2"
-
INSERT INTO serialTest VALUES ('foo');
INSERT INTO serialTest VALUES ('bar');
INSERT INTO serialTest VALUES ('force', 100);
INSERT INTO serialTest VALUES ('wrong', NULL);
ERROR: null value in column "f2" violates not-null constraint
-
SELECT * FROM serialTest;
f1 | f2
-------+-----
-- basic sequence operations using both text and oid references
CREATE SEQUENCE sequence_test;
-
SELECT nextval('sequence_test'::text);
nextval
---------
---
--- test creation of SERIAL column
---
-
CREATE TABLE serialTest (f1 text, f2 serial);
NOTICE: CREATE TABLE will create implicit sequence "serialtest_f2_seq" for serial column "serialtest.f2"
-
INSERT INTO serialTest VALUES ('foo');
INSERT INTO serialTest VALUES ('bar');
INSERT INTO serialTest VALUES ('force', 100);
INSERT INTO serialTest VALUES ('wrong', NULL);
ERROR: null value in column "f2" violates not-null constraint
-
SELECT * FROM serialTest;
f1 | f2
-------+-----
-- basic sequence operations using both text and oid references
CREATE SEQUENCE sequence_test;
-
SELECT nextval('sequence_test'::text);
nextval
---------
ELSE 'Approved'
END)
ELSE 'PO'
- END)
+ END)
END) AS "Status",
(CASE
WHEN ord.ordercancelled
ELSE 'Approved'
END)
ELSE 'PO'
- END)
+ END)
END) AS "Status_OK"
FROM orderstest ord;
SELECT * FROM orders_view;
ERROR: timestamp out of range: "Feb 16 17:32:01 5097 BC"
LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC')...
^
-SELECT '' AS "64", d1 FROM TIMESTAMP_TBL;
+SELECT '' AS "64", d1 FROM TIMESTAMP_TBL;
64 | d1
----+-----------------------------
| -infinity
(55 rows)
-- TO_CHAR()
-SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
FROM TIMESTAMP_TBL;
to_char_1 | to_char
-----------+------------------------------------------------------------------------------------------
| 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911
(65 rows)
-SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
FROM TIMESTAMP_TBL;
to_char_4 | to_char
-----------+-------------------------------------------------
| 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911
(65 rows)
-SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
FROM TIMESTAMP_TBL;
to_char_5 | to_char
-----------+----------------------
| 05 05 17 32 01 63121
(65 rows)
-SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
+SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
FROM TIMESTAMP_TBL;
to_char_6 | to_char
-----------+-------------------------------------------------
| 17--text--32--text--01
(65 rows)
-SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
+SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
FROM TIMESTAMP_TBL;
to_char_8 | to_char
-----------+-------------------------
| 2001ST 2001st 2451911th
(65 rows)
-
-SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
- FROM TIMESTAMP_TBL;
+SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
+ FROM TIMESTAMP_TBL;
to_char_9 | to_char
-----------+---------------------------------------------------------------------
|
Wed Jul 11 06:51:14 2001 PDT
(1 row)
-SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL;
+SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL;
64 | d1
----+---------------------------------
| -infinity
(56 rows)
-- TO_CHAR()
-SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
FROM TIMESTAMPTZ_TBL;
to_char_1 | to_char
-----------+------------------------------------------------------------------------------------------
| MONDAY Monday monday MON Mon mon JANUARY January january I JAN Jan jan
(66 rows)
-
SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
- FROM TIMESTAMPTZ_TBL;
+ FROM TIMESTAMPTZ_TBL;
to_char_2 | to_char
-----------+--------------------------------------------------------------
|
| 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911
(66 rows)
-
-SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
- FROM TIMESTAMPTZ_TBL;
+SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+ FROM TIMESTAMPTZ_TBL;
to_char_4 | to_char
-----------+-------------------------------------------------
|
| 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911
(66 rows)
-
-SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
FROM TIMESTAMPTZ_TBL;
to_char_5 | to_char
-----------+----------------------
| 05 05 17 32 01 63121
(66 rows)
-SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
- FROM TIMESTAMPTZ_TBL;
+SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
+ FROM TIMESTAMPTZ_TBL;
to_char_6 | to_char
-----------+-------------------------------------------------
|
| HH:MI:SS is 05:32:01 "text between quote marks"
(66 rows)
-
SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS')
- FROM TIMESTAMPTZ_TBL;
+ FROM TIMESTAMPTZ_TBL;
to_char_7 | to_char
-----------+------------------------
|
| 17--text--32--text--01
(66 rows)
-SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
+SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
FROM TIMESTAMPTZ_TBL;
to_char_8 | to_char
-----------+-------------------------
| 2001ST 2001st 2451911th
(66 rows)
-
-SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
- FROM TIMESTAMPTZ_TBL;
+SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
+ FROM TIMESTAMPTZ_TBL;
to_char_9 | to_char
-----------+---------------------------------------------------------------------
|
VALUES ('["epoch" "Mon May 1 00:30:30 1995"]');
INSERT INTO TINTERVAL_TBL (f1)
VALUES ('["Feb 15 1990 12:15:03" "2001-09-23 11:12:13"]');
--- badly formatted tintervals
+-- badly formatted tintervals
INSERT INTO TINTERVAL_TBL (f1)
VALUES ('["bad time specifications" ""]');
ERROR: invalid input syntax for type abstime: "bad time specifications"
-- contains
SELECT '' AS five, t1.f1
FROM TINTERVAL_TBL t1
- WHERE not t1.f1 <<
+ WHERE not t1.f1 <<
tinterval '["Aug 15 14:23:19 1980" "Sep 16 14:23:19 1990"]'
ORDER BY t1.f1;
five | f1
-- TRANSACTIONS
--
BEGIN;
-SELECT *
+SELECT *
INTO TABLE xacttest
FROM aggtest;
INSERT INTO xacttest (a, b) VALUES (777, 777.777);
(0 rows)
ABORT;
--- should not exist
+-- should not exist
SELECT oid FROM pg_class WHERE relname = 'disappear';
oid
-----
(0 rows)
--- should have members again
+-- should have members again
SELECT * FROM aggtest;
a | b
-----+---------
ROLLBACK;
COMMIT; -- should not be in a transaction block
WARNING: there is no transaction in progress
-
SELECT * FROM savepoints;
a
---
-- (fkey1, fkey2) --> pkeys (pkey1, pkey2)
-- (fkey3) --> fkeys2 (pkey23)
--
-create trigger check_fkeys_pkey_exist
- before insert or update on fkeys
- for each row
- execute procedure
+create trigger check_fkeys_pkey_exist
+ before insert or update on fkeys
+ for each row
+ execute procedure
check_primary_key ('fkey1', 'fkey2', 'pkeys', 'pkey1', 'pkey2');
-create trigger check_fkeys_pkey2_exist
- before insert or update on fkeys
- for each row
+create trigger check_fkeys_pkey2_exist
+ before insert or update on fkeys
+ for each row
execute procedure check_primary_key ('fkey3', 'fkeys2', 'pkey23');
--
-- For fkeys2:
-- (fkey21, fkey22) --> pkeys (pkey1, pkey2)
--
-create trigger check_fkeys2_pkey_exist
- before insert or update on fkeys2
- for each row
- execute procedure
+create trigger check_fkeys2_pkey_exist
+ before insert or update on fkeys2
+ for each row
+ execute procedure
check_primary_key ('fkey21', 'fkey22', 'pkeys', 'pkey1', 'pkey2');
-- Test comments
COMMENT ON TRIGGER check_fkeys2_pkey_bad ON fkeys2 IS 'wrong';
-- fkeys (fkey1, fkey2) and fkeys2 (fkey21, fkey22)
--
create trigger check_pkeys_fkey_cascade
- before delete or update on pkeys
- for each row
- execute procedure
- check_foreign_key (2, 'cascade', 'pkey1', 'pkey2',
+ before delete or update on pkeys
+ for each row
+ execute procedure
+ check_foreign_key (2, 'cascade', 'pkey1', 'pkey2',
'fkeys', 'fkey1', 'fkey2', 'fkeys2', 'fkey21', 'fkey22');
--
-- For fkeys2:
-- ON DELETE/UPDATE (pkey23) RESTRICT:
-- fkeys (fkey3)
--
-create trigger check_fkeys2_fkey_restrict
+create trigger check_fkeys2_fkey_restrict
before delete or update on fkeys2
- for each row
+ for each row
execute procedure check_foreign_key (1, 'restrict', 'pkey23', 'fkeys', 'fkey3');
insert into fkeys2 values (10, '1', 1);
insert into fkeys2 values (30, '3', 2);
-- -- Jan
--
-- create table dup17 (x int4);
---
--- create trigger dup17_before
+--
+-- create trigger dup17_before
-- before insert on dup17
--- for each row
--- execute procedure
+-- for each row
+-- execute procedure
-- funny_dup17 ()
-- ;
---
+--
-- insert into dup17 values (17);
-- select count(*) from dup17;
-- insert into dup17 values (17);
-- select count(*) from dup17;
---
+--
-- drop trigger dup17_before on dup17;
---
+--
-- create trigger dup17_after
-- after insert on dup17
--- for each row
--- execute procedure
+-- for each row
+-- execute procedure
-- funny_dup17 ()
-- ;
-- insert into dup17 values (13);
-- select count(*) from dup17 where x = 13;
-- insert into dup17 values (13);
-- select count(*) from dup17 where x = 13;
---
+--
-- DROP TABLE dup17;
create sequence ttdummy_seq increment 10 start 0 minvalue 0;
create table tttest (
- price_id int4,
- price_val int4,
+ price_id int4,
+ price_val int4,
price_on int4,
price_off int4 default 999999
);
-create trigger ttdummy
+create trigger ttdummy
before delete or update on tttest
- for each row
- execute procedure
+ for each row
+ execute procedure
ttdummy (price_on, price_off);
-create trigger ttserial
+create trigger ttserial
before insert or update on tttest
- for each row
- execute procedure
+ for each row
+ execute procedure
autoinc (price_on, ttdummy_seq);
insert into tttest values (1, 1, null);
insert into tttest values (2, 2, null);
i int,
v varchar
);
-CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
+CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
LANGUAGE plpgsql AS $$
declare
relid := TG_relid::regclass;
-- plpgsql can't discover its trigger data in a hash like perl and python
- -- can, or by a sort of reflection like tcl can,
+ -- can, or by a sort of reflection like tcl can,
-- so we have to hard code the names.
raise NOTICE 'TG_NAME: %', TG_name;
raise NOTICE 'TG_WHEN: %', TG_when;
end;
$$;
-CREATE TRIGGER show_trigger_data_trig
+CREATE TRIGGER show_trigger_data_trig
BEFORE INSERT OR UPDATE OR DELETE ON trigger_test
FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
insert into trigger_test values(1,'insert');
NOTICE: TG_NARGS: 2
NOTICE: TG_ARGV: [23, skidoo]
NOTICE: OLD: (1,update)
-
DROP TRIGGER show_trigger_data_trig on trigger_test;
-
DROP FUNCTION trigger_data();
DROP TABLE trigger_test;
--
f3 int) WITH OIDS;
INSERT INTO min_updates_test VALUES ('a',1,2),('b','2',null);
INSERT INTO min_updates_test_oids VALUES ('a',1,2),('b','2',null);
-CREATE TRIGGER z_min_update
+CREATE TRIGGER z_min_update
BEFORE UPDATE ON min_updates_test
FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
-CREATE TRIGGER z_min_update
+CREATE TRIGGER z_min_update
BEFORE UPDATE ON min_updates_test_oids
FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
\set QUIET false
INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux');
CREATE TRIGGER t
BEFORE TRUNCATE ON trunc_trigger_test
-FOR EACH STATEMENT
+FOR EACH STATEMENT
EXECUTE PROCEDURE trunctrigger('before trigger truncate');
SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
Row count in test table
INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux');
CREATE TRIGGER tt
AFTER TRUNCATE ON trunc_trigger_test
-FOR EACH STATEMENT
+FOR EACH STATEMENT
EXECUTE PROCEDURE trunctrigger('after trigger truncate');
SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
Row count in test table
-- Synonim dictionary
CREATE TEXT SEARCH DICTIONARY synonym (
- Template=synonym,
+ Template=synonym,
Synonyms=synonym_sample
);
SELECT ts_lexize('synonym', 'PoStGrEs');
-- cannot pass more than one word to thesaurus.
CREATE TEXT SEARCH DICTIONARY thesaurus (
Template=thesaurus,
- DictFile=thesaurus_sample,
+ DictFile=thesaurus_sample,
Dictionary=english_stem
);
SELECT ts_lexize('thesaurus', 'one');
CREATE TEXT SEARCH CONFIGURATION synonym_tst (
COPY=english
);
-ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR
- asciiword, hword_asciipart, asciihword
+ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR
+ asciiword, hword_asciipart, asciihword
WITH synonym, english_stem;
SELECT to_tsvector('synonym_tst', 'Postgresql is often called as postgres or pgsql and pronounced as postgre');
to_tsvector
CREATE TEXT SEARCH CONFIGURATION thesaurus_tst (
COPY=synonym_tst
);
-ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR
- asciiword, hword_asciipart, asciihword
+ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR
+ asciiword, hword_asciipart, asciihword
WITH synonym, thesaurus, english_stem;
SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one');
to_tsvector
-- Look for pg_ts_config_map entries that aren't one of parser's token types
SELECT * FROM
( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid
- FROM pg_ts_config ) AS tt
+ FROM pg_ts_config ) AS tt
RIGHT JOIN pg_ts_config_map AS m
ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype)
WHERE
494
(1 row)
-
RESET enable_seqscan;
INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH');
SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10;
</html>
(1 row)
---Check if headline fragments work
+--Check if headline fragments work
SELECT ts_headline('english', '
Day after day, day after day,
We stuck, nor breath nor motion,
(3 rows)
-- Make sure typarray points to a varlena array type of our own base
-SELECT p1.oid, p1.typname as basetype, p2.typname as arraytype,
+SELECT p1.oid, p1.typname as basetype, p2.typname as arraytype,
p2.typelem, p2.typlen
FROM pg_type p1 LEFT JOIN pg_type p2 ON (p1.typarray = p2.oid)
WHERE p1.typarray <> 0 AND
CREATE TABLE VARCHAR_TBL(f1 varchar(1));
INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
--- any of the following three input formats are acceptable
+-- any of the following three input formats are acceptable
INSERT INTO VARCHAR_TBL (f1) VALUES ('1');
INSERT INTO VARCHAR_TBL (f1) VALUES (2);
INSERT INTO VARCHAR_TBL (f1) VALUES ('3');
--- zero-length char
+-- zero-length char
INSERT INTO VARCHAR_TBL (f1) VALUES ('');
--- try varchar's of greater than 1 length
+-- try varchar's of greater than 1 length
INSERT INTO VARCHAR_TBL (f1) VALUES ('cd');
ERROR: value too long for type character varying(1)
INSERT INTO VARCHAR_TBL (f1) VALUES ('c ');
CREATE TABLE VARCHAR_TBL(f1 varchar(1));
INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
--- any of the following three input formats are acceptable
+-- any of the following three input formats are acceptable
INSERT INTO VARCHAR_TBL (f1) VALUES ('1');
INSERT INTO VARCHAR_TBL (f1) VALUES (2);
INSERT INTO VARCHAR_TBL (f1) VALUES ('3');
--- zero-length char
+-- zero-length char
INSERT INTO VARCHAR_TBL (f1) VALUES ('');
--- try varchar's of greater than 1 length
+-- try varchar's of greater than 1 length
INSERT INTO VARCHAR_TBL (f1) VALUES ('cd');
ERROR: value too long for type character varying(1)
INSERT INTO VARCHAR_TBL (f1) VALUES ('c ');
CREATE TABLE VARCHAR_TBL(f1 varchar(1));
INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
--- any of the following three input formats are acceptable
+-- any of the following three input formats are acceptable
INSERT INTO VARCHAR_TBL (f1) VALUES ('1');
INSERT INTO VARCHAR_TBL (f1) VALUES (2);
INSERT INTO VARCHAR_TBL (f1) VALUES ('3');
--- zero-length char
+-- zero-length char
INSERT INTO VARCHAR_TBL (f1) VALUES ('');
--- try varchar's of greater than 1 length
+-- try varchar's of greater than 1 length
INSERT INTO VARCHAR_TBL (f1) VALUES ('cd');
ERROR: value too long for type character varying(1)
INSERT INTO VARCHAR_TBL (f1) VALUES ('c ');
(10 rows)
-- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window.
-SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
last_value | ten | four
------------+-----+------
0 | 0 | 0
| 3 | 3
(10 rows)
-SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum
FROM tenk1 GROUP BY ten, two;
ten | two | gsum | wsum
-----+-----+-------+--------
2 | 3
(6 rows)
-SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) +
- sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum
+SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum
FROM tenk1 WHERE unique2 < 10;
cntsum
--------
-- opexpr with different windows evaluation.
SELECT * FROM(
- SELECT count(*) OVER (PARTITION BY four ORDER BY ten) +
- sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total,
+ SELECT count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total,
count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount,
sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum
FROM tenk1
3.0000000000000000
(10 rows)
-SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum
FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten);
ten | two | gsum | wsum
-----+-----+-------+--------
-- test header line feature
create temp table copytest3 (
- c1 int,
- "col with , comma" text,
+ c1 int,
+ "col with , comma" text,
"col with "" quote" int);
copy copytest3 from stdin csv header;
-- CREATE_FUNCTION_2
--
CREATE FUNCTION hobbies(person)
- RETURNS setof hobbies_r
+ RETURNS setof hobbies_r
AS 'select * from hobbies_r where person = $1.name'
LANGUAGE SQL;
CREATE FUNCTION user_relns()
RETURNS setof name
- AS 'select relname
+ AS 'select relname
from pg_class c, pg_namespace n
where relnamespace = n.oid and
(nspname !~ ''pg_.*'' and nspname <> ''information_schema'') and
-- UPDATE onek2
-- SET unique1 = onek2.unique1 + 1;
---UPDATE onek2
+--UPDATE onek2
-- SET unique1 = onek2.unique1 - 1;
--
-- BTREE shutting out non-functional updates
--
--- the following two tests seem to take a long time on some
+-- the following two tests seem to take a long time on some
-- systems. This non-func update stuff needs to be examined
-- more closely. - jolly (2/22/96)
---
+--
UPDATE tmp
SET stringu1 = reverse_name(onek.stringu1)
FROM onek
--
SELECT * FROM a_star*;
-SELECT *
+SELECT *
FROM b_star* x
WHERE x.b = text 'bumble' or x.a < 3;
-SELECT class, a
- FROM c_star* x
+SELECT class, a
+ FROM c_star* x
WHERE x.c ~ text 'hi';
SELECT class, b, c
ALTER TABLE a_star RENAME COLUMN foo TO aa;
-SELECT *
+SELECT *
from a_star*
WHERE aa < 1000;
-- test header line feature
create temp table copytest3 (
- c1 int,
- "col with , comma" text,
+ c1 int,
+ "col with , comma" text,
"col with "" quote" int);
copy copytest3 from stdin csv header;
copy copytest3 to stdout csv header;
-- CREATE_FUNCTION_2
--
CREATE FUNCTION hobbies(person)
- RETURNS setof hobbies_r
+ RETURNS setof hobbies_r
AS 'select * from hobbies_r where person = $1.name'
LANGUAGE SQL;
CREATE FUNCTION hobby_construct(text, text)
LANGUAGE SQL;
CREATE FUNCTION user_relns()
RETURNS setof name
- AS 'select relname
+ AS 'select relname
from pg_class c, pg_namespace n
where relnamespace = n.oid and
(nspname !~ ''pg_.*'' and nspname <> ''information_schema'') and
--
-- UPDATE onek2
-- SET unique1 = onek2.unique1 + 1;
---UPDATE onek2
+--UPDATE onek2
-- SET unique1 = onek2.unique1 - 1;
--
-- BTREE shutting out non-functional updates
--
--- the following two tests seem to take a long time on some
+-- the following two tests seem to take a long time on some
-- systems. This non-func update stuff needs to be examined
-- more closely. - jolly (2/22/96)
---
+--
UPDATE tmp
SET stringu1 = reverse_name(onek.stringu1)
FROM onek
f |
(50 rows)
-SELECT *
+SELECT *
FROM b_star* x
WHERE x.b = text 'bumble' or x.a < 3;
class | a | b
b | | bumble
(1 row)
-SELECT class, a
- FROM c_star* x
+SELECT class, a
+ FROM c_star* x
WHERE x.c ~ text 'hi';
class | a
-------+----
(25 rows)
ALTER TABLE a_star RENAME COLUMN foo TO aa;
-SELECT *
+SELECT *
from a_star*
WHERE aa < 1000;
class | aa
--
-- timezones may vary based not only on location but the operating
--- system. the main correctness issue is that the OS may not get
+-- system. the main correctness issue is that the OS may not get
-- daylight savings time right for times prior to Unix epoch (jan 1 1970).
--
INSERT INTO ABSTIME_TBL (f1) VALUES (abstime '-infinity');
INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'May 10, 1947 23:59:12');
--- what happens if we specify slightly misformatted abstime?
+-- what happens if we specify slightly misformatted abstime?
INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00');
INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 28, 1984 25:08:10');
--- badly formatted abstimes: these should result in invalid abstimes
+-- badly formatted abstimes: these should result in invalid abstimes
INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format');
INSERT INTO ABSTIME_TBL (f1) VALUES ('Jun 10, 1843');
);
-- empty case
-SELECT
+SELECT
BIT_AND(i2) AS "?",
BIT_OR(i4) AS "?"
FROM bitwise_test;
boolor_statefunc(FALSE, TRUE) AS "t",
NOT boolor_statefunc(FALSE, FALSE) AS "t";
-CREATE TEMPORARY TABLE bool_test(
+CREATE TEMPORARY TABLE bool_test(
b1 BOOL,
b2 BOOL,
b3 BOOL,
INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u,
v, w, x, y, z)
- VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
- 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}',
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}',
314159, '(1,1)', '512',
'1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
'(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]',
DROP TABLE tmp;
--- the wolf bug - schema mods caused inconsistent row descriptors
+-- the wolf bug - schema mods caused inconsistent row descriptors
CREATE TABLE tmp (
initial int4
);
INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u,
v, w, x, y, z)
- VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
- 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}',
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}',
314159, '(1,1)', '512',
'1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
'(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]',
ANALYZE tenk1;
set enable_seqscan to off;
set enable_bitmapscan to off;
--- 5 values, sorted
+-- 5 values, sorted
SELECT unique1 FROM tenk1 WHERE unique1 < 5;
reset enable_seqscan;
reset enable_bitmapscan;
select * from anothertab;
alter table anothertab alter column atcol2 type text
- using case when atcol2 is true then 'IT WAS TRUE'
+ using case when atcol2 is true then 'IT WAS TRUE'
when atcol2 is false then 'IT WAS FALSE'
else 'IT WAS NULL!' end;
a int2[],
b int4[][][],
c name[],
- d text[][],
+ d text[][],
e float8[],
f char(5)[],
g varchar(5)[]
VALUES ('{"too long"}');
INSERT INTO arrtest (a, b[1:2][1:2], c, d, e, f, g)
- VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}',
+ VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}',
'{{"elt1", "elt2"}}', '{"3.4", "6.7"}',
'{"abc","abcde"}', '{"abc","abcde"}');
SELECT arrtest.a[1],
arrtest.b[1][1][1],
arrtest.c[1],
- arrtest.d[1][1],
+ arrtest.d[1][1],
arrtest.e[0]
FROM arrtest;
SELECT a[1:3],
b[1:1][1:2][1:2],
- c[1:2],
+ c[1:2],
d[1:1][1:2]
FROM arrtest;
SELECT array_dims(a) AS a,array_dims(b) AS b,array_dims(c) AS c
FROM arrtest;
--- returns nothing
+-- returns nothing
SELECT *
FROM arrtest
- WHERE a[1] < 5 and
+ WHERE a[1] < 5 and
c = '{"foobar"}'::_name;
UPDATE arrtest
SELECT a[1:3],
b[1:1][1:2][1:2],
- c[1:2],
+ c[1:2],
d[1:1][2:2]
FROM arrtest;
drop table comptable;
drop type comptype;
-create or replace function unnest1(anyarray)
+create or replace function unnest1(anyarray)
returns setof anyelement as $$
select $1[s] from generate_subscripts($1,1) g(s);
$$ language sql immutable;
-create or replace function unnest2(anyarray)
+create or replace function unnest2(anyarray)
returns setof anyelement as $$
select $1[s1][s2] from generate_subscripts($1,1) g1(s1),
generate_subscripts($1,2) g2(s2);
--INSERT INTO BIT_TABLE VALUES ('X554');
--INSERT INTO BIT_TABLE VALUES ('X555');
-SELECT * FROM BIT_TABLE;
+SELECT * FROM BIT_TABLE;
CREATE TABLE VARBIT_TABLE(v BIT VARYING(11));
INSERT INTO VARBIT_TABLE VALUES (B'101011111010'); -- too long
--INSERT INTO VARBIT_TABLE VALUES ('X554');
--INSERT INTO VARBIT_TABLE VALUES ('X555');
-SELECT * FROM VARBIT_TABLE;
+SELECT * FROM VARBIT_TABLE;
-- Concatenation
SELECT v, b, (v || b) AS concat
- FROM BIT_TABLE, VARBIT_TABLE
+ FROM BIT_TABLE, VARBIT_TABLE
ORDER BY 3;
-- Length
X1234 XFFF5
\.
-SELECT a, b, ~a AS "~ a", a & b AS "a & b",
+SELECT a, b, ~a AS "~ a", a & b AS "a & b",
a | b AS "a | b", a # b AS "a # b" FROM varbit_table;
SELECT a,b,a<b AS "a<b",a<=b AS "a<=b",a=b AS "a=b",
a>=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM varbit_table;
X1234 XFFF5
\.
-SELECT a,b,~a AS "~ a",a & b AS "a & b",
+SELECT a,b,~a AS "~ a",a & b AS "a & b",
a|b AS "a | b", a # b AS "a # b" FROM bit_table;
SELECT a,b,a<b AS "a<b",a<=b AS "a<=b",a=b AS "a=b",
a>=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM bit_table;
INSERT INTO BIT_SHIFT_TABLE SELECT b>>8 FROM BIT_SHIFT_TABLE;
SELECT POSITION(B'1101' IN b),
POSITION(B'11011' IN b),
- b
+ b
FROM BIT_SHIFT_TABLE ;
INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00000000' AS BIT VARYING(20)) >>8 FROM VARBIT_SHIFT_TABLE;
SELECT POSITION(B'1101' IN v),
POSITION(B'11011' IN v),
- v
+ v
FROM VARBIT_SHIFT_TABLE ;
CREATE TABLE bmscantest (a int, b int, t text);
-INSERT INTO bmscantest
+INSERT INTO bmscantest
SELECT (r%53), (r%59), 'foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo'
FROM generate_series(1,70000) r;
INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true');
--- BOOLTBL1 should be full of true's at this point
+-- BOOLTBL1 should be full of true's at this point
SELECT '' AS t_3, BOOLTBL1.* FROM BOOLTBL1;
WHERE f1 = bool 'true';
-SELECT '' AS t_3, BOOLTBL1.*
+SELECT '' AS t_3, BOOLTBL1.*
FROM BOOLTBL1
WHERE f1 <> bool 'false';
INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f');
-SELECT '' AS f_1, BOOLTBL1.*
+SELECT '' AS f_1, BOOLTBL1.*
FROM BOOLTBL1
WHERE f1 = bool 'false';
-- This is now an invalid expression
-- For pre-v6.3 this evaluated to false - thomas 1997-10-23
-INSERT INTO BOOLTBL2 (f1)
- VALUES (bool 'XXX');
+INSERT INTO BOOLTBL2 (f1)
+ VALUES (bool 'XXX');
--- BOOLTBL2 should be full of false's at this point
+-- BOOLTBL2 should be full of false's at this point
SELECT '' AS f_4, BOOLTBL2.* FROM BOOLTBL2;
INSERT INTO BOX_TBL (f1) VALUES ('(1.0,1.0,3.0,3.0)');
--- degenerate cases where the box is a line or a point
--- note that lines and points boxes all have zero area
+-- degenerate cases where the box is a line or a point
+-- note that lines and points boxes all have zero area
INSERT INTO BOX_TBL (f1) VALUES ('(2.5, 2.5, 2.5,3.5)');
INSERT INTO BOX_TBL (f1) VALUES ('(3.0, 3.0,3.0,3.0)');
--- badly formatted box inputs
+-- badly formatted box inputs
INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)');
INSERT INTO BOX_TBL (f1) VALUES ('asdfasdf(ad');
SELECT '' AS four, b.*, area(b.f1) as barea
FROM BOX_TBL b;
--- overlap
+-- overlap
SELECT '' AS three, b.f1
- FROM BOX_TBL b
+ FROM BOX_TBL b
WHERE b.f1 && box '(2.5,2.5,1.0,1.0)';
--- left-or-overlap (x only)
+-- left-or-overlap (x only)
SELECT '' AS two, b1.*
FROM BOX_TBL b1
WHERE b1.f1 &< box '(2.0,2.0,2.5,2.5)';
--- right-or-overlap (x only)
+-- right-or-overlap (x only)
SELECT '' AS two, b1.*
FROM BOX_TBL b1
WHERE b1.f1 &> box '(2.0,2.0,2.5,2.5)';
--- left of
+-- left of
SELECT '' AS two, b.f1
FROM BOX_TBL b
WHERE b.f1 << box '(3.0,3.0,5.0,5.0)';
--- area <=
+-- area <=
SELECT '' AS four, b.f1
FROM BOX_TBL b
WHERE b.f1 <= box '(3.0,3.0,5.0,5.0)';
--- area <
+-- area <
SELECT '' AS two, b.f1
FROM BOX_TBL b
WHERE b.f1 < box '(3.0,3.0,5.0,5.0)';
--- area =
+-- area =
SELECT '' AS two, b.f1
FROM BOX_TBL b
WHERE b.f1 = box '(3.0,3.0,5.0,5.0)';
--- area >
+-- area >
SELECT '' AS two, b.f1
- FROM BOX_TBL b -- zero area
- WHERE b.f1 > box '(3.5,3.0,4.5,3.0)';
+ FROM BOX_TBL b -- zero area
+ WHERE b.f1 > box '(3.5,3.0,4.5,3.0)';
--- area >=
+-- area >=
SELECT '' AS four, b.f1
- FROM BOX_TBL b -- zero area
+ FROM BOX_TBL b -- zero area
WHERE b.f1 >= box '(3.5,3.0,4.5,3.0)';
--- right of
+-- right of
SELECT '' AS two, b.f1
FROM BOX_TBL b
WHERE box '(3.0,3.0,5.0,5.0)' >> b.f1;
--- contained in
+-- contained in
SELECT '' AS three, b.f1
FROM BOX_TBL b
WHERE b.f1 <@ box '(0,0,3,3)';
--- contains
+-- contains
SELECT '' AS three, b.f1
FROM BOX_TBL b
WHERE box '(0,0,3,3)' @> b.f1;
--- box equality
+-- box equality
SELECT '' AS one, b.f1
FROM BOX_TBL b
WHERE box '(1,1,3,3)' ~= b.f1;
--- center of box, left unary operator
+-- center of box, left unary operator
SELECT '' AS four, @@(b1.f1) AS p
FROM BOX_TBL b1;
--- wholly-contained
+-- wholly-contained
SELECT '' AS one, b1.*, b2.*
- FROM BOX_TBL b1, BOX_TBL b2
+ FROM BOX_TBL b1, BOX_TBL b2
WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1;
SELECT '' AS four, height(f1), width(f1) FROM BOX_TBL;
INSERT INTO CHAR_TBL (f1) VALUES ('A');
--- any of the following three input formats are acceptable
+-- any of the following three input formats are acceptable
INSERT INTO CHAR_TBL (f1) VALUES ('1');
INSERT INTO CHAR_TBL (f1) VALUES (2);
INSERT INTO CHAR_TBL (f1) VALUES ('3');
--- zero-length char
+-- zero-length char
INSERT INTO CHAR_TBL (f1) VALUES ('');
--- try char's of greater than 1 length
+-- try char's of greater than 1 length
INSERT INTO CHAR_TBL (f1) VALUES ('cd');
INSERT INTO CHAR_TBL (f1) VALUES ('c ');
-- Test update where the new row version is found first in the scan
UPDATE clustertest SET key = 35 WHERE key = 40;
--- Test longer update chain
+-- Test longer update chain
UPDATE clustertest SET key = 60 WHERE key = 50;
UPDATE clustertest SET key = 70 WHERE key = 60;
UPDATE clustertest SET key = 80 WHERE key = 70;
-- This should fail
--
\copy v_test1 to stdout
---
+--
-- Test \copy (select ...)
--
\copy (select "id",'id','id""'||t,(id + 1)*id,t,"test1"."t" from test1 where id=3) to stdout
-- all functions CREATEd
CREATE AGGREGATE newavg (
- sfunc = int4_avg_accum, basetype = int4, stype = _int8,
+ sfunc = int4_avg_accum, basetype = int4, stype = _int8,
finalfunc = int8_avg,
initcond1 = '{0,0}'
);
-- without finalfunc; test obsolete spellings 'sfunc1' etc
CREATE AGGREGATE newsum (
- sfunc1 = int4pl, basetype = int4, stype1 = int4,
+ sfunc1 = int4pl, basetype = int4, stype1 = int4,
initcond1 = '0'
);
CREATE TEMP TABLE gpolygon_tbl AS
SELECT polygon(home_base) AS f1 FROM slow_emp4000;
-INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' );
-INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' );
+INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' );
+INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' );
CREATE TEMP TABLE gcircle_tbl AS
SELECT circle(home_base) AS f1 FROM slow_emp4000;
RESET enable_seqscan;
RESET enable_indexscan;
RESET enable_bitmapscan;
-
+
DROP TABLE onek_with_null;
FROM road
WHERE name ~ '.*Ramp';
-INSERT INTO ihighway
- SELECT *
- FROM road
+INSERT INTO ihighway
+ SELECT *
+ FROM road
WHERE name ~ 'I- .*';
-INSERT INTO shighway
- SELECT *
- FROM road
+INSERT INTO shighway
+ SELECT *
+ FROM road
WHERE name ~ 'State Hwy.*';
UPDATE shighway
VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon);
INSERT INTO f_star (class, c, e, f)
- VALUES ('f', 'hi keith'::name, '-8'::int2,
+ VALUES ('f', 'hi keith'::name, '-8'::int2,
'(1111,3333),(2222,4444)'::polygon);
INSERT INTO f_star (class, a, c)
VALUES ('f', 25, '-9'::int2);
INSERT INTO f_star (class, a, f)
- VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon);
+ VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon);
INSERT INTO f_star (class, c, e)
VALUES ('f', 'hi allison'::name, '-10'::int2);
INSERT INTO f_star (class, e) VALUES ('f', '-12'::int2);
-INSERT INTO f_star (class, f)
+INSERT INTO f_star (class, f)
VALUES ('f', '(11111111,33333333),(22222222,44444444)'::polygon);
INSERT INTO f_star (class) VALUES ('f');
-- for internal portal (cursor) tests
--
CREATE TABLE iportaltest (
- i int4,
- d float4,
+ i int4,
+ d float4,
p polygon
);
-- CREATE_OPERATOR
--
-CREATE OPERATOR ## (
+CREATE OPERATOR ## (
leftarg = path,
rightarg = path,
procedure = path_inter,
- commutator = ##
+ commutator = ##
);
CREATE OPERATOR <% (
rightarg = widget,
procedure = pt_in_widget,
commutator = >% ,
- negator = >=%
+ negator = >=%
);
CREATE OPERATOR @#@ (
- rightarg = int8, -- left unary
- procedure = numeric_fac
+ rightarg = int8, -- left unary
+ procedure = numeric_fac
);
CREATE OPERATOR #@# (
procedure = numeric_fac
);
-CREATE OPERATOR #%# (
- leftarg = int8, -- right unary
- procedure = numeric_fac
+CREATE OPERATOR #%# (
+ leftarg = int8, -- right unary
+ procedure = numeric_fac
);
-- Test comments
-- CLASS DEFINITIONS
--
CREATE TABLE hobbies_r (
- name text,
+ name text,
person text
);
-- f inherits from e (three-level single inheritance)
--
CREATE TABLE a_star (
- class char,
+ class char,
a int4
);
-- don't include the hash_ovfl_heap stuff in the distribution
-- the data set is too large for what it's worth
---
+--
-- CREATE TABLE hash_ovfl_heap (
-- x int4,
-- y int4
);
CREATE TABLE bt_f8_heap (
- seqno float8,
+ seqno float8,
random int4
);
t text[]
);
-CREATE TABLE IF NOT EXISTS test_tsvector(
- t text,
- a tsvector
+CREATE TABLE IF NOT EXISTS test_tsvector(
+ t text,
+ a tsvector
);
-CREATE TABLE IF NOT EXISTS test_tsvector(
+CREATE TABLE IF NOT EXISTS test_tsvector(
t text
);
-- of the "old style" approach of making the functions first.
--
CREATE TYPE widget (
- internallength = 24,
+ internallength = 24,
input = widget_in,
output = widget_out,
typmod_in = numerictypmodin,
alignment = double
);
-CREATE TYPE city_budget (
- internallength = 16,
- input = int44in,
- output = int44out,
+CREATE TYPE city_budget (
+ internallength = 16,
+ input = int44in,
+ output = int44out,
element = int4,
category = 'x', -- just to verify the system will take it
preferred = true -- ditto
--
CREATE VIEW street AS
- SELECT r.name, r.thepath, c.cname AS cname
+ SELECT r.name, r.thepath, c.cname AS cname
FROM ONLY road r, real_city c
WHERE c.outline ## r.thepath;
CREATE VIEW iexit AS
- SELECT ih.name, ih.thepath,
+ SELECT ih.name, ih.thepath,
interpt_pp(ih.thepath, r.thepath) AS exit
FROM ihighway ih, ramp r
WHERE ih.thepath ## r.thepath;
CREATE OR REPLACE VIEW viewtest AS
SELECT a, b::numeric FROM viewtest_tbl;
--- should work
+-- should work
CREATE OR REPLACE VIEW viewtest AS
SELECT a, b, 0 AS c FROM viewtest_tbl;
CREATE VIEW v13_temp AS SELECT seq1_temp.is_called FROM seq1_temp;
SELECT relname FROM pg_class
- WHERE relname LIKE 'v_'
+ WHERE relname LIKE 'v_'
AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'temp_view_test')
ORDER BY relname;
SELECT relname FROM pg_class
- WHERE relname LIKE 'v%'
+ WHERE relname LIKE 'v%'
AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%')
ORDER BY relname;
AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'testviewschm2')
ORDER BY relname;
SELECT relname FROM pg_class
- WHERE relname LIKE 'temporal%'
+ WHERE relname LIKE 'temporal%'
AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%')
ORDER BY relname;
--
-- this will fail if the user is not the postgres superuser.
-- if it does, don't worry about it (you can turn usersuper
--- back on as "postgres"). too many people don't follow
+-- back on as "postgres"). too many people don't follow
-- directions and run this as "postgres", though...
--
UPDATE pg_user
DROP OPERATOR <% (point, widget);
--- left unary
+-- left unary
DROP OPERATOR @#@ (none, int4);
--- right unary
-DROP OPERATOR #@# (int4, none);
+-- right unary
+DROP OPERATOR #@# (int4, none);
--- right unary
-DROP OPERATOR #%# (int4, none);
+-- right unary
+DROP OPERATOR #%# (int4, none);
--
---
+--
-- IF EXISTS tests
---
+--
-- table (will be really dropped at the end)
--
-- UNSUPPORTED STUFF
-
--- doesn't work
+
+-- doesn't work
-- notify pg_class
--
--
-- SELECT
-
--- missing relation name
+
+-- missing relation name
select;
--- no such relation
+-- no such relation
select * from nonesuch;
-- missing target list
--
-- DELETE
-
--- missing relation name (this had better not wildcard!)
+
+-- missing relation name (this had better not wildcard!)
delete from;
--- no such relation
+-- no such relation
delete from nonesuch;
--
-- DROP
-
--- missing relation name (this had better not wildcard!)
+
+-- missing relation name (this had better not wildcard!)
drop table;
--- no such relation
+-- no such relation
drop table nonesuch;
--
-- ALTER TABLE
-
--- relation renaming
--- missing relation name
+-- relation renaming
+
+-- missing relation name
alter table rename;
--- no such relation
+-- no such relation
alter table nonesuch rename to newnonesuch;
--- no such relation
+-- no such relation
alter table nonesuch rename to stud_emp;
--- conflict
+-- conflict
alter table stud_emp rename to aggtest;
--- self-conflict
+-- self-conflict
alter table stud_emp rename to stud_emp;
--- attribute renaming
+-- attribute renaming
--- no such relation
+-- no such relation
alter table nonesuchrel rename column nonesuchatt to newnonesuchatt;
--- no such attribute
+-- no such attribute
alter table emp rename column nonesuchatt to newnonesuchatt;
--- conflict
+-- conflict
alter table emp rename column salary to manager;
--- conflict
+-- conflict
alter table emp rename column salary to oid;
--
-- TRANSACTION STUFF
-
--- not in a xact
+
+-- not in a xact
abort;
--- not in a xact
+-- not in a xact
end;
--
-- CREATE AGGREGATE
--- sfunc/finalfunc type disagreement
+-- sfunc/finalfunc type disagreement
create aggregate newavg2 (sfunc = int4pl,
basetype = int4,
stype = int4,
--
-- DROP INDEX
-
--- missing index name
+
+-- missing index name
drop index;
--- bad index name
+-- bad index name
drop index 314159;
--- no such index
+-- no such index
drop index nonesuch;
--
-- DROP AGGREGATE
-
--- missing aggregate name
+
+-- missing aggregate name
drop aggregate;
-- missing aggregate type
drop aggregate newcnt1;
--- bad aggregate name
+-- bad aggregate name
drop aggregate 314159 (int);
-- bad aggregate type
drop aggregate newcnt (nonesuch);
--- no such aggregate
+-- no such aggregate
drop aggregate nonesuch (int4);
-- no such aggregate for type
--
-- DROP FUNCTION
-
--- missing function name
+
+-- missing function name
drop function ();
--- bad function name
+-- bad function name
drop function 314159();
--- no such function
+-- no such function
drop function nonesuch();
--
-- DROP TYPE
-
--- missing type name
+
+-- missing type name
drop type;
--- bad type name
+-- bad type name
drop type 314159;
--- no such type
+-- no such type
drop type nonesuch;
--
-- DROP OPERATOR
-
--- missing everything
+
+-- missing everything
drop operator;
--- bad operator name
+-- bad operator name
drop operator equals;
--- missing type list
+-- missing type list
drop operator ===;
--- missing parentheses
+-- missing parentheses
drop operator int4, int4;
--- missing operator name
+-- missing operator name
drop operator (int4, int4);
--- missing type list contents
+-- missing type list contents
drop operator === ();
--- no such operator
+-- no such operator
drop operator === (int4);
--- no such operator by that name
+-- no such operator by that name
drop operator === (int4, int4);
--- no such type1
+-- no such type1
drop operator = (nonesuch);
--- no such type1
+-- no such type1
drop operator = ( , int4);
--- no such type1
+-- no such type1
drop operator = (nonesuch, int4);
--- no such type2
+-- no such type2
drop operator = (int4, nonesuch);
--- no such type2
+-- no such type2
drop operator = (int4, );
--
-- DROP RULE
-
--- missing rule name
+
+-- missing rule name
drop rule;
--- bad rule name
+-- bad rule name
drop rule 314159;
--- no such rule
+-- no such rule
drop rule nonesuch on noplace;
-- these postquel variants are no longer supported
INSERT INTO 123
VALUES(123);
-INSERT INTO foo
+INSERT INTO foo
VALUES(123) 123
;
id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL);
-- long line to be truncated on the left
-CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
+CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL);
-- long line to be truncated on the right
-- long line to be truncated on the left, many lines
CREATE
TEMPORARY
-TABLE
-foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
-id4 INT4
-UNIQUE
-NOT
-NULL,
-id5 TEXT
-UNIQUE
-NOT
+TABLE
+foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL,
+id4 INT4
+UNIQUE
+NOT
+NULL,
+id5 TEXT
+UNIQUE
+NOT
NULL)
;
-- long line to be truncated on the right, many lines
-CREATE
+CREATE
TEMPORARY
-TABLE
+TABLE
foo(
id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY)
;
-- long line to be truncated both ways, many lines
-CREATE
+CREATE
TEMPORARY
-TABLE
+TABLE
foo
-(id
-INT4
-UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL,
-idz INT4 UNIQUE NOT NULL,
+(id
+INT4
+UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL,
+idz INT4 UNIQUE NOT NULL,
idv INT4 UNIQUE NOT NULL);
-- more than 10 lines...
-CREATE
+CREATE
TEMPORARY
-TABLE
+TABLE
foo
-(id
-INT4
-UNIQUE
-NOT
+(id
+INT4
+UNIQUE
+NOT
NULL
-,
+,
idm
-INT4
-UNIQUE
-NOT
+INT4
+UNIQUE
+NOT
NULL,
-idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL,
-idz INT4 UNIQUE NOT NULL,
-idv
-INT4
-UNIQUE
-NOT
+idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL,
+idz INT4 UNIQUE NOT NULL,
+idv
+INT4
+UNIQUE
+NOT
NULL);
-- Check that stack depth detection mechanism works and
INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20');
INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20');
--- test for over and under flow
+-- test for over and under flow
INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70');
INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70');
SELECT '' AS five, * FROM FLOAT4_TBL;
--- test the unary float4abs operator
+-- test the unary float4abs operator
SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f;
UPDATE FLOAT4_TBL
SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3';
-SELECT '' AS three, f.f1, f.f1 * '-10' AS x
+SELECT '' AS three, f.f1, f.f1 * '-10' AS x
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
SELECT '' AS one, f.f1 ^ '2.0' AS square_f1
FROM FLOAT8_TBL f where f.f1 = '1004.3';
--- absolute value
-SELECT '' AS five, f.f1, @f.f1 AS abs_f1
+-- absolute value
+SELECT '' AS five, f.f1, @f.f1 AS abs_f1
FROM FLOAT8_TBL f;
--- truncate
+-- truncate
SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1
FROM FLOAT8_TBL f;
--- round
+-- round
SELECT '' AS five, f.f1, round(f.f1) AS round_f1
FROM FLOAT8_TBL f;
-- sign
select sign(f1) as sign_f1 from float8_tbl f;
--- square root
+-- square root
SELECT sqrt(float8 '64') AS eight;
SELECT |/ float8 '64' AS eight;
-- power
SELECT power(float8 '144', float8 '0.5');
--- take exp of ln(f.f1)
+-- take exp of ln(f.f1)
SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1
FROM FLOAT8_TBL f
WHERE f.f1 > '0.0';
--- cube root
+-- cube root
SELECT ||/ float8 '27' AS three;
SELECT '' AS five, f.f1, ||/f.f1 AS cbrt_f1 FROM FLOAT8_TBL f;
SELECT '' AS five, * FROM FLOAT8_TBL;
--- test for over- and underflow
+-- test for over- and underflow
INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400');
-- check set NULL and table constraint on multiple columns
--
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2)
+CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2)
REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL);
-- Test comments
-- check set default and table constraint on multiple columns
--
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
+CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT);
-- Insert a value in PKTABLE for default
INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
+INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
+INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
+INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
+INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
INSERT INTO PKTABLE VALUES (2, -1, 5, 'test5');
-- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
+INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
+INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
INSERT INTO FKTABLE VALUES (2, 4, 5, 1);
INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
--
-- Tests for mismatched types
--
--- Basic one column, two table setup
+-- Basic one column, two table setup
CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
INSERT INTO PKTABLE VALUES(42);
-- This next should fail, because int=inet does not exist
CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable(ptest1, ptest2));
DROP TABLE PKTABLE;
--- And this,
+-- And this,
CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable);
DROP TABLE PKTABLE;
FROM hash_i4_heap h
WHERE h.random = 1;
-UPDATE hash_i4_heap
- SET seqno = 20000
+UPDATE hash_i4_heap
+ SET seqno = 20000
WHERE hash_i4_heap.random = 1492795354;
-SELECT h.seqno AS i20000
+SELECT h.seqno AS i20000
FROM hash_i4_heap h
WHERE h.random = 1492795354;
-UPDATE hash_name_heap
+UPDATE hash_name_heap
SET random = '0123456789abcdef'::name
WHERE hash_name_heap.seqno = 6543;
WHERE hash_name_heap.random = '76652222'::name;
--
--- this is the row we just replaced; index scan should return zero rows
+-- this is the row we just replaced; index scan should return zero rows
--
SELECT h.seqno AS emptyset
FROM hash_name_heap h
WHERE h.random = '76652222'::name;
-UPDATE hash_txt_heap
+UPDATE hash_txt_heap
SET random = '0123456789abcdefghijklmnop'::text
WHERE hash_txt_heap.seqno = 4002;
SET random = '-1234.1234'::float8
WHERE hash_f8_heap.seqno = 8906;
-SELECT h.seqno AS i8096, h.random AS f1234_1234
+SELECT h.seqno AS i8096, h.random AS f1234_1234
FROM hash_f8_heap h
WHERE h.random = '-1234.1234'::float8;
-UPDATE hash_f8_heap
+UPDATE hash_f8_heap
SET seqno = 20000
WHERE hash_f8_heap.random = '488912369'::float8;
create table hs_extreme (col1 integer);
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
-RETURNS void
-LANGUAGE plpgsql
+RETURNS void
+LANGUAGE plpgsql
AS $$
BEGIN
IF n <= 0 THEN RETURN; END IF;
set client_min_messages = 'warning';
CREATE OR REPLACE FUNCTION hs_locks_create (n integer)
-RETURNS void
-LANGUAGE plpgsql
+RETURNS void
+LANGUAGE plpgsql
AS $$
BEGIN
IF n <= 0 THEN
CHECKPOINT;
- RETURN;
+ RETURN;
END IF;
EXECUTE 'CREATE TABLE hs_locks_' || n::text || ' ()';
PERFORM hs_locks_create(n - 1);
$$;
CREATE OR REPLACE FUNCTION hs_locks_drop (n integer)
-RETURNS void
-LANGUAGE plpgsql
+RETURNS void
+LANGUAGE plpgsql
AS $$
BEGIN
IF n <= 0 THEN
CHECKPOINT;
- RETURN;
+ RETURN;
END IF;
EXECUTE 'DROP TABLE IF EXISTS hs_locks_' || n::text;
PERFORM hs_locks_drop(n - 1);
WHERE c = i;
SELECT '' AS ten, i, c,
- i < c AS lt, i <= c AS le, i = c AS eq,
+ i < c AS lt, i <= c AS le, i = c AS eq,
i >= c AS ge, i > c AS gt, i <> c AS ne,
i << c AS sb, i <<= c AS sbe,
i >> c AS sup, i >>= c AS spe
* Test double inheritance
*
* Ensure that defaults are NOT included unless
- * INCLUDING DEFAULTS is specified
+ * INCLUDING DEFAULTS is specified
*/
CREATE TABLE inhe (ee text, LIKE inhx) inherits (b);
INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4');
SELECT '' AS three, i.* FROM INT2_TBL i WHERE i.f1 >= int4 '0';
--- positive odds
+-- positive odds
SELECT '' AS one, i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
--- any evens
+-- any evens
SELECT '' AS three, i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
SELECT '' AS five, i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i;
-- TO_CHAR()
--
-SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999')
+SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999')
FROM INT8_TBL;
-SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999')
- FROM INT8_TBL;
+SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999')
+ FROM INT8_TBL;
-SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR')
+SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR')
FROM INT8_TBL;
-SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999')
+SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999')
FROM INT8_TBL;
-SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL;
SELECT '' AS to_char_6, to_char(q2, 'FMS9999999999999999') FROM INT8_TBL;
SELECT '' AS to_char_7, to_char(q2, 'FM9999999999999999THPR') FROM INT8_TBL;
-SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL;
-SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL;
-SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL;
-SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL;
+SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL;
+SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL;
SELECT '' AS to_char_12, to_char(q2, 'FM9999999999999999.000') FROM INT8_TBL;
-SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL;
+SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL;
SELECT '' AS to_char_14, to_char(q2, 'FM9999999999999999.999') FROM INT8_TBL;
SELECT '' AS to_char_15, to_char(q2, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9') FROM INT8_TBL;
SELECT '' AS to_char_16, to_char(q2, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM INT8_TBL;
SELECT '' AS one, * FROM INTERVAL_TBL
WHERE INTERVAL_TBL.f1 = interval '@ 34 years';
-SELECT '' AS five, * FROM INTERVAL_TBL
+SELECT '' AS five, * FROM INTERVAL_TBL
WHERE INTERVAL_TBL.f1 >= interval '@ 1 month';
SELECT '' AS nine, * FROM INTERVAL_TBL
-- Test multiplication and division with intervals.
--- Floating point arithmetic rounding errors can lead to unexpected results,
--- though the code attempts to do the right thing and round up to days and
--- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'.
--- Note that it is expected for some day components to be greater than 29 and
--- some time components be greater than 23:59:59 due to how intervals are
+-- Floating point arithmetic rounding errors can lead to unexpected results,
+-- though the code attempts to do the right thing and round up to days and
+-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'.
+-- Note that it is expected for some day components to be greater than 29 and
+-- some time components be greater than 23:59:59 due to how intervals are
-- stored internally.
CREATE TABLE INTERVAL_MULDIV_TBL (span interval);
SET IntervalStyle to postgres_verbose;
select interval '-10 mons -3 days +03:55:06.70';
select interval '1 year 2 mons 3 days 04:05:06.699999';
-select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds';
+select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds';
-- check that '30 days' equals '1 month' according to the hash function
select '30 days'::interval = '1 month'::interval as t;
-- Check the LIMIT/OFFSET feature of SELECT
--
-SELECT ''::text AS two, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 50
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 50
ORDER BY unique1 LIMIT 2;
-SELECT ''::text AS five, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 60
+SELECT ''::text AS five, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 60
ORDER BY unique1 LIMIT 5;
-SELECT ''::text AS two, unique1, unique2, stringu1
+SELECT ''::text AS two, unique1, unique2, stringu1
FROM onek WHERE unique1 > 60 AND unique1 < 63
ORDER BY unique1 LIMIT 5;
-SELECT ''::text AS three, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 100
+SELECT ''::text AS three, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 100
ORDER BY unique1 LIMIT 3 OFFSET 20;
-SELECT ''::text AS zero, unique1, unique2, stringu1
- FROM onek WHERE unique1 < 50
+SELECT ''::text AS zero, unique1, unique2, stringu1
+ FROM onek WHERE unique1 < 50
ORDER BY unique1 DESC LIMIT 8 OFFSET 99;
-SELECT ''::text AS eleven, unique1, unique2, stringu1
- FROM onek WHERE unique1 < 50
+SELECT ''::text AS eleven, unique1, unique2, stringu1
+ FROM onek WHERE unique1 < 50
ORDER BY unique1 DESC LIMIT 20 OFFSET 39;
-SELECT ''::text AS ten, unique1, unique2, stringu1
+SELECT ''::text AS ten, unique1, unique2, stringu1
FROM onek
ORDER BY unique1 OFFSET 990;
-SELECT ''::text AS five, unique1, unique2, stringu1
+SELECT ''::text AS five, unique1, unique2, stringu1
FROM onek
ORDER BY unique1 OFFSET 990 LIMIT 5;
-SELECT ''::text AS five, unique1, unique2, stringu1
+SELECT ''::text AS five, unique1, unique2, stringu1
FROM onek
ORDER BY unique1 LIMIT 5 OFFSET 900;
-- TO_CHAR()
--
-SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999')
+SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999')
FROM num_data;
SELECT '' AS to_char_2, to_char(val, '9G999G999G999G999G999D999G999G999G999G999')
SELECT '' AS to_char_4, to_char(val, '9999999999999999.999999999999999S')
FROM num_data;
-SELECT '' AS to_char_5, to_char(val, 'MI9999999999999999.999999999999999') FROM num_data;
+SELECT '' AS to_char_5, to_char(val, 'MI9999999999999999.999999999999999') FROM num_data;
SELECT '' AS to_char_6, to_char(val, 'FMS9999999999999999.999999999999999') FROM num_data;
SELECT '' AS to_char_7, to_char(val, 'FM9999999999999999.999999999999999THPR') FROM num_data;
-SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data;
-SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') FROM num_data;
-SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') FROM num_data;
-SELECT '' AS to_char_11, to_char(val, 'FM0999999999999999.999999999999999') FROM num_data;
+SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data;
+SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') FROM num_data;
+SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') FROM num_data;
+SELECT '' AS to_char_11, to_char(val, 'FM0999999999999999.999999999999999') FROM num_data;
SELECT '' AS to_char_12, to_char(val, 'FM9999999999999999.099999999999999') FROM num_data;
SELECT '' AS to_char_13, to_char(val, 'FM9999999999990999.990999999999999') FROM num_data;
SELECT '' AS to_char_14, to_char(val, 'FM0999999999999999.999909999999999') FROM num_data;
SELECT '' AS to_char_15, to_char(val, 'FM9999999990999999.099999999999999') FROM num_data;
-SELECT '' AS to_char_16, to_char(val, 'L9999999999999999.099999999999999') FROM num_data;
+SELECT '' AS to_char_16, to_char(val, 'L9999999999999999.099999999999999') FROM num_data;
SELECT '' AS to_char_17, to_char(val, 'FM9999999999999999.99999999999999') FROM num_data;
SELECT '' AS to_char_18, to_char(val, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data;
SELECT '' AS to_char_19, to_char(val, 'FMS 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data;
-- leading/trailing hard tab is also allowed
INSERT INTO OID_TBL(f1) VALUES (' 15 ');
--- bad inputs
+-- bad inputs
INSERT INTO OID_TBL(f1) VALUES ('');
INSERT INTO OID_TBL(f1) VALUES (' ');
INSERT INTO OID_TBL(f1) VALUES ('asdfasd');
--
-- This is created by pgsql/src/tools/findoidjoins/make_oidjoins_check
--
-SELECT ctid, aggfnoid
-FROM pg_catalog.pg_aggregate fk
-WHERE aggfnoid != 0 AND
+SELECT ctid, aggfnoid
+FROM pg_catalog.pg_aggregate fk
+WHERE aggfnoid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggfnoid);
-SELECT ctid, aggtransfn
-FROM pg_catalog.pg_aggregate fk
-WHERE aggtransfn != 0 AND
+SELECT ctid, aggtransfn
+FROM pg_catalog.pg_aggregate fk
+WHERE aggtransfn != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggtransfn);
-SELECT ctid, aggfinalfn
-FROM pg_catalog.pg_aggregate fk
-WHERE aggfinalfn != 0 AND
+SELECT ctid, aggfinalfn
+FROM pg_catalog.pg_aggregate fk
+WHERE aggfinalfn != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggfinalfn);
-SELECT ctid, aggsortop
-FROM pg_catalog.pg_aggregate fk
-WHERE aggsortop != 0 AND
+SELECT ctid, aggsortop
+FROM pg_catalog.pg_aggregate fk
+WHERE aggsortop != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.aggsortop);
-SELECT ctid, aggtranstype
-FROM pg_catalog.pg_aggregate fk
-WHERE aggtranstype != 0 AND
+SELECT ctid, aggtranstype
+FROM pg_catalog.pg_aggregate fk
+WHERE aggtranstype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.aggtranstype);
-SELECT ctid, amkeytype
-FROM pg_catalog.pg_am fk
-WHERE amkeytype != 0 AND
+SELECT ctid, amkeytype
+FROM pg_catalog.pg_am fk
+WHERE amkeytype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amkeytype);
-SELECT ctid, aminsert
-FROM pg_catalog.pg_am fk
-WHERE aminsert != 0 AND
+SELECT ctid, aminsert
+FROM pg_catalog.pg_am fk
+WHERE aminsert != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aminsert);
-SELECT ctid, ambeginscan
-FROM pg_catalog.pg_am fk
-WHERE ambeginscan != 0 AND
+SELECT ctid, ambeginscan
+FROM pg_catalog.pg_am fk
+WHERE ambeginscan != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambeginscan);
-SELECT ctid, amgettuple
-FROM pg_catalog.pg_am fk
-WHERE amgettuple != 0 AND
+SELECT ctid, amgettuple
+FROM pg_catalog.pg_am fk
+WHERE amgettuple != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amgettuple);
-SELECT ctid, amgetbitmap
-FROM pg_catalog.pg_am fk
-WHERE amgetbitmap != 0 AND
+SELECT ctid, amgetbitmap
+FROM pg_catalog.pg_am fk
+WHERE amgetbitmap != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amgetbitmap);
-SELECT ctid, amrescan
-FROM pg_catalog.pg_am fk
-WHERE amrescan != 0 AND
+SELECT ctid, amrescan
+FROM pg_catalog.pg_am fk
+WHERE amrescan != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amrescan);
-SELECT ctid, amendscan
-FROM pg_catalog.pg_am fk
-WHERE amendscan != 0 AND
+SELECT ctid, amendscan
+FROM pg_catalog.pg_am fk
+WHERE amendscan != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amendscan);
-SELECT ctid, ammarkpos
-FROM pg_catalog.pg_am fk
-WHERE ammarkpos != 0 AND
+SELECT ctid, ammarkpos
+FROM pg_catalog.pg_am fk
+WHERE ammarkpos != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ammarkpos);
-SELECT ctid, amrestrpos
-FROM pg_catalog.pg_am fk
-WHERE amrestrpos != 0 AND
+SELECT ctid, amrestrpos
+FROM pg_catalog.pg_am fk
+WHERE amrestrpos != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amrestrpos);
-SELECT ctid, ambuild
-FROM pg_catalog.pg_am fk
-WHERE ambuild != 0 AND
+SELECT ctid, ambuild
+FROM pg_catalog.pg_am fk
+WHERE ambuild != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambuild);
-SELECT ctid, ambulkdelete
-FROM pg_catalog.pg_am fk
-WHERE ambulkdelete != 0 AND
+SELECT ctid, ambulkdelete
+FROM pg_catalog.pg_am fk
+WHERE ambulkdelete != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambulkdelete);
-SELECT ctid, amvacuumcleanup
-FROM pg_catalog.pg_am fk
-WHERE amvacuumcleanup != 0 AND
+SELECT ctid, amvacuumcleanup
+FROM pg_catalog.pg_am fk
+WHERE amvacuumcleanup != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amvacuumcleanup);
-SELECT ctid, amcostestimate
-FROM pg_catalog.pg_am fk
-WHERE amcostestimate != 0 AND
+SELECT ctid, amcostestimate
+FROM pg_catalog.pg_am fk
+WHERE amcostestimate != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amcostestimate);
-SELECT ctid, amoptions
-FROM pg_catalog.pg_am fk
-WHERE amoptions != 0 AND
+SELECT ctid, amoptions
+FROM pg_catalog.pg_am fk
+WHERE amoptions != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amoptions);
-SELECT ctid, amopfamily
-FROM pg_catalog.pg_amop fk
-WHERE amopfamily != 0 AND
+SELECT ctid, amopfamily
+FROM pg_catalog.pg_amop fk
+WHERE amopfamily != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.amopfamily);
-SELECT ctid, amoplefttype
-FROM pg_catalog.pg_amop fk
-WHERE amoplefttype != 0 AND
+SELECT ctid, amoplefttype
+FROM pg_catalog.pg_amop fk
+WHERE amoplefttype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amoplefttype);
-SELECT ctid, amoprighttype
-FROM pg_catalog.pg_amop fk
-WHERE amoprighttype != 0 AND
+SELECT ctid, amoprighttype
+FROM pg_catalog.pg_amop fk
+WHERE amoprighttype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amoprighttype);
-SELECT ctid, amopopr
-FROM pg_catalog.pg_amop fk
-WHERE amopopr != 0 AND
+SELECT ctid, amopopr
+FROM pg_catalog.pg_amop fk
+WHERE amopopr != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.amopopr);
-SELECT ctid, amopmethod
-FROM pg_catalog.pg_amop fk
-WHERE amopmethod != 0 AND
+SELECT ctid, amopmethod
+FROM pg_catalog.pg_amop fk
+WHERE amopmethod != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.amopmethod);
-SELECT ctid, amprocfamily
-FROM pg_catalog.pg_amproc fk
-WHERE amprocfamily != 0 AND
+SELECT ctid, amprocfamily
+FROM pg_catalog.pg_amproc fk
+WHERE amprocfamily != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.amprocfamily);
-SELECT ctid, amproclefttype
-FROM pg_catalog.pg_amproc fk
-WHERE amproclefttype != 0 AND
+SELECT ctid, amproclefttype
+FROM pg_catalog.pg_amproc fk
+WHERE amproclefttype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amproclefttype);
-SELECT ctid, amprocrighttype
-FROM pg_catalog.pg_amproc fk
-WHERE amprocrighttype != 0 AND
+SELECT ctid, amprocrighttype
+FROM pg_catalog.pg_amproc fk
+WHERE amprocrighttype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amprocrighttype);
-SELECT ctid, amproc
-FROM pg_catalog.pg_amproc fk
-WHERE amproc != 0 AND
+SELECT ctid, amproc
+FROM pg_catalog.pg_amproc fk
+WHERE amproc != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amproc);
-SELECT ctid, attrelid
-FROM pg_catalog.pg_attribute fk
-WHERE attrelid != 0 AND
+SELECT ctid, attrelid
+FROM pg_catalog.pg_attribute fk
+WHERE attrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.attrelid);
-SELECT ctid, atttypid
-FROM pg_catalog.pg_attribute fk
-WHERE atttypid != 0 AND
+SELECT ctid, atttypid
+FROM pg_catalog.pg_attribute fk
+WHERE atttypid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.atttypid);
-SELECT ctid, castsource
-FROM pg_catalog.pg_cast fk
-WHERE castsource != 0 AND
+SELECT ctid, castsource
+FROM pg_catalog.pg_cast fk
+WHERE castsource != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.castsource);
-SELECT ctid, casttarget
-FROM pg_catalog.pg_cast fk
-WHERE casttarget != 0 AND
+SELECT ctid, casttarget
+FROM pg_catalog.pg_cast fk
+WHERE casttarget != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.casttarget);
-SELECT ctid, castfunc
-FROM pg_catalog.pg_cast fk
-WHERE castfunc != 0 AND
+SELECT ctid, castfunc
+FROM pg_catalog.pg_cast fk
+WHERE castfunc != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.castfunc);
-SELECT ctid, relnamespace
-FROM pg_catalog.pg_class fk
-WHERE relnamespace != 0 AND
+SELECT ctid, relnamespace
+FROM pg_catalog.pg_class fk
+WHERE relnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.relnamespace);
-SELECT ctid, reltype
-FROM pg_catalog.pg_class fk
-WHERE reltype != 0 AND
+SELECT ctid, reltype
+FROM pg_catalog.pg_class fk
+WHERE reltype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.reltype);
-SELECT ctid, relowner
-FROM pg_catalog.pg_class fk
-WHERE relowner != 0 AND
+SELECT ctid, relowner
+FROM pg_catalog.pg_class fk
+WHERE relowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.relowner);
-SELECT ctid, relam
-FROM pg_catalog.pg_class fk
-WHERE relam != 0 AND
+SELECT ctid, relam
+FROM pg_catalog.pg_class fk
+WHERE relam != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.relam);
-SELECT ctid, reltablespace
-FROM pg_catalog.pg_class fk
-WHERE reltablespace != 0 AND
+SELECT ctid, reltablespace
+FROM pg_catalog.pg_class fk
+WHERE reltablespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_tablespace pk WHERE pk.oid = fk.reltablespace);
-SELECT ctid, reltoastrelid
-FROM pg_catalog.pg_class fk
-WHERE reltoastrelid != 0 AND
+SELECT ctid, reltoastrelid
+FROM pg_catalog.pg_class fk
+WHERE reltoastrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.reltoastrelid);
-SELECT ctid, reltoastidxid
-FROM pg_catalog.pg_class fk
-WHERE reltoastidxid != 0 AND
+SELECT ctid, reltoastidxid
+FROM pg_catalog.pg_class fk
+WHERE reltoastidxid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.reltoastidxid);
-SELECT ctid, connamespace
-FROM pg_catalog.pg_constraint fk
-WHERE connamespace != 0 AND
+SELECT ctid, connamespace
+FROM pg_catalog.pg_constraint fk
+WHERE connamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.connamespace);
-SELECT ctid, contypid
-FROM pg_catalog.pg_constraint fk
-WHERE contypid != 0 AND
+SELECT ctid, contypid
+FROM pg_catalog.pg_constraint fk
+WHERE contypid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.contypid);
-SELECT ctid, connamespace
-FROM pg_catalog.pg_conversion fk
-WHERE connamespace != 0 AND
+SELECT ctid, connamespace
+FROM pg_catalog.pg_conversion fk
+WHERE connamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.connamespace);
-SELECT ctid, conowner
-FROM pg_catalog.pg_conversion fk
-WHERE conowner != 0 AND
+SELECT ctid, conowner
+FROM pg_catalog.pg_conversion fk
+WHERE conowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.conowner);
-SELECT ctid, conproc
-FROM pg_catalog.pg_conversion fk
-WHERE conproc != 0 AND
+SELECT ctid, conproc
+FROM pg_catalog.pg_conversion fk
+WHERE conproc != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.conproc);
-SELECT ctid, datdba
-FROM pg_catalog.pg_database fk
-WHERE datdba != 0 AND
+SELECT ctid, datdba
+FROM pg_catalog.pg_database fk
+WHERE datdba != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.datdba);
-SELECT ctid, dattablespace
-FROM pg_catalog.pg_database fk
-WHERE dattablespace != 0 AND
+SELECT ctid, dattablespace
+FROM pg_catalog.pg_database fk
+WHERE dattablespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_tablespace pk WHERE pk.oid = fk.dattablespace);
-SELECT ctid, setdatabase
-FROM pg_catalog.pg_db_role_setting fk
-WHERE setdatabase != 0 AND
+SELECT ctid, setdatabase
+FROM pg_catalog.pg_db_role_setting fk
+WHERE setdatabase != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_database pk WHERE pk.oid = fk.setdatabase);
-SELECT ctid, classid
-FROM pg_catalog.pg_depend fk
-WHERE classid != 0 AND
+SELECT ctid, classid
+FROM pg_catalog.pg_depend fk
+WHERE classid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classid);
-SELECT ctid, refclassid
-FROM pg_catalog.pg_depend fk
-WHERE refclassid != 0 AND
+SELECT ctid, refclassid
+FROM pg_catalog.pg_depend fk
+WHERE refclassid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.refclassid);
-SELECT ctid, classoid
-FROM pg_catalog.pg_description fk
-WHERE classoid != 0 AND
+SELECT ctid, classoid
+FROM pg_catalog.pg_description fk
+WHERE classoid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classoid);
-SELECT ctid, indexrelid
-FROM pg_catalog.pg_index fk
-WHERE indexrelid != 0 AND
+SELECT ctid, indexrelid
+FROM pg_catalog.pg_index fk
+WHERE indexrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.indexrelid);
-SELECT ctid, indrelid
-FROM pg_catalog.pg_index fk
-WHERE indrelid != 0 AND
+SELECT ctid, indrelid
+FROM pg_catalog.pg_index fk
+WHERE indrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.indrelid);
-SELECT ctid, lanowner
-FROM pg_catalog.pg_language fk
-WHERE lanowner != 0 AND
+SELECT ctid, lanowner
+FROM pg_catalog.pg_language fk
+WHERE lanowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.lanowner);
-SELECT ctid, lanplcallfoid
-FROM pg_catalog.pg_language fk
-WHERE lanplcallfoid != 0 AND
+SELECT ctid, lanplcallfoid
+FROM pg_catalog.pg_language fk
+WHERE lanplcallfoid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanplcallfoid);
-SELECT ctid, laninline
-FROM pg_catalog.pg_language fk
-WHERE laninline != 0 AND
+SELECT ctid, laninline
+FROM pg_catalog.pg_language fk
+WHERE laninline != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.laninline);
-SELECT ctid, lanvalidator
-FROM pg_catalog.pg_language fk
-WHERE lanvalidator != 0 AND
+SELECT ctid, lanvalidator
+FROM pg_catalog.pg_language fk
+WHERE lanvalidator != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanvalidator);
-SELECT ctid, nspowner
-FROM pg_catalog.pg_namespace fk
-WHERE nspowner != 0 AND
+SELECT ctid, nspowner
+FROM pg_catalog.pg_namespace fk
+WHERE nspowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.nspowner);
-SELECT ctid, opcmethod
-FROM pg_catalog.pg_opclass fk
-WHERE opcmethod != 0 AND
+SELECT ctid, opcmethod
+FROM pg_catalog.pg_opclass fk
+WHERE opcmethod != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.opcmethod);
-SELECT ctid, opcnamespace
-FROM pg_catalog.pg_opclass fk
-WHERE opcnamespace != 0 AND
+SELECT ctid, opcnamespace
+FROM pg_catalog.pg_opclass fk
+WHERE opcnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.opcnamespace);
-SELECT ctid, opcowner
-FROM pg_catalog.pg_opclass fk
-WHERE opcowner != 0 AND
+SELECT ctid, opcowner
+FROM pg_catalog.pg_opclass fk
+WHERE opcowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.opcowner);
-SELECT ctid, opcfamily
-FROM pg_catalog.pg_opclass fk
-WHERE opcfamily != 0 AND
+SELECT ctid, opcfamily
+FROM pg_catalog.pg_opclass fk
+WHERE opcfamily != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.opcfamily);
-SELECT ctid, opcintype
-FROM pg_catalog.pg_opclass fk
-WHERE opcintype != 0 AND
+SELECT ctid, opcintype
+FROM pg_catalog.pg_opclass fk
+WHERE opcintype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.opcintype);
-SELECT ctid, opckeytype
-FROM pg_catalog.pg_opclass fk
-WHERE opckeytype != 0 AND
+SELECT ctid, opckeytype
+FROM pg_catalog.pg_opclass fk
+WHERE opckeytype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.opckeytype);
-SELECT ctid, oprnamespace
-FROM pg_catalog.pg_operator fk
-WHERE oprnamespace != 0 AND
+SELECT ctid, oprnamespace
+FROM pg_catalog.pg_operator fk
+WHERE oprnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.oprnamespace);
-SELECT ctid, oprowner
-FROM pg_catalog.pg_operator fk
-WHERE oprowner != 0 AND
+SELECT ctid, oprowner
+FROM pg_catalog.pg_operator fk
+WHERE oprowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.oprowner);
-SELECT ctid, oprleft
-FROM pg_catalog.pg_operator fk
-WHERE oprleft != 0 AND
+SELECT ctid, oprleft
+FROM pg_catalog.pg_operator fk
+WHERE oprleft != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprleft);
-SELECT ctid, oprright
-FROM pg_catalog.pg_operator fk
-WHERE oprright != 0 AND
+SELECT ctid, oprright
+FROM pg_catalog.pg_operator fk
+WHERE oprright != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprright);
-SELECT ctid, oprresult
-FROM pg_catalog.pg_operator fk
-WHERE oprresult != 0 AND
+SELECT ctid, oprresult
+FROM pg_catalog.pg_operator fk
+WHERE oprresult != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprresult);
-SELECT ctid, oprcom
-FROM pg_catalog.pg_operator fk
-WHERE oprcom != 0 AND
+SELECT ctid, oprcom
+FROM pg_catalog.pg_operator fk
+WHERE oprcom != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprcom);
-SELECT ctid, oprnegate
-FROM pg_catalog.pg_operator fk
-WHERE oprnegate != 0 AND
+SELECT ctid, oprnegate
+FROM pg_catalog.pg_operator fk
+WHERE oprnegate != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprnegate);
-SELECT ctid, oprcode
-FROM pg_catalog.pg_operator fk
-WHERE oprcode != 0 AND
+SELECT ctid, oprcode
+FROM pg_catalog.pg_operator fk
+WHERE oprcode != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprcode);
-SELECT ctid, oprrest
-FROM pg_catalog.pg_operator fk
-WHERE oprrest != 0 AND
+SELECT ctid, oprrest
+FROM pg_catalog.pg_operator fk
+WHERE oprrest != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprrest);
-SELECT ctid, oprjoin
-FROM pg_catalog.pg_operator fk
-WHERE oprjoin != 0 AND
+SELECT ctid, oprjoin
+FROM pg_catalog.pg_operator fk
+WHERE oprjoin != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprjoin);
-SELECT ctid, opfmethod
-FROM pg_catalog.pg_opfamily fk
-WHERE opfmethod != 0 AND
+SELECT ctid, opfmethod
+FROM pg_catalog.pg_opfamily fk
+WHERE opfmethod != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.opfmethod);
-SELECT ctid, opfnamespace
-FROM pg_catalog.pg_opfamily fk
-WHERE opfnamespace != 0 AND
+SELECT ctid, opfnamespace
+FROM pg_catalog.pg_opfamily fk
+WHERE opfnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.opfnamespace);
-SELECT ctid, opfowner
-FROM pg_catalog.pg_opfamily fk
-WHERE opfowner != 0 AND
+SELECT ctid, opfowner
+FROM pg_catalog.pg_opfamily fk
+WHERE opfowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.opfowner);
-SELECT ctid, pronamespace
-FROM pg_catalog.pg_proc fk
-WHERE pronamespace != 0 AND
+SELECT ctid, pronamespace
+FROM pg_catalog.pg_proc fk
+WHERE pronamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.pronamespace);
-SELECT ctid, proowner
-FROM pg_catalog.pg_proc fk
-WHERE proowner != 0 AND
+SELECT ctid, proowner
+FROM pg_catalog.pg_proc fk
+WHERE proowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.proowner);
-SELECT ctid, prolang
-FROM pg_catalog.pg_proc fk
-WHERE prolang != 0 AND
+SELECT ctid, prolang
+FROM pg_catalog.pg_proc fk
+WHERE prolang != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_language pk WHERE pk.oid = fk.prolang);
-SELECT ctid, prorettype
-FROM pg_catalog.pg_proc fk
-WHERE prorettype != 0 AND
+SELECT ctid, prorettype
+FROM pg_catalog.pg_proc fk
+WHERE prorettype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.prorettype);
-SELECT ctid, ev_class
-FROM pg_catalog.pg_rewrite fk
-WHERE ev_class != 0 AND
+SELECT ctid, ev_class
+FROM pg_catalog.pg_rewrite fk
+WHERE ev_class != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.ev_class);
-SELECT ctid, refclassid
-FROM pg_catalog.pg_shdepend fk
-WHERE refclassid != 0 AND
+SELECT ctid, refclassid
+FROM pg_catalog.pg_shdepend fk
+WHERE refclassid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.refclassid);
-SELECT ctid, classoid
-FROM pg_catalog.pg_shdescription fk
-WHERE classoid != 0 AND
+SELECT ctid, classoid
+FROM pg_catalog.pg_shdescription fk
+WHERE classoid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classoid);
-SELECT ctid, starelid
-FROM pg_catalog.pg_statistic fk
-WHERE starelid != 0 AND
+SELECT ctid, starelid
+FROM pg_catalog.pg_statistic fk
+WHERE starelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.starelid);
-SELECT ctid, staop1
-FROM pg_catalog.pg_statistic fk
-WHERE staop1 != 0 AND
+SELECT ctid, staop1
+FROM pg_catalog.pg_statistic fk
+WHERE staop1 != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop1);
-SELECT ctid, staop2
-FROM pg_catalog.pg_statistic fk
-WHERE staop2 != 0 AND
+SELECT ctid, staop2
+FROM pg_catalog.pg_statistic fk
+WHERE staop2 != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop2);
-SELECT ctid, staop3
-FROM pg_catalog.pg_statistic fk
-WHERE staop3 != 0 AND
+SELECT ctid, staop3
+FROM pg_catalog.pg_statistic fk
+WHERE staop3 != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop3);
-SELECT ctid, spcowner
-FROM pg_catalog.pg_tablespace fk
-WHERE spcowner != 0 AND
+SELECT ctid, spcowner
+FROM pg_catalog.pg_tablespace fk
+WHERE spcowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.spcowner);
-SELECT ctid, cfgnamespace
-FROM pg_catalog.pg_ts_config fk
-WHERE cfgnamespace != 0 AND
+SELECT ctid, cfgnamespace
+FROM pg_catalog.pg_ts_config fk
+WHERE cfgnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.cfgnamespace);
-SELECT ctid, cfgowner
-FROM pg_catalog.pg_ts_config fk
-WHERE cfgowner != 0 AND
+SELECT ctid, cfgowner
+FROM pg_catalog.pg_ts_config fk
+WHERE cfgowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.cfgowner);
-SELECT ctid, cfgparser
-FROM pg_catalog.pg_ts_config fk
-WHERE cfgparser != 0 AND
+SELECT ctid, cfgparser
+FROM pg_catalog.pg_ts_config fk
+WHERE cfgparser != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_parser pk WHERE pk.oid = fk.cfgparser);
-SELECT ctid, mapcfg
-FROM pg_catalog.pg_ts_config_map fk
-WHERE mapcfg != 0 AND
+SELECT ctid, mapcfg
+FROM pg_catalog.pg_ts_config_map fk
+WHERE mapcfg != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_config pk WHERE pk.oid = fk.mapcfg);
-SELECT ctid, mapdict
-FROM pg_catalog.pg_ts_config_map fk
-WHERE mapdict != 0 AND
+SELECT ctid, mapdict
+FROM pg_catalog.pg_ts_config_map fk
+WHERE mapdict != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_dict pk WHERE pk.oid = fk.mapdict);
-SELECT ctid, dictnamespace
-FROM pg_catalog.pg_ts_dict fk
-WHERE dictnamespace != 0 AND
+SELECT ctid, dictnamespace
+FROM pg_catalog.pg_ts_dict fk
+WHERE dictnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.dictnamespace);
-SELECT ctid, dictowner
-FROM pg_catalog.pg_ts_dict fk
-WHERE dictowner != 0 AND
+SELECT ctid, dictowner
+FROM pg_catalog.pg_ts_dict fk
+WHERE dictowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.dictowner);
-SELECT ctid, dicttemplate
-FROM pg_catalog.pg_ts_dict fk
-WHERE dicttemplate != 0 AND
+SELECT ctid, dicttemplate
+FROM pg_catalog.pg_ts_dict fk
+WHERE dicttemplate != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_template pk WHERE pk.oid = fk.dicttemplate);
-SELECT ctid, prsnamespace
-FROM pg_catalog.pg_ts_parser fk
-WHERE prsnamespace != 0 AND
+SELECT ctid, prsnamespace
+FROM pg_catalog.pg_ts_parser fk
+WHERE prsnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.prsnamespace);
-SELECT ctid, prsstart
-FROM pg_catalog.pg_ts_parser fk
-WHERE prsstart != 0 AND
+SELECT ctid, prsstart
+FROM pg_catalog.pg_ts_parser fk
+WHERE prsstart != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsstart);
-SELECT ctid, prstoken
-FROM pg_catalog.pg_ts_parser fk
-WHERE prstoken != 0 AND
+SELECT ctid, prstoken
+FROM pg_catalog.pg_ts_parser fk
+WHERE prstoken != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prstoken);
-SELECT ctid, prsend
-FROM pg_catalog.pg_ts_parser fk
-WHERE prsend != 0 AND
+SELECT ctid, prsend
+FROM pg_catalog.pg_ts_parser fk
+WHERE prsend != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsend);
-SELECT ctid, prsheadline
-FROM pg_catalog.pg_ts_parser fk
-WHERE prsheadline != 0 AND
+SELECT ctid, prsheadline
+FROM pg_catalog.pg_ts_parser fk
+WHERE prsheadline != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsheadline);
-SELECT ctid, prslextype
-FROM pg_catalog.pg_ts_parser fk
-WHERE prslextype != 0 AND
+SELECT ctid, prslextype
+FROM pg_catalog.pg_ts_parser fk
+WHERE prslextype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prslextype);
-SELECT ctid, tmplnamespace
-FROM pg_catalog.pg_ts_template fk
-WHERE tmplnamespace != 0 AND
+SELECT ctid, tmplnamespace
+FROM pg_catalog.pg_ts_template fk
+WHERE tmplnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.tmplnamespace);
-SELECT ctid, tmplinit
-FROM pg_catalog.pg_ts_template fk
-WHERE tmplinit != 0 AND
+SELECT ctid, tmplinit
+FROM pg_catalog.pg_ts_template fk
+WHERE tmplinit != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.tmplinit);
-SELECT ctid, tmpllexize
-FROM pg_catalog.pg_ts_template fk
-WHERE tmpllexize != 0 AND
+SELECT ctid, tmpllexize
+FROM pg_catalog.pg_ts_template fk
+WHERE tmpllexize != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.tmpllexize);
-SELECT ctid, typnamespace
-FROM pg_catalog.pg_type fk
-WHERE typnamespace != 0 AND
+SELECT ctid, typnamespace
+FROM pg_catalog.pg_type fk
+WHERE typnamespace != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.typnamespace);
-SELECT ctid, typowner
-FROM pg_catalog.pg_type fk
-WHERE typowner != 0 AND
+SELECT ctid, typowner
+FROM pg_catalog.pg_type fk
+WHERE typowner != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.typowner);
-SELECT ctid, typrelid
-FROM pg_catalog.pg_type fk
-WHERE typrelid != 0 AND
+SELECT ctid, typrelid
+FROM pg_catalog.pg_type fk
+WHERE typrelid != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.typrelid);
-SELECT ctid, typelem
-FROM pg_catalog.pg_type fk
-WHERE typelem != 0 AND
+SELECT ctid, typelem
+FROM pg_catalog.pg_type fk
+WHERE typelem != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typelem);
-SELECT ctid, typarray
-FROM pg_catalog.pg_type fk
-WHERE typarray != 0 AND
+SELECT ctid, typarray
+FROM pg_catalog.pg_type fk
+WHERE typarray != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typarray);
-SELECT ctid, typinput
-FROM pg_catalog.pg_type fk
-WHERE typinput != 0 AND
+SELECT ctid, typinput
+FROM pg_catalog.pg_type fk
+WHERE typinput != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typinput);
-SELECT ctid, typoutput
-FROM pg_catalog.pg_type fk
-WHERE typoutput != 0 AND
+SELECT ctid, typoutput
+FROM pg_catalog.pg_type fk
+WHERE typoutput != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typoutput);
-SELECT ctid, typreceive
-FROM pg_catalog.pg_type fk
-WHERE typreceive != 0 AND
+SELECT ctid, typreceive
+FROM pg_catalog.pg_type fk
+WHERE typreceive != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typreceive);
-SELECT ctid, typsend
-FROM pg_catalog.pg_type fk
-WHERE typsend != 0 AND
+SELECT ctid, typsend
+FROM pg_catalog.pg_type fk
+WHERE typsend != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typsend);
-SELECT ctid, typmodin
-FROM pg_catalog.pg_type fk
-WHERE typmodin != 0 AND
+SELECT ctid, typmodin
+FROM pg_catalog.pg_type fk
+WHERE typmodin != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typmodin);
-SELECT ctid, typmodout
-FROM pg_catalog.pg_type fk
-WHERE typmodout != 0 AND
+SELECT ctid, typmodout
+FROM pg_catalog.pg_type fk
+WHERE typmodout != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typmodout);
-SELECT ctid, typanalyze
-FROM pg_catalog.pg_type fk
-WHERE typanalyze != 0 AND
+SELECT ctid, typanalyze
+FROM pg_catalog.pg_type fk
+WHERE typanalyze != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typanalyze);
-SELECT ctid, typbasetype
-FROM pg_catalog.pg_type fk
-WHERE typbasetype != 0 AND
+SELECT ctid, typbasetype
+FROM pg_catalog.pg_type fk
+WHERE typbasetype != 0 AND
NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typbasetype);
drop function exc_using(int, text);
create or replace function exc_using(int) returns void as $$
-declare
+declare
c refcursor;
i int;
begin
raise notice '%', i;
end loop;
close c;
- return;
+ return;
end;
$$ language plpgsql;
INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)');
--- bad format points
+-- bad format points
INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf');
INSERT INTO POINT_TBL(f1) VALUES ('10.0,10.0');
SELECT '' AS six, * FROM POINT_TBL;
--- left of
+-- left of
SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 << '(0.0, 0.0)';
--- right of
+-- right of
SELECT '' AS three, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >> p.f1;
--- above
+-- above
SELECT '' AS one, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >^ p.f1;
--- below
+-- below
SELECT '' AS one, p.* FROM POINT_TBL p WHERE p.f1 <^ '(0.0, 0.0)';
--- equal
+-- equal
SELECT '' AS one, p.* FROM POINT_TBL p WHERE p.f1 ~= '(5.1, 34.5)';
--- point in box
+-- point in box
SELECT '' AS three, p.* FROM POINT_TBL p
WHERE p.f1 <@ box '(0,0,100,100)';
-- put distance result into output to allow sorting with GEQ optimizer - tgl 97/05/10
SELECT '' AS three, p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS distance
- FROM POINT_TBL p1, POINT_TBL p2
+ FROM POINT_TBL p1, POINT_TBL p2
WHERE (p1.f1 <-> p2.f1) > 3 and p1.f1 << p2.f1 and p1.f1 >^ p2.f1
ORDER BY distance;
INSERT INTO POLYGON_TBL(f1) VALUES ('(3.0,1.0),(3.0,3.0),(1.0,0.0)');
--- degenerate polygons
+-- degenerate polygons
INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,0.0)');
INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,1.0),(0.0,1.0)');
--- bad polygon input strings
+-- bad polygon input strings
INSERT INTO POLYGON_TBL(f1) VALUES ('0.0');
INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0 0.0');
SELECT '' AS four, * FROM POLYGON_TBL;
--- overlap
+-- overlap
SELECT '' AS three, p.*
FROM POLYGON_TBL p
WHERE p.f1 && '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--- left overlap
-SELECT '' AS four, p.*
+-- left overlap
+SELECT '' AS four, p.*
FROM POLYGON_TBL p
WHERE p.f1 &< '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--- right overlap
-SELECT '' AS two, p.*
+-- right overlap
+SELECT '' AS two, p.*
FROM POLYGON_TBL p
WHERE p.f1 &> '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--- left of
+-- left of
SELECT '' AS one, p.*
FROM POLYGON_TBL p
WHERE p.f1 << '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--- right of
+-- right of
SELECT '' AS zero, p.*
FROM POLYGON_TBL p
WHERE p.f1 >> '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--- contained
-SELECT '' AS one, p.*
+-- contained
+SELECT '' AS one, p.*
FROM POLYGON_TBL p
WHERE p.f1 <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--- same
+-- same
SELECT '' AS one, p.*
FROM POLYGON_TBL p
WHERE p.f1 ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--- contains
+-- contains
SELECT '' AS one, p.*
FROM POLYGON_TBL p
WHERE p.f1 @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)';
--
-- 0 1 2 3 4
--
--- left of
+-- left of
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
--- left overlap
+-- left overlap
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true;
--- right overlap
+-- right overlap
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' &> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
--- right of
+-- right of
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' >> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
--- contained in
+-- contained in
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
--- contains
+-- contains
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
-- +------------------------+
-- | *---* 1
--- | + | |
+-- | + | |
-- | 2 *---*
-- +------------------------+
-- 3
SELECT '((0,4),(6,4),(1,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "false";
-- +-----------+
--- | *---* /
--- | | |/
--- | | +
--- | | |\
+-- | *---* /
+-- | | |/
+-- | | +
+-- | | |\
-- | *---* \
-- +-----------+
SELECT '((0,4),(6,4),(3,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "true";
-- +---------+
SELECT '((0,0),(0,3),(3,3),(3,0))'::polygon @> '((2,1),(2,2),(3,2),(3,1))'::polygon AS "true";
--- same
+-- same
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false;
--- overlap
+-- overlap
SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' && polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true;
-- +--------------------+
-- | *---* 1
--- | + | |
+-- | + | |
-- | 2 *---*
-- +--------------------+
-- 3
-- Make sure snapshot management works okay, per bug report in
-BEGIN;
-SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
-CREATE TABLE cursor (a int);
-INSERT INTO cursor VALUES (1);
-DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE;
-UPDATE cursor SET a = 2;
-FETCH ALL FROM c1;
-COMMIT;
+BEGIN;
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE cursor (a int);
+INSERT INTO cursor VALUES (1);
+DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE;
+UPDATE cursor SET a = 2;
+FETCH ALL FROM c1;
+COMMIT;
DROP TABLE cursor;
BEGIN;
-DECLARE foo13 CURSOR FOR
+DECLARE foo13 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 50;
-DECLARE foo14 CURSOR FOR
+DECLARE foo14 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 51;
-DECLARE foo15 CURSOR FOR
+DECLARE foo15 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 52;
-DECLARE foo16 CURSOR FOR
+DECLARE foo16 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 53;
-DECLARE foo17 CURSOR FOR
+DECLARE foo17 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 54;
-DECLARE foo18 CURSOR FOR
+DECLARE foo18 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 55;
-DECLARE foo19 CURSOR FOR
+DECLARE foo19 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 56;
-DECLARE foo20 CURSOR FOR
+DECLARE foo20 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 57;
-DECLARE foo21 CURSOR FOR
+DECLARE foo21 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 58;
-DECLARE foo22 CURSOR FOR
+DECLARE foo22 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 59;
-DECLARE foo23 CURSOR FOR
+DECLARE foo23 CURSOR FOR
SELECT * FROM onek WHERE unique1 = 60;
-DECLARE foo24 CURSOR FOR
+DECLARE foo24 CURSOR FOR
SELECT * FROM onek2 WHERE unique1 = 50;
-DECLARE foo25 CURSOR FOR
+DECLARE foo25 CURSOR FOR
SELECT * FROM onek2 WHERE unique1 = 60;
FETCH all in foo13;
create table rtest_admin (pname text, sysname text);
create rule rtest_sys_upd as on update to rtest_system do also (
- update rtest_interface set sysname = new.sysname
+ update rtest_interface set sysname = new.sysname
where sysname = old.sysname;
- update rtest_admin set sysname = new.sysname
+ update rtest_admin set sysname = new.sysname
where sysname = old.sysname
);
--
-- Tables and rules for the multiple cascaded qualified instead
--- rule test
+-- rule test
--
create table rtest_t4 (a int4, b text);
create table rtest_t5 (a int4, b text);
create table rtest_view2 (a int4);
create table rtest_view3 (a int4, b text);
create table rtest_view4 (a int4, b text, c int4);
-create view rtest_vview1 as select a, b from rtest_view1 X
+create view rtest_vview1 as select a, b from rtest_view1 X
where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a);
create view rtest_vview2 as select a, b from rtest_view1 where v;
create view rtest_vview3 as select a, b from rtest_vview2 X
factor float
);
-create view rtest_vcomp as
+create view rtest_vcomp as
select X.part, (X.size * Y.factor) as size_in_cm
from rtest_comp X, rtest_unitfact Y
where X.unit = Y.unit;
on update to vview do instead
(
insert into cchild (pid, descrip)
- select old.pid, new.descrip where old.descrip isnull;
+ select old.pid, new.descrip where old.descrip isnull;
update cchild set descrip = new.descrip where cchild.pid = old.pid;
);
--
SELECT viewname, definition FROM pg_views WHERE schemaname <> 'information_schema' ORDER BY viewname;
-SELECT tablename, rulename, definition FROM pg_rules
+SELECT tablename, rulename, definition FROM pg_rules
ORDER BY tablename, rulename;
--
create table rule_and_refint_t1 (
id1a integer,
id1b integer,
-
+
primary key (id1a, id1b)
);
create table rule_and_refint_t2 (
id2a integer,
id2c integer,
-
+
primary key (id2a, id2c)
);
create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1);
create temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1);
-create rule t1_ins_1 as on insert to t1
+create rule t1_ins_1 as on insert to t1
where new.a >= 0 and new.a < 10
do instead
insert into t1_1 values (new.a);
-create rule t1_ins_2 as on insert to t1
+create rule t1_ins_2 as on insert to t1
where new.a >= 10 and new.a < 20
do instead
insert into t1_2 values (new.a);
-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
--
SELECT onek.unique1, onek.stringu1 FROM onek
- WHERE onek.unique1 < 20
+ WHERE onek.unique1 < 20
ORDER BY unique1 using >;
--
-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2
--
SELECT onek.unique1, onek.stringu1 FROM onek
- WHERE onek.unique1 > 980
+ WHERE onek.unique1 > 980
ORDER BY stringu1 using <;
-
+
--
-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data |
-- sort +1d -2 +0nr -1
--
SELECT onek.unique1, onek.string4 FROM onek
- WHERE onek.unique1 > 980
+ WHERE onek.unique1 > 980
ORDER BY string4 using <, unique1 using >;
-
+
--
-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data |
-- sort +1dr -2 +0n -1
SELECT onek.unique1, onek.string4 FROM onek
WHERE onek.unique1 > 980
ORDER BY string4 using >, unique1 using <;
-
+
--
-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data |
-- sort +0nr -1 +1d -2
-- sort +0n -1 +1dr -2
--
SELECT onek.unique1, onek.string4 FROM onek
- WHERE onek.unique1 < 20
+ WHERE onek.unique1 < 20
ORDER BY unique1 using <, string4 using >;
--
-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
--
SELECT onek2.unique1, onek2.stringu1 FROM onek2
- WHERE onek2.unique1 < 20
+ WHERE onek2.unique1 < 20
ORDER BY unique1 using >;
--
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
-- failure expected
-SELECT count(*) FROM test_missing_target x, test_missing_target y
+SELECT count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY b ORDER BY b;
GROUP BY a/2 ORDER BY a/2;
-- group w/ existing GROUP BY target under ambiguous condition
-SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
+SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
-- group w/o existing GROUP BY target under ambiguous condition
-SELECT count(*) FROM test_missing_target x, test_missing_target y
+SELECT count(*) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
-- group w/o existing GROUP BY target under ambiguous condition
-- into a table
-SELECT count(*) INTO TABLE test_missing_target2
-FROM test_missing_target x, test_missing_target y
+SELECT count(*) INTO TABLE test_missing_target2
+FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b ORDER BY x.b;
SELECT * FROM test_missing_target2;
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
-- failure expected
-SELECT count(x.a) FROM test_missing_target x, test_missing_target y
+SELECT count(x.a) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY b/2 ORDER BY b/2;
-- group w/ existing GROUP BY target under ambiguous condition
-SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
+SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2 ORDER BY x.b/2;
-- group w/o existing GROUP BY target under ambiguous condition
-- failure expected due to ambiguous b in count(b)
-SELECT count(b) FROM test_missing_target x, test_missing_target y
+SELECT count(b) FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2;
-- group w/o existing GROUP BY target under ambiguous condition
-- into a table
-SELECT count(x.b) INTO TABLE test_missing_target3
-FROM test_missing_target x, test_missing_target y
+SELECT count(x.b) INTO TABLE test_missing_target3
+FROM test_missing_target x, test_missing_target y
WHERE x.a = y.a
GROUP BY x.b/2 ORDER BY x.b/2;
SELECT * FROM test_missing_target3;
---
--- test creation of SERIAL column
---
-
+
CREATE TABLE serialTest (f1 text, f2 serial);
-
+
INSERT INTO serialTest VALUES ('foo');
INSERT INTO serialTest VALUES ('bar');
INSERT INTO serialTest VALUES ('force', 100);
INSERT INTO serialTest VALUES ('wrong', NULL);
-
+
SELECT * FROM serialTest;
-- basic sequence operations using both text and oid references
CREATE SEQUENCE sequence_test;
-
+
SELECT nextval('sequence_test'::text);
SELECT nextval('sequence_test'::regclass);
SELECT currval('sequence_test'::text);
ELSE 'Approved'
END)
ELSE 'PO'
- END)
+ END)
END) AS "Status",
(CASE
WHEN ord.ordercancelled
ELSE 'Approved'
END)
ELSE 'PO'
- END)
+ END)
END) AS "Status_OK"
FROM orderstest ord;
INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097');
INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC');
-SELECT '' AS "64", d1 FROM TIMESTAMP_TBL;
+SELECT '' AS "64", d1 FROM TIMESTAMP_TBL;
-- Demonstrate functions and operators
SELECT '' AS "48", d1 FROM TIMESTAMP_TBL
FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
-- TO_CHAR()
-SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
FROM TIMESTAMP_TBL;
SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
SELECT '' AS to_char_3, to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J')
FROM TIMESTAMP_TBL;
-SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
FROM TIMESTAMP_TBL;
-SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
FROM TIMESTAMP_TBL;
-SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
+SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
FROM TIMESTAMP_TBL;
SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS')
FROM TIMESTAMP_TBL;
-SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
+SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
+ FROM TIMESTAMP_TBL;
+
+SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
FROM TIMESTAMP_TBL;
-
-SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
- FROM TIMESTAMP_TBL;
SELECT '' AS to_char_10, to_char(d1, 'IYYY IYY IY I IW IDDD ID')
FROM TIMESTAMP_TBL;
SELECT 'Wed Jul 11 10:51:14 PST-03:00 2001'::timestamptz;
SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz;
-SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL;
+SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL;
-- Demonstrate functions and operators
SELECT '' AS "48", d1 FROM TIMESTAMPTZ_TBL
FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
-- TO_CHAR()
-SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
FROM TIMESTAMPTZ_TBL;
-
+
SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
- FROM TIMESTAMPTZ_TBL;
+ FROM TIMESTAMPTZ_TBL;
SELECT '' AS to_char_3, to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J')
FROM TIMESTAMPTZ_TBL;
-
-SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
- FROM TIMESTAMPTZ_TBL;
-
-SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+
+SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
FROM TIMESTAMPTZ_TBL;
-SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
- FROM TIMESTAMPTZ_TBL;
-
SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS')
- FROM TIMESTAMPTZ_TBL;
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
+ FROM TIMESTAMPTZ_TBL;
-SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth')
+SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
FROM TIMESTAMPTZ_TBL;
-
-SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
- FROM TIMESTAMPTZ_TBL;
SELECT '' AS to_char_10, to_char(d1, 'IYYY IYY IY I IW IDDD ID')
FROM TIMESTAMPTZ_TBL;
VALUES ('["Feb 15 1990 12:15:03" "2001-09-23 11:12:13"]');
--- badly formatted tintervals
+-- badly formatted tintervals
INSERT INTO TINTERVAL_TBL (f1)
VALUES ('["bad time specifications" ""]');
-- contains
SELECT '' AS five, t1.f1
FROM TINTERVAL_TBL t1
- WHERE not t1.f1 <<
+ WHERE not t1.f1 <<
tinterval '["Aug 15 14:23:19 1980" "Sep 16 14:23:19 1990"]'
ORDER BY t1.f1;
BEGIN;
-SELECT *
+SELECT *
INTO TABLE xacttest
FROM aggtest;
ABORT;
--- should not exist
+-- should not exist
SELECT oid FROM pg_class WHERE relname = 'disappear';
--- should have members again
+-- should have members again
SELECT * FROM aggtest;
DELETE FROM savepoints WHERE a=2;
ROLLBACK;
COMMIT; -- should not be in a transaction block
-
+
SELECT * FROM savepoints;
-- test whole-tree commit on an aborted subtransaction
-- (fkey1, fkey2) --> pkeys (pkey1, pkey2)
-- (fkey3) --> fkeys2 (pkey23)
--
-create trigger check_fkeys_pkey_exist
- before insert or update on fkeys
- for each row
- execute procedure
+create trigger check_fkeys_pkey_exist
+ before insert or update on fkeys
+ for each row
+ execute procedure
check_primary_key ('fkey1', 'fkey2', 'pkeys', 'pkey1', 'pkey2');
-create trigger check_fkeys_pkey2_exist
- before insert or update on fkeys
- for each row
+create trigger check_fkeys_pkey2_exist
+ before insert or update on fkeys
+ for each row
execute procedure check_primary_key ('fkey3', 'fkeys2', 'pkey23');
--
-- For fkeys2:
-- (fkey21, fkey22) --> pkeys (pkey1, pkey2)
--
-create trigger check_fkeys2_pkey_exist
- before insert or update on fkeys2
- for each row
- execute procedure
+create trigger check_fkeys2_pkey_exist
+ before insert or update on fkeys2
+ for each row
+ execute procedure
check_primary_key ('fkey21', 'fkey22', 'pkeys', 'pkey1', 'pkey2');
-- Test comments
-- fkeys (fkey1, fkey2) and fkeys2 (fkey21, fkey22)
--
create trigger check_pkeys_fkey_cascade
- before delete or update on pkeys
- for each row
- execute procedure
- check_foreign_key (2, 'cascade', 'pkey1', 'pkey2',
+ before delete or update on pkeys
+ for each row
+ execute procedure
+ check_foreign_key (2, 'cascade', 'pkey1', 'pkey2',
'fkeys', 'fkey1', 'fkey2', 'fkeys2', 'fkey21', 'fkey22');
--
-- ON DELETE/UPDATE (pkey23) RESTRICT:
-- fkeys (fkey3)
--
-create trigger check_fkeys2_fkey_restrict
+create trigger check_fkeys2_fkey_restrict
before delete or update on fkeys2
- for each row
+ for each row
execute procedure check_foreign_key (1, 'restrict', 'pkey23', 'fkeys', 'fkey3');
insert into fkeys2 values (10, '1', 1);
-- -- Jan
--
-- create table dup17 (x int4);
---
--- create trigger dup17_before
+--
+-- create trigger dup17_before
-- before insert on dup17
--- for each row
--- execute procedure
+-- for each row
+-- execute procedure
-- funny_dup17 ()
-- ;
---
+--
-- insert into dup17 values (17);
-- select count(*) from dup17;
-- insert into dup17 values (17);
-- select count(*) from dup17;
---
+--
-- drop trigger dup17_before on dup17;
---
+--
-- create trigger dup17_after
-- after insert on dup17
--- for each row
--- execute procedure
+-- for each row
+-- execute procedure
-- funny_dup17 ()
-- ;
-- insert into dup17 values (13);
-- select count(*) from dup17 where x = 13;
-- insert into dup17 values (13);
-- select count(*) from dup17 where x = 13;
---
+--
-- DROP TABLE dup17;
create sequence ttdummy_seq increment 10 start 0 minvalue 0;
create table tttest (
- price_id int4,
- price_val int4,
+ price_id int4,
+ price_val int4,
price_on int4,
price_off int4 default 999999
);
-create trigger ttdummy
+create trigger ttdummy
before delete or update on tttest
- for each row
- execute procedure
+ for each row
+ execute procedure
ttdummy (price_on, price_off);
-create trigger ttserial
+create trigger ttserial
before insert or update on tttest
- for each row
- execute procedure
+ for each row
+ execute procedure
autoinc (price_on, ttdummy_seq);
insert into tttest values (1, 1, null);
v varchar
);
-CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
+CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
LANGUAGE plpgsql AS $$
declare
relid := TG_relid::regclass;
-- plpgsql can't discover its trigger data in a hash like perl and python
- -- can, or by a sort of reflection like tcl can,
+ -- can, or by a sort of reflection like tcl can,
-- so we have to hard code the names.
raise NOTICE 'TG_NAME: %', TG_name;
raise NOTICE 'TG_WHEN: %', TG_when;
end;
$$;
-CREATE TRIGGER show_trigger_data_trig
+CREATE TRIGGER show_trigger_data_trig
BEFORE INSERT OR UPDATE OR DELETE ON trigger_test
FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
insert into trigger_test values(1,'insert');
update trigger_test set v = 'update' where i = 1;
delete from trigger_test;
-
+
DROP TRIGGER show_trigger_data_trig on trigger_test;
-
+
DROP FUNCTION trigger_data();
DROP TABLE trigger_test;
INSERT INTO min_updates_test_oids VALUES ('a',1,2),('b','2',null);
-CREATE TRIGGER z_min_update
+CREATE TRIGGER z_min_update
BEFORE UPDATE ON min_updates_test
FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
-CREATE TRIGGER z_min_update
+CREATE TRIGGER z_min_update
BEFORE UPDATE ON min_updates_test_oids
FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
CREATE TRIGGER t
BEFORE TRUNCATE ON trunc_trigger_test
-FOR EACH STATEMENT
+FOR EACH STATEMENT
EXECUTE PROCEDURE trunctrigger('before trigger truncate');
SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
CREATE TRIGGER tt
AFTER TRUNCATE ON trunc_trigger_test
-FOR EACH STATEMENT
+FOR EACH STATEMENT
EXECUTE PROCEDURE trunctrigger('after trigger truncate');
SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
-- Synonim dictionary
CREATE TEXT SEARCH DICTIONARY synonym (
- Template=synonym,
+ Template=synonym,
Synonyms=synonym_sample
);
-- cannot pass more than one word to thesaurus.
CREATE TEXT SEARCH DICTIONARY thesaurus (
Template=thesaurus,
- DictFile=thesaurus_sample,
+ DictFile=thesaurus_sample,
Dictionary=english_stem
);
COPY=english
);
-ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR
- asciiword, hword_asciipart, asciihword
+ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR
+ asciiword, hword_asciipart, asciihword
WITH synonym, english_stem;
SELECT to_tsvector('synonym_tst', 'Postgresql is often called as postgres or pgsql and pronounced as postgre');
COPY=synonym_tst
);
-ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR
- asciiword, hword_asciipart, asciihword
+ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR
+ asciiword, hword_asciipart, asciihword
WITH synonym, thesaurus, english_stem;
SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one');
-- Look for pg_ts_config_map entries that aren't one of parser's token types
SELECT * FROM
( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid
- FROM pg_ts_config ) AS tt
+ FROM pg_ts_config ) AS tt
RIGHT JOIN pg_ts_config_map AS m
ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype)
WHERE
SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)';
SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
-
+
RESET enable_seqscan;
INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH');
SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10;
</html>',
to_tsquery('english', 'sea&foo'), 'HighlightAll=true');
---Check if headline fragments work
+--Check if headline fragments work
SELECT ts_headline('english', '
Day after day, day after day,
We stuck, nor breath nor motion,
p2.typelem = p1.oid and p1.typarray = p2.oid);
-- Make sure typarray points to a varlena array type of our own base
-SELECT p1.oid, p1.typname as basetype, p2.typname as arraytype,
+SELECT p1.oid, p1.typname as basetype, p2.typname as arraytype,
p2.typelem, p2.typlen
FROM pg_type p1 LEFT JOIN pg_type p2 ON (p1.typarray = p2.oid)
WHERE p1.typarray <> 0 AND
INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
--- any of the following three input formats are acceptable
+-- any of the following three input formats are acceptable
INSERT INTO VARCHAR_TBL (f1) VALUES ('1');
INSERT INTO VARCHAR_TBL (f1) VALUES (2);
INSERT INTO VARCHAR_TBL (f1) VALUES ('3');
--- zero-length char
+-- zero-length char
INSERT INTO VARCHAR_TBL (f1) VALUES ('');
--- try varchar's of greater than 1 length
+-- try varchar's of greater than 1 length
INSERT INTO VARCHAR_TBL (f1) VALUES ('cd');
INSERT INTO VARCHAR_TBL (f1) VALUES ('c ');
SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
-- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window.
-SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM
(SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s
SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four
FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s;
-SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum
FROM tenk1 GROUP BY ten, two;
SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE two = 1)s WHERE unique2 < 10;
-SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) +
- sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum
+SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum
FROM tenk1 WHERE unique2 < 10;
-- opexpr with different windows evaluation.
SELECT * FROM(
- SELECT count(*) OVER (PARTITION BY four ORDER BY ten) +
- sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total,
+ SELECT count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total,
count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount,
sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum
FROM tenk1
SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHERE unique2 < 10;
-SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum
FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten);
-- more than one window with GROUP BY
o compile and run this program
If your platform requires special thread flags that are not tested by
-/config/acx_pthread.m4, add PTHREAD_CFLAGS and PTHREAD_LIBS defines to
+/config/acx_pthread.m4, add PTHREAD_CFLAGS and PTHREAD_LIBS defines to
your template/${port} file.
Windows Systems
===============
Windows systems do not vary in their thread-safeness in the same way that
-other systems might, nor do they generally have pthreads installed, hence
-on Windows this test is skipped by the configure program (pthreads is
+other systems might, nor do they generally have pthreads installed, hence
+on Windows this test is skipped by the configure program (pthreads is
required by the test program, but not PostgreSQL itself). If you do wish
to test your system however, you can do so as follows:
1) Install pthreads in you Mingw/Msys environment. You can download pthreads
from ftp://sources.redhat.com/pub/pthreads-win32/.
-
+
2) Build the test program:
gcc -o thread_test.exe \
}
If we wanted to add a third argument:
-
+
void print_stuff(int arg1, int arg2, int arg3)
{
printf("stuff: %d %d %d\n", arg1, arg2, arg3);
src/tools/backend/README
-Just point your browser at the index.html file, and click on the
+Just point your browser at the index.html file, and click on the
flowchart to see the description and source code.
href="../../backend/port">port</a> - compatibility routines</h2>
<br />
-
+
<hr />
<small>Maintainer: Bruce Momjian ( <a
$path = $ARGV[0];
shift @ARGV;
} else {
- $path = ".";
+ $path = ".";
}
$[ = 1; # set array base to 1
if ($arr[$fieldIndexer] eq '|') {
next;
}
-
+
# Put this keyword into the right list
push @{$keywords{$kcat}}, $arr[$fieldIndexer];
}
(add-hook 'c-mode-hook
(function
- (lambda nil
+ (lambda nil
(if (string-match "pgsql" buffer-file-name)
(progn
(c-set-style "bsd")
- (setq c-basic-offset 4)
+ (setq c-basic-offset 4)
(setq tab-width 4)
(c-set-offset 'case-label '+)
(setq indent-tabs-mode t)
#
TARGET = entab
BINDIR = /usr/local/bin
-XFLAGS =
+XFLAGS =
CFLAGS = -O
-LIBS =
+LIBS =
$(TARGET) : entab.o halt.o
$(CC) -o $(TARGET) $(XFLAGS) $(CFLAGS) entab.o halt.o $(LIBS)
-entab.o : entab.c
+entab.o : entab.c
$(CC) -c $(XFLAGS) $(CFLAGS) entab.c
-halt.o : halt.c
+halt.o : halt.c
$(CC) -c $(XFLAGS) $(CFLAGS) halt.c
clean:
The quote-protection option allows tab replacement without
quoted strings being changed.
Useful when strings in source code will not have the same tab stops
-when executed in the program.
+when executed in the program.
.LP
To change a text file created on a system with one size of tab
stop to display properly on a device with different tab setting,
copy debug from '/tmp/"$$"';
- select *
- into table debug2
+ select *
+ into table debug2
from debug;
create index idebug on debug(scope,func);
vacuum debug;
vacuum debug2;
- update debug2
- set scope = '_'
+ update debug2
+ set scope = '_'
from debug
where debug2.func = debug.func and
debug2.scope = 'T' and debug.scope = 'U';
# This script attempts to find all typedef's in the postgres binaries
# by using 'nm' to report all typedef debugging symbols.
-#
-# For this program to work, you must have compiled all binaries with
+#
+# For this program to work, you must have compiled all binaries with
# debugging symbols.
#
# This is run on BSD/OS 4.0 or Linux, so you may need to make changes.
-#
+#
# Ignore the nm errors about a file not being a binary file.
#
# It gets typedefs by reading "STABS":
cporig `grep -l HeapTuple *`
If I use mkid (from ftp.postgreSQL.org), I can do:
-
+
cporig `lid -kn 'fsyncOff'`
and get a copy of every file containing that word. I can then do:
When I am ready to generate a patch, I run 'difforig' command from the top of
the source tree:
-
+
I pipe the output of this to a file to hold my patch, and the file names
it processes appear on my screen. It creates a nice patch for me of all
the files I used with cporig.
my $libpq;
my $contrib_defines = {'refint' => 'REFINT_VERBOSE'};
-my @contrib_uselibpq = ('dblink', 'oid2name', 'pgbench', 'pg_upgrade',
+my @contrib_uselibpq = ('dblink', 'oid2name', 'pgbench', 'pg_upgrade',
'vacuumlo');
-my @contrib_uselibpgport = ('oid2name', 'pgbench', 'pg_standby',
+my @contrib_uselibpgport = ('oid2name', 'pgbench', 'pg_standby',
'pg_archivecleanup', 'pg_upgrade', 'vacuumlo');
my $contrib_extralibs = {'pgbench' => ['wsock32.lib']};
my $contrib_extraincludes = {'tsearch2' => ['contrib/tsearch2'], 'dblink' => ['src/backend']};
Notes about Visual Studio Express
---------------------------------
To build PostgreSQL using Visual Studio Express, the Platform SDK
-has to be installed. Since this is not included in the product
+has to be installed. Since this is not included in the product
originally, extra steps are needed to make it work.
-First, download and install the latest Platform SDK from
-www.microsoft.com.
+First, download and install the latest Platform SDK from
+www.microsoft.com.
-Locate the files vcprojectengine.dll.express.config and
+Locate the files vcprojectengine.dll.express.config and
vcprojectengine.dll.config in the vc\vcpackages directory of
the Visual C++ Express installation. In these files, add the paths
to the Platform SDK to the Include, Library and Path tags. Be sure
# src/tools/pginclude/pgrminclude
trap "rm -f /tmp/$$.c /tmp/$$.o /tmp/$$ /tmp/$$a /tmp/$$b" 0 1 2 3 15
-find . \( -name CVS -a -prune \) -o -type f -name '*.[ch]' -print |
+find . \( -name CVS -a -prune \) -o -type f -name '*.[ch]' -print |
grep -v '\./postgres.h' |
grep -v '\./pg_config.h' |
grep -v '\./c.h' |
then IS_INCLUDE="Y"
else IS_INCLUDE="N"
fi
-
+
# loop through all includes
cat "$FILE" | grep "^#include" |
sed 's/^#include[ ]*[<"]\([^>"]*\).*$/\1/g' |
# remove defines from include files
if [ "$IS_INCLUDE" = "Y" ]
- then cat "$FILE" | grep -v "^#if" | grep -v "^#else" |
+ then cat "$FILE" | grep -v "^#if" | grep -v "^#else" |
grep -v "^#endif" | sed 's/->[a-zA-Z0-9_\.]*//g' >/tmp/$$a
else cat "$FILE" >/tmp/$$a
fi
========
This can format all PostgreSQL *.c and *.h files, but excludes *.y, and
-*.l files.
+*.l files.
1) Change directory to the top of the build tree.
---------------------------------------------------------------------------
-We have standardized on NetBSD's indent. We have fixed a few bugs which
-requre the NetBSD source to be patched with indent.bsd.patch patch. A
+We have standardized on NetBSD's indent. We have fixed a few bugs which
+requre the NetBSD source to be patched with indent.bsd.patch patch. A
fully patched version is available at ftp://ftp.postgresql.org/pub/dev.
GNU indent, version 2.2.6, has several problems, and is not recommended.
then echo "You appear to have GNU indent rather than BSD indent." >&2
echo "See the pgindent/README file for a description of its problems." >&2
EXTRA_OPTS="-cdb -bli0 -npcs -cli4 -sc"
-else
+else
EXTRA_OPTS="-cli1"
fi
# isn't needed for general use.
# awk '
# {
-# line3 = $0;
+# line3 = $0;
# if (skips > 0)
# skips--;
# if (line1 ~ / *{$/ &&
# Remove blank line between opening brace and block comment.
awk '
{
- line3 = $0;
+ line3 = $0;
if (skips > 0)
skips--;
if (line1 ~ / *{$/ &&
print line1;
}' |
-# Move prototype names to the same line as return type. Useful for ctags.
+# Move prototype names to the same line as return type. Useful for ctags.
# Indent should do this, but it does not. It formats prototypes just
# like real functions.
- awk ' BEGIN {paren_level = 0}
+ awk ' BEGIN {paren_level = 0}
{
if ($0 ~ /^[a-zA-Z_][a-zA-Z_0-9]*[^\(]*$/)
{
[ "X$1" != "X-n" ] && PGCLEAN=clean
-(gmake $PGCLEAN check 2>&1; echo "$?" > $TMP/ret) |
+(gmake $PGCLEAN check 2>&1; echo "$?" > $TMP/ret) |
(tee $TMP/0; exit `cat $TMP/ret`) &&
cat $TMP/0 |
-# The following grep's have to be adjusted for your setup because
+# The following grep's have to be adjusted for your setup because
# certain warnings are acceptable.
-grep -i warning |
-grep -v setproctitle |
-grep -v find_rule |
+grep -i warning |
+grep -v setproctitle |
+grep -v find_rule |
grep -v yy_flex_realloc
-- descendants.
-----------------------------
--- For example, the capitals table inherits from cities table. (It inherits
+-- For example, the capitals table inherits from cities table. (It inherits
-- all data fields from cities.)
CREATE TABLE cities (
-----------------------------
-- Populating a Table With Rows:
--- An INSERT statement is used to insert a new row into a table. There
+-- An INSERT statement is used to insert a new row into a table. There
-- are several ways you can specify what columns the data should go to.
-----------------------------
-- 1. The simplest case is when the list of value correspond to the order of
-- the columns specified in CREATE TABLE.
-INSERT INTO weather
+INSERT INTO weather
VALUES ('San Francisco', 46, 50, 0.25, '1994-11-27');
-INSERT INTO cities
+INSERT INTO cities
VALUES ('San Francisco', '(-194.0, 53.0)');
-- 2. You can also specify what column the values correspond to. (The columns
SELECT *
FROM weather
- WHERE city = 'San Francisco'
+ WHERE city = 'San Francisco'
AND prcp > 0.0;
-- Here is a more complicated one. Duplicates are removed when DISTINCT is
-- Suppose we want to find all the records that are in the temperature range
-- of other records. W1 and W2 are aliases for weather.
-SELECT W1.city, W1.temp_lo, W1.temp_hi,
+SELECT W1.city, W1.temp_lo, W1.temp_hi,
W2.city, W2.temp_lo, W2.temp_hi
FROM weather W1, weather W2
-WHERE W1.temp_lo < W2.temp_lo
+WHERE W1.temp_lo < W2.temp_lo
and W1.temp_hi > W2.temp_hi;
-- Aggregate with GROUP BY
SELECT city, max(temp_lo)
- FROM weather
+ FROM weather
GROUP BY city;
-- ... and HAVING
SELECT * FROM weather;
-- You can also delete all the rows in a table by doing the following. (This
--- is different from DROP TABLE which removes the table in addition to the
+-- is different from DROP TABLE which removes the table in addition to the
-- removing the rows.)
DELETE FROM weather;
-- complex.sql-
-- This file shows how to create a new user-defined type and how to
-- use this new type.
---
+--
--
-- Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
-- Portions Copyright (c) 1994, Regents of the University of California
-- C code. We also mark them IMMUTABLE, since they always return the
-- same outputs given the same inputs.
--- the input function 'complex_in' takes a null-terminated string (the
+-- the input function 'complex_in' takes a null-terminated string (the
-- textual representation of the type) and turns it into the internal
-- (in memory) representation. You will get a message telling you 'complex'
-- does not exist yet but that's okay.
-- memory block required to hold the type (we need two 8-byte doubles).
CREATE TYPE complex (
- internallength = 16,
+ internallength = 16,
input = complex_in,
output = complex_out,
receive = complex_recv,
);
-- data for user-defined types are just strings in the proper textual
--- representation.
+-- representation.
INSERT INTO test_complex VALUES ('(1.0, 2.5)', '(4.2, 3.55 )');
INSERT INTO test_complex VALUES ('(33.0, 51.4)', '(100.42, 93.55)');
-- Creating an operator for the new type:
-- Let's define an add operator for complex types. Since POSTGRES
-- supports function overloading, we'll use + as the add operator.
--- (Operator names can be reused with different numbers and types of
+-- (Operator names can be reused with different numbers and types of
-- arguments.)
-----------------------------
-- we can now define the operator. We show a binary operator here but you
-- can also define unary operators by omitting either of leftarg or rightarg.
-CREATE OPERATOR + (
+CREATE OPERATOR + (
leftarg = complex,
rightarg = complex,
procedure = complex_add,
-----------------------------
--
--- let's create a simple SQL function that takes no arguments and
+-- let's create a simple SQL function that takes no arguments and
-- returns 1
CREATE FUNCTION one() RETURNS integer
AS 'SELECT 1 as ONE' LANGUAGE SQL;
--
--- functions can be used in any expressions (eg. in the target list or
+-- functions can be used in any expressions (e.g. in the target list or
-- qualifications)
SELECT one() AS answer;
INSERT INTO EMP VALUES ('Bill', 4200, 36, '(2,1)');
INSERT INTO EMP VALUES ('Ginger', 4800, 30, '(2,4)');
--- the argument of a function can also be a tuple. For instance,
+-- the argument of a function can also be a tuple. For instance,
-- double_salary takes a tuple of the EMP table
CREATE FUNCTION double_salary(EMP) RETURNS integer
FROM EMP
WHERE EMP.cubicle ~= '(2,1)'::point;
--- the return value of a function can also be a tuple. However, make sure
--- that the expressions in the target list is in the same order as the
+-- the return value of a function can also be a tuple. However, make sure
+-- that the expressions in the target list are in the same order as the
-- columns of EMP.
CREATE FUNCTION new_emp() RETURNS EMP
-----------------------------
-- Creating C Functions
--- in addition to SQL functions, you can also create C functions.
+-- in addition to SQL functions, you can also create C functions.
-- See funcs.c for the definition of the C functions.
-----------------------------
SELECT copytext('hello world!');
SELECT name, c_overpaid(EMP, 1500) AS overpaid
-FROM EMP
+FROM EMP
WHERE name = 'Bill' or name = 'Sam';
-- remove functions that were created in this file
-- column reference)
--
SELECT n.nspname AS schema_name,
- bc.relname AS class_name,
- ic.relname AS index_name,
+ bc.relname AS class_name,
+ ic.relname AS index_name,
a.attname
FROM pg_namespace n,
pg_class bc, -- base class
-- classes
--
SELECT n.nspname, c.relname, a.attname, format_type(t.oid, null) as typname
- FROM pg_namespace n, pg_class c,
+ FROM pg_namespace n, pg_class c,
pg_attribute a, pg_type t
WHERE n.oid = c.relnamespace
and c.relkind = 'r' -- no indices
--
-- lists all left unary operators
--
-SELECT n.nspname, o.oprname AS left_unary,
+SELECT n.nspname, o.oprname AS left_unary,
format_type(right_type.oid, null) AS operand,
format_type(result.oid, null) AS return_type
- FROM pg_namespace n, pg_operator o,
+ FROM pg_namespace n, pg_operator o,
pg_type right_type, pg_type result
WHERE o.oprnamespace = n.oid
and o.oprkind = 'l' -- left unary
--
-- lists all right unary operators
--
-SELECT n.nspname, o.oprname AS right_unary,
+SELECT n.nspname, o.oprname AS right_unary,
format_type(left_type.oid, null) AS operand,
format_type(result.oid, null) AS return_type
- FROM pg_namespace n, pg_operator o,
+ FROM pg_namespace n, pg_operator o,
pg_type left_type, pg_type result
WHERE o.oprnamespace = n.oid
and o.oprkind = 'r' -- right unary
format_type(left_type.oid, null) AS left_opr,
format_type(right_type.oid, null) AS right_opr,
format_type(result.oid, null) AS return_type
- FROM pg_namespace n, pg_operator o, pg_type left_type,
+ FROM pg_namespace n, pg_operator o, pg_type left_type,
pg_type right_type, pg_type result
WHERE o.oprnamespace = n.oid
and o.oprkind = 'b' -- binary
-- C functions
--
SELECT n.nspname, p.proname, p.pronargs, format_type(t.oid, null) as return_type
- FROM pg_namespace n, pg_proc p,
+ FROM pg_namespace n, pg_proc p,
pg_language l, pg_type t
WHERE p.pronamespace = n.oid
and n.nspname not like 'pg\\_%' -- no catalogs
and n.nspname != 'information_schema' -- no information_schema
- and p.prolang = l.oid
+ and p.prolang = l.oid
and p.prorettype = t.oid
and l.lanname = 'c'
ORDER BY nspname, proname, pronargs, return_type;
-- lists all aggregate functions and the types to which they can be applied
--
SELECT n.nspname, p.proname, format_type(t.oid, null) as typname
- FROM pg_namespace n, pg_aggregate a,
+ FROM pg_namespace n, pg_aggregate a,
pg_proc p, pg_type t
WHERE p.pronamespace = n.oid
and a.aggfnoid = p.oid
-- families
--
SELECT am.amname, n.nspname, opf.opfname, opr.oprname
- FROM pg_namespace n, pg_am am, pg_opfamily opf,
+ FROM pg_namespace n, pg_am am, pg_opfamily opf,
pg_amop amop, pg_operator opr
WHERE opf.opfnamespace = n.oid
and opf.opfmethod = am.oid
!IF "$(OS)" == "Windows_NT"
NULL=
-!ELSE
+!ELSE
NULL=nul
-!ENDIF
+!ENDIF
-ALL:
+ALL:
cd include
if not exist pg_config.h copy pg_config.h.win32 pg_config.h
if not exist pg_config_os.h copy port\win32.h pg_config_os.h