/* Methods */
- bool (*f_gt) (const void *, const void *, Oid); /* greater than */
- bool (*f_ge) (const void *, const void *, Oid); /* greater equal */
- bool (*f_eq) (const void *, const void *, Oid); /* equal */
- bool (*f_le) (const void *, const void *, Oid); /* less equal */
- bool (*f_lt) (const void *, const void *, Oid); /* less than */
- int32 (*f_cmp) (const void *, const void *, Oid); /* compare */
+ bool (*f_gt) (const void *, const void *, Oid); /* greater than */
+ bool (*f_ge) (const void *, const void *, Oid); /* greater equal */
+ bool (*f_eq) (const void *, const void *, Oid); /* equal */
+ bool (*f_le) (const void *, const void *, Oid); /* less equal */
+ bool (*f_lt) (const void *, const void *, Oid); /* less than */
+ int32 (*f_cmp) (const void *, const void *, Oid); /* compare */
GBT_VARKEY *(*f_l2n) (GBT_VARKEY *); /* convert leaf to node */
} gbtree_vinfo;
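
A minimal sketch (not part of this patch) of how a gbtree_vinfo-style table of comparison callbacks could be consulted generically; the helper name and its use of the Oid collation argument are illustrative only:

static bool
gbt_keys_ordered(const gbtree_vinfo *tinfo,
                 const void *a, const void *b, Oid collation)
{
    /* Prefer the three-way comparator when the type supplies one ... */
    if (tinfo->f_cmp)
        return tinfo->f_cmp(a, b, collation) <= 0;
    /* ... otherwise fall back to the boolean "less or equal" callback. */
    return tinfo->f_le(a, b, collation);
}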
"Main intended use as restore_command in recovery.conf:\n"
" restore_command = 'pg_standby [OPTION]... ARCHIVELOCATION %%f %%p %%r'\n"
"e.g.\n"
- " restore_command = 'pg_standby /mnt/server/archiverdir %%f %%p %%r'\n");
+ " restore_command = 'pg_standby /mnt/server/archiverdir %%f %%p %%r'\n");
}
/* pg_largeobject and its index should be skipped */
if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
pg_log(PG_FATAL, "New cluster database \"%s\" is not empty\n",
- new_cluster.dbarr.dbs[dbnum].db_name);
+ new_cluster.dbarr.dbs[dbnum].db_name);
}
}
static void
check_old_cluster_has_new_cluster_dbs(void)
{
- int old_dbnum, new_dbnum;
+ int old_dbnum,
+ new_dbnum;
for (new_dbnum = 0; new_dbnum < new_cluster.dbarr.ndbs; new_dbnum++)
{
for (old_dbnum = 0; old_dbnum < old_cluster.dbarr.ndbs; old_dbnum++)
if (strcmp(old_cluster.dbarr.dbs[old_dbnum].db_name,
- new_cluster.dbarr.dbs[new_dbnum].db_name) == 0)
+ new_cluster.dbarr.dbs[new_dbnum].db_name) == 0)
break;
if (old_dbnum == old_cluster.dbarr.ndbs)
pg_log(PG_FATAL, "New cluster database \"%s\" does not exist in the old cluster\n",
- new_cluster.dbarr.dbs[new_dbnum].db_name);
+ new_cluster.dbarr.dbs[new_dbnum].db_name);
}
}
if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
pg_log(PG_FATAL, "database user \"%s\" is not a superuser\n",
- os_info.user);
+ os_info.user);
PQclear(res);
pg_putenv("LC_TIME", NULL);
pg_putenv("LANG",
#ifndef WIN32
- NULL);
+ NULL);
#else
/* On Windows the default locale cannot be English, so force it */
- "en");
+ "en");
#endif
pg_putenv("LANGUAGE", NULL);
pg_putenv("LC_ALL", NULL);
if (access(".", R_OK | W_OK
#ifndef WIN32
+
/*
- * Do a directory execute check only on Unix because execute permission
- * on NTFS means "can execute scripts", which we don't care about.
- * Also, X_OK is not defined in the Windows API.
+ * Do a directory execute check only on Unix because execute permission on
+ * NTFS means "can execute scripts", which we don't care about. Also, X_OK
+ * is not defined in the Windows API.
*/
- | X_OK
+ | X_OK
#endif
- ) != 0)
+ ) != 0)
pg_log(PG_FATAL,
- "You must have read and write access in the current directory.\n");
+ "You must have read and write access in the current directory.\n");
check_bin_dir(&old_cluster);
check_data_dir(old_cluster.pgdata);
{
char subDirName[MAXPGPATH];
int subdirnum;
+
/* start check with top-most directory */
const char *requiredSubdirs[] = {"", "base", "global", "pg_clog",
"pg_multixact", "pg_subtrans", "pg_tblspc", "pg_twophase",
- "pg_xlog"};
+ "pg_xlog"};
for (subdirnum = 0;
subdirnum < sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]);
++subdirnum)
{
struct stat statBuf;
+
snprintf(subDirName, sizeof(subDirName), "%s/%s", pg_data,
requiredSubdirs[subdirnum]);
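
For context, a self-contained sketch of the per-subdirectory check this loop performs; the error reporting and helper name here are illustrative, not the patch's code:

#include <stdio.h>
#include <sys/stat.h>

/* Return 0 if "basedir/subdir" exists and is a directory, -1 otherwise. */
static int
check_subdir(const char *basedir, const char *subdir)
{
    char        path[4096];
    struct stat statBuf;

    snprintf(path, sizeof(path), "%s/%s", basedir, subdir);
    if (stat(path, &statBuf) != 0)
    {
        fprintf(stderr, "check for %s failed\n", path);
        return -1;
    }
    if (!S_ISDIR(statBuf.st_mode))
    {
        fprintf(stderr, "%s is not a directory\n", path);
        return -1;
    }
    return 0;
}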
report_status(PG_FATAL, "check for %s failed: %s\n",
cluster->bindir, getErrorText(errno));
else if (!S_ISDIR(statBuf.st_mode))
- report_status(PG_FATAL, "%s is not a directory\n",
- cluster->bindir);
+ report_status(PG_FATAL, "%s is not a directory\n",
+ cluster->bindir);
validate_exec(cluster->bindir, "postgres");
validate_exec(cluster->bindir, "pg_ctl");
case 'u':
pg_free(os_info.user);
os_info.user = pg_strdup(optarg);
+
/*
* Push the user name into the environment so pre-9.1
* pg_ctl/libpq uses it.
void pg_free(void *ptr);
const char *getErrorText(int errNum);
unsigned int str2uint(const char *str);
-void pg_putenv(const char *var, const char *val);
+void pg_putenv(const char *var, const char *val);
/* version.c */
char conn_opts[MAXPGPATH];
snprintf(conn_opts, sizeof(conn_opts),
- "dbname = '%s' user = '%s' port = %d", db_name, os_info.user,
- cluster->port);
+ "dbname = '%s' user = '%s' port = %d", db_name, os_info.user,
+ cluster->port);
return PQconnectdb(conn_opts);
}
PGconn *conn;
bool exit_hook_registered = false;
int pg_ctl_return = 0;
+
#ifndef WIN32
- char *output_filename = log_opts.filename;
+ char *output_filename = log_opts.filename;
#else
+
/*
* On Win32, we can't send both pg_upgrade output and pg_ctl output to the
* same file because we get the error: "The process cannot access the file
* because it is being used by another process." so we have to send all
* other output to 'nul'.
*/
- char *output_filename = DEVNULL;
+ char *output_filename = DEVNULL;
#endif
if (!exit_hook_registered)
"-o \"-p %d %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
cluster->bindir, output_filename, cluster->pgdata, cluster->port,
(cluster->controldata.cat_ver >=
- BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
- "-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
+ BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
+ "-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
log_opts.filename);
/*
- * Don't throw an error right away, let connecting throw the error
- * because it might supply a reason for the failure.
+ * Don't throw an error right away, let connecting throw the error because
+ * it might supply a reason for the failure.
*/
pg_ctl_return = exec_prog(false, "%s", cmd);
{
pg_log(PG_REPORT, "\nconnection to database failed: %s\n",
PQerrorMessage(conn));
- if (conn)
+ if (conn)
PQfinish(conn);
pg_log(PG_FATAL, "unable to connect to %s postmaster started with the command: %s\n",
CLUSTER_NAME(cluster), cmd);
/* If the connection didn't fail, fail now */
if (pg_ctl_return != 0)
pg_log(PG_FATAL, "pg_ctl failed to start the %s server\n",
- CLUSTER_NAME(cluster));
-
+ CLUSTER_NAME(cluster));
+
os_info.running_cluster = cluster;
}
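
The control flow above can be summarized by this hedged sketch (the helper names are placeholders, not pg_upgrade's API): run pg_ctl, remember its status, try to connect, and only report the pg_ctl failure if connecting also fails:

/* Sketch only: run_pg_ctl_start(), try_connect() and the report_* helpers
 * are hypothetical stand-ins for the code in this hunk. */
static void
start_and_verify_server(void)
{
    int     pg_ctl_status = run_pg_ctl_start();     /* don't bail out yet */
    PGconn *conn = try_connect();

    if (conn == NULL || PQstatus(conn) != CONNECTION_OK)
    {
        /* the connection error usually explains the real problem */
        report_connection_failure(conn);
        if (conn)
            PQfinish(conn);
        if (pg_ctl_status != 0)
            report_pg_ctl_failure();
    }
}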
char cmd[MAXPGPATH];
const char *bindir;
const char *datadir;
+
#ifndef WIN32
- char *output_filename = log_opts.filename;
+ char *output_filename = log_opts.filename;
#else
/* See comment in start_postmaster() about why win32 output is ignored. */
- char *output_filename = DEVNULL;
+ char *output_filename = DEVNULL;
#endif
if (os_info.running_cluster == &old_cluster)
for (option = start; option->keyword != NULL; option++)
{
if (option->envvar && (strcmp(option->envvar, "PGHOST") == 0 ||
- strcmp(option->envvar, "PGHOSTADDR") == 0))
+ strcmp(option->envvar, "PGHOSTADDR") == 0))
{
const char *value = getenv(option->envvar);
if (value && strlen(value) > 0 &&
- /* check for 'local' host values */
+ /* check for 'local' host values */
(strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 &&
strcmp(value, "::1") != 0 && value[0] != '/'))
pg_log(PG_FATAL,
- "libpq environment variable %s has a non-local server value: %s\n",
- option->envvar, value);
+ "libpq environment variable %s has a non-local server value: %s\n",
+ option->envvar, value);
}
}
key->recheckCurItem = true;
return DatumGetBool(FunctionCall8Coll(&ginstate->consistentFn[key->attnum - 1],
- ginstate->supportCollation[key->attnum - 1],
+ ginstate->supportCollation[key->attnum - 1],
PointerGetDatum(key->entryRes),
UInt16GetDatum(key->strategy),
key->query,
UInt32GetDatum(key->nuserentries),
PointerGetDatum(key->extra_data),
- PointerGetDatum(&key->recheckCurItem),
+ PointerGetDatum(&key->recheckCurItem),
PointerGetDatum(key->queryValues),
- PointerGetDatum(key->queryCategories)));
+ PointerGetDatum(key->queryCategories)));
}
/*
*----------
*/
cmp = DatumGetInt32(FunctionCall4Coll(&btree->ginstate->comparePartialFn[attnum - 1],
- btree->ginstate->supportCollation[attnum - 1],
+ btree->ginstate->supportCollation[attnum - 1],
scanEntry->queryKey,
idatum,
UInt16GetDatum(scanEntry->strategy),
*----------
*/
cmp = DatumGetInt32(FunctionCall4Coll(&ginstate->comparePartialFn[entry->attnum - 1],
- ginstate->supportCollation[entry->attnum - 1],
+ ginstate->supportCollation[entry->attnum - 1],
entry->queryKey,
datum[off - 1],
- UInt16GetDatum(entry->strategy),
+ UInt16GetDatum(entry->strategy),
PointerGetDatum(entry->extra_data)));
if (cmp == 0)
return true;
/* OK to call the extractQueryFn */
queryValues = (Datum *)
DatumGetPointer(FunctionCall7Coll(&so->ginstate.extractQueryFn[skey->sk_attno - 1],
- so->ginstate.supportCollation[skey->sk_attno - 1],
+ so->ginstate.supportCollation[skey->sk_attno - 1],
skey->sk_argument,
PointerGetDatum(&nQueryValues),
- UInt16GetDatum(skey->sk_strategy),
- PointerGetDatum(&partial_matches),
+ UInt16GetDatum(skey->sk_strategy),
+ PointerGetDatum(&partial_matches),
PointerGetDatum(&extra_data),
PointerGetDatum(&nullFlags),
PointerGetDatum(&searchMode)));
* type for a noncollatable indexed data type (for instance, hstore
* uses text index entries). If there's no index collation then
* specify default collation in case the support functions need
- * collation. This is harmless if the support functions don't
- * care about collation, so we just do it unconditionally. (We could
+ * collation. This is harmless if the support functions don't care
+ * about collation, so we just do it unconditionally. (We could
* alternatively call get_typcollation, but that seems like expensive
* overkill --- there aren't going to be any cases where a GIN storage
* type has a nondefault collation.)
/* both not null, so safe to call the compareFn */
return DatumGetInt32(FunctionCall2Coll(&ginstate->compareFn[attnum - 1],
- ginstate->supportCollation[attnum - 1],
+ ginstate->supportCollation[attnum - 1],
a, b));
}
nullFlags = NULL; /* in case extractValue doesn't set it */
entries = (Datum *)
DatumGetPointer(FunctionCall3Coll(&ginstate->extractValueFn[attnum - 1],
- ginstate->supportCollation[attnum - 1],
+ ginstate->supportCollation[attnum - 1],
value,
PointerGetDatum(nentries),
PointerGetDatum(&nullFlags)));
/*
* If the index column has a specified collation, we should honor that
* while doing comparisons. However, we may have a collatable storage
- * type for a noncollatable indexed data type. If there's no index
+ * type for a noncollatable indexed data type. If there's no index
* collation then specify default collation in case the support
* functions need collation. This is harmless if the support
* functions don't care about collation, so we just do it
gistentryinit(*e, k, r, pg, o, l);
dep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->decompressFn[nkey],
- giststate->supportCollation[nkey],
+ giststate->supportCollation[nkey],
PointerGetDatum(e)));
/* decompressFn may just return the given pointer */
if (dep != e)
gistentryinit(*e, k, r, pg, o, l);
cep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[nkey],
- giststate->supportCollation[nkey],
+ giststate->supportCollation[nkey],
PointerGetDatum(e)));
/* compressFn may just return the given pointer */
if (cep != e)
*
* Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
* to check that we don't try to scan or do retail insertions into an index
- * that is currently being rebuilt or pending rebuild. This helps to catch
+ * that is currently being rebuilt or pending rebuild. This helps to catch
* things that don't work when reindexing system catalogs. The assertion
* doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
* when calling the index AM's ambuild routine, and there is no reason for
{
compare =
DatumGetInt32(FunctionCall2Coll(&entry->sk_func,
- entry->sk_collation,
+ entry->sk_collation,
attrDatum1,
attrDatum2));
*result = DatumGetBool(OidFunctionCall2Coll(cmp_proc,
op->sk_collation,
leftarg->sk_argument,
- rightarg->sk_argument));
+ rightarg->sk_argument));
return true;
}
}
ereport(FATAL,
(errmsg("requested recovery stop point is before consistent recovery point")));
}
+
/*
- * Ran off end of WAL before reaching end-of-backup WAL record,
- * or minRecoveryPoint. That's usually a bad sign, indicating that
- * you tried to recover from an online backup but never called
+ * Ran off end of WAL before reaching end-of-backup WAL record, or
+ * minRecoveryPoint. That's usually a bad sign, indicating that you
+ * tried to recover from an online backup but never called
* pg_stop_backup(), or you didn't archive all the WAL up to that
- * point. However, this also happens in crash recovery, if the
- * system crashes while an online backup is in progress. We
- * must not treat that as an error, or the database will refuse
- * to start up.
+ * point. However, this also happens in crash recovery, if the system
+ * crashes while an online backup is in progress. We must not treat
+ * that as an error, or the database will refuse to start up.
*/
if (InArchiveRecovery)
{
errhint("Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery.")));
else
ereport(FATAL,
- (errmsg("WAL ends before consistent recovery point")));
+ (errmsg("WAL ends before consistent recovery point")));
}
}
* However, when reindexing an existing index, we should do nothing here.
* Any HOT chains that are broken with respect to the index must predate
* the index's original creation, so there is no need to change the
- * index's usability horizon. Moreover, we *must not* try to change
- * the index's pg_index entry while reindexing pg_index itself, and this
+ * index's usability horizon. Moreover, we *must not* try to change the
+ * index's pg_index entry while reindexing pg_index itself, and this
* optimization nicely prevents that.
*/
if (indexInfo->ii_BrokenHotChain && !isreindex)
/*
* If it's for an exclusion constraint, make a second pass over the heap
- * to verify that the constraint is satisfied. We must not do this until
+ * to verify that the constraint is satisfied. We must not do this until
* the index is fully valid. (Broken HOT chains shouldn't matter, though;
* see comments for IndexCheckExclusion.)
*/
/*
* It's a HOT-updated tuple deleted by our own xact.
* We can assume the deletion will commit (else the
- * index contents don't matter), so treat the same
- * as RECENTLY_DEAD HOT-updated tuples.
+ * index contents don't matter), so treat the same as
+ * RECENTLY_DEAD HOT-updated tuples.
*/
indexIt = false;
/* mark the index as unsafe for old snapshots */
else
{
/*
- * It's a regular tuple deleted by our own xact.
- * Index it but don't check for uniqueness, the same
- * as a RECENTLY_DEAD tuple.
+ * It's a regular tuple deleted by our own xact. Index
+ * it but don't check for uniqueness, the same as a
+ * RECENTLY_DEAD tuple.
*/
indexIt = true;
}
/*
* If we are reindexing the target index, mark it as no longer being
- * reindexed, to forestall an Assert in index_beginscan when we try to
- * use the index for probes. This is OK because the index is now
- * fully valid.
+ * reindexed, to forestall an Assert in index_beginscan when we try to use
+ * the index for probes. This is OK because the index is now fully valid.
*/
if (ReindexIsCurrentlyProcessingIndex(RelationGetRelid(indexRelation)))
ResetReindexProcessing();
*
* We can also reset indcheckxmin, because we have now done a
* non-concurrent index build, *except* in the case where index_build
- * found some still-broken HOT chains. If it did, we normally leave
+ * found some still-broken HOT chains. If it did, we normally leave
* indcheckxmin alone (note that index_build won't have changed it,
- * because this is a reindex). But if the index was invalid or not ready
+ * because this is a reindex). But if the index was invalid or not ready
* and there were broken HOT chains, it seems best to force indcheckxmin
* true, because the normal argument that the HOT chains couldn't conflict
* with the index is suspect for an invalid index.
* the data in a manner that risks a change in constraint validity.
*
* Returns true if any indexes were rebuilt (including toast table's index
- * when relevant). Note that a CommandCounterIncrement will occur after each
+ * when relevant). Note that a CommandCounterIncrement will occur after each
* index rebuild.
*/
bool
/*
* RangeVarGetAndCheckCreationNamespace
- * As RangeVarGetCreationNamespace, but with a permissions check.
+ * As RangeVarGetCreationNamespace, but with a permissions check.
*/
Oid
RangeVarGetAndCheckCreationNamespace(const RangeVar *newRelation)
/*
* Grab a DDL-exclusive lock on the target table, since we'll update the
- * pg_class tuple. This is redundant for all present users. Tuple toasting
- * behaves safely in the face of a concurrent TOAST table add.
+ * pg_class tuple. This is redundant for all present users. Tuple
+ * toasting behaves safely in the face of a concurrent TOAST table add.
*/
rel = heap_open(relOid, ShareUpdateExclusiveLock);
coloptions[1] = 0;
index_create(toast_rel, toast_idxname, toastIndexOid,
- indexInfo,
- list_make2("chunk_id", "chunk_seq"),
- BTREE_AM_OID,
- rel->rd_rel->reltablespace,
- collationObjectId, classObjectId, coloptions, (Datum) 0,
- true, false, false, false,
- true, false, false);
+ indexInfo,
+ list_make2("chunk_id", "chunk_seq"),
+ BTREE_AM_OID,
+ rel->rd_rel->reltablespace,
+ collationObjectId, classObjectId, coloptions, (Datum) 0,
+ true, false, false, false,
+ true, false, false);
heap_close(toast_rel, NoLock);
}
/*
- * Report ANALYZE to the stats collector, too. However, if doing
+ * Report ANALYZE to the stats collector, too. However, if doing
* inherited stats we shouldn't report, because the stats collector only
* tracks per-table stats.
*/
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
/*
- * Estimate total numbers of rows in relation. For live rows, use
+ * Estimate total numbers of rows in relation. For live rows, use
* vac_estimate_reltuples; for dead rows, we have no source of old
* information, so we have to assume the density is the same in unseen
* pages as in the pages we scanned.
/*
* If the OldHeap has a toast table, get lock on the toast table to keep
- * it from being vacuumed. This is needed because autovacuum processes
+ * it from being vacuumed. This is needed because autovacuum processes
* toast tables independently of their main tables, with no lock on the
- * latter. If an autovacuum were to start on the toast table after we
+ * latter. If an autovacuum were to start on the toast table after we
* compute our OldestXmin below, it would use a later OldestXmin, and then
* possibly remove as DEAD toast tuples belonging to main tuples we think
- * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
+ * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
* tuples.
*
* We don't need to open the toast relation here, just lock it. The lock
rel->rd_rel->relkind != RELKIND_UNCATALOGED)
{
if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+
/*
- * Custom error message for FOREIGN TABLE since the term is
- * close to a regular table and can confuse the user.
+ * Custom error message for FOREIGN TABLE since the term is close
+ * to a regular table and can confuse the user.
*/
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot create index on foreign table \"%s\"",
- heapRelation->relname)));
+ heapRelation->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
/*
- * Previous releases of Postgres neglected to prevent SELECT FOR UPDATE
- * on a sequence, which would leave a non-frozen XID in the sequence
- * tuple's xmax, which eventually leads to clog access failures or worse.
- * If we see this has happened, clean up after it. We treat this like a
- * hint bit update, ie, don't bother to WAL-log it, since we can certainly
- * do this again if the update gets lost.
+ * Previous releases of Postgres neglected to prevent SELECT FOR UPDATE on
+ * a sequence, which would leave a non-frozen XID in the sequence tuple's
+ * xmax, which eventually leads to clog access failures or worse. If we
+ * see this has happened, clean up after it. We treat this like a hint
+ * bit update, ie, don't bother to WAL-log it, since we can certainly do
+ * this again if the update gets lost.
*/
if (HeapTupleHeaderGetXmax(tuple.t_data) != InvalidTransactionId)
{
* These subcommands affect implicit row type conversion. They
 * have effects similar to CREATE/DROP CAST on queries. We
* don't provide for invalidating parse trees as a result of
- * such changes. Do avoid concurrent pg_class updates, though.
+ * such changes. Do avoid concurrent pg_class updates,
+ * though.
*/
case AT_AddOf:
case AT_DropOf:
case AT_DisableRule:
case AT_DropInherit: /* NO INHERIT */
case AT_AddOf: /* OF */
- case AT_DropOf: /* NOT OF */
+ case AT_DropOf: /* NOT OF */
ATSimplePermissions(rel, ATT_TABLE);
/* These commands never recurse */
/* No command-specific prep needed */
*
* Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it
* isn't suitable, throw an error. Currently, we require that the type
- * originated with CREATE TYPE AS. We could support any row type, but doing so
+ * originated with CREATE TYPE AS. We could support any row type, but doing so
* would require handling a number of extra corner cases in the DDL commands.
*/
void
Assert(OidIsValid(typ->typrelid));
typeRelation = relation_open(typ->typrelid, AccessShareLock);
typeOk = (typeRelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE);
+
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
* commit. That will prevent someone else from deleting or ALTERing
default:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table, view, sequence, or foreign table",
- NameStr(tuple_class->relname))));
+ errmsg("\"%s\" is not a table, view, sequence, or foreign table",
+ NameStr(tuple_class->relname))));
}
/*
* Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
* INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
* heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
- * be TypeRelationId). There's no convenient way to do this, so go trawling
+ * be TypeRelationId). There's no convenient way to do this, so go trawling
* through pg_depend.
*/
static void
if (strncmp(table_attname, type_attname, NAMEDATALEN) != 0)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("table has column \"%s\" where type requires \"%s\"",
- table_attname, type_attname)));
+ errmsg("table has column \"%s\" where type requires \"%s\"",
+ table_attname, type_attname)));
/* Compare type. */
if (table_attr->atttypid != type_attr->atttypid ||
table_attr->attcollation != type_attr->attcollation)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(rel), type_attname)));
+ errmsg("table \"%s\" has different type for column \"%s\"",
+ RelationGetRelationName(rel), type_attname)));
}
DecrTupleDescRefCount(typeTupleDesc);
for (; table_attno <= tableTupleDesc->natts; table_attno++)
{
Form_pg_attribute table_attr = tableTupleDesc->attrs[table_attno - 1];
+
if (!table_attr->attisdropped)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/*
* ALTER TABLE NOT OF
*
- * Detach a typed table from its originating type. Just clear reloftype and
+ * Detach a typed table from its originating type. Just clear reloftype and
* remove the dependency.
*/
static void
RelationGetRelationName(rel))));
/*
- * We don't bother to check ownership of the type --- ownership of the table
- * is presumed enough rights. No lock required on the type, either.
+ * We don't bother to check ownership of the type --- ownership of the
+ * table is presumed enough rights. No lock required on the type, either.
*/
drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);
break;
case Anum_pg_ts_parser_prslextype:
nargs = 1;
+
/*
* Note: because the lextype method returns type internal, it must
* have an internal-type argument for security reasons. The
basetypeMod, /* typeMod value */
typNDims, /* Array dimensions for base type */
typNotNull, /* Type NOT NULL */
- domaincoll); /* type's collation */
+ domaincoll); /* type's collation */
/*
* Process constraints which refer to the domain ID returned by TypeCreate
* If we scanned the whole relation then we should just use the count of
* live tuples seen; but if we did not, we should not trust the count
* unreservedly, especially not in VACUUM, which may have scanned a quite
- * nonrandom subset of the table. When we have only partial information,
+ * nonrandom subset of the table. When we have only partial information,
* we take the old value of pg_class.reltuples as a measurement of the
* tuple density in the unscanned pages.
*
BlockNumber scanned_pages,
double scanned_tuples)
{
- BlockNumber old_rel_pages = relation->rd_rel->relpages;
+ BlockNumber old_rel_pages = relation->rd_rel->relpages;
double old_rel_tuples = relation->rd_rel->reltuples;
double old_density;
double new_density;
return scanned_tuples;
/*
- * If scanned_pages is zero but total_pages isn't, keep the existing
- * value of reltuples.
+ * If scanned_pages is zero but total_pages isn't, keep the existing value
+ * of reltuples.
*/
if (scanned_pages == 0)
return old_rel_tuples;
/*
* Okay, we've covered the corner cases. The normal calculation is to
- * convert the old measurement to a density (tuples per page), then
- * update the density using an exponential-moving-average approach,
- * and finally compute reltuples as updated_density * total_pages.
+ * convert the old measurement to a density (tuples per page), then update
+ * the density using an exponential-moving-average approach, and finally
+ * compute reltuples as updated_density * total_pages.
*
- * For ANALYZE, the moving average multiplier is just the fraction of
- * the table's pages we scanned. This is equivalent to assuming
- * that the tuple density in the unscanned pages didn't change. Of
- * course, it probably did, if the new density measurement is different.
- * But over repeated cycles, the value of reltuples will converge towards
- * the correct value, if repeated measurements show the same new density.
+ * For ANALYZE, the moving average multiplier is just the fraction of the
+ * table's pages we scanned. This is equivalent to assuming that the
+ * tuple density in the unscanned pages didn't change. Of course, it
+ * probably did, if the new density measurement is different. But over
+ * repeated cycles, the value of reltuples will converge towards the
+ * correct value, if repeated measurements show the same new density.
*
* For VACUUM, the situation is a bit different: we have looked at a
* nonrandom sample of pages, but we know for certain that the pages we
* didn't look at are precisely the ones that haven't changed lately.
* Thus, there is a reasonable argument for doing exactly the same thing
- * as for the ANALYZE case, that is use the old density measurement as
- * the value for the unscanned pages.
+ * as for the ANALYZE case, that is use the old density measurement as the
+ * value for the unscanned pages.
*
* This logic could probably use further refinement.
*/
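
Restating the normal-case calculation described in that comment as a standalone sketch; it assumes the corner cases (no previous data, zero scanned pages, a whole-table scan) have already been handled, and is not the function itself:

static double
estimate_reltuples_sketch(double old_rel_pages, double old_rel_tuples,
                          double total_pages, double scanned_pages,
                          double scanned_tuples)
{
    double      old_density = old_rel_tuples / old_rel_pages;
    double      new_density = scanned_tuples / scanned_pages;
    double      multiplier = scanned_pages / total_pages;  /* fraction scanned */
    double      updated_density;

    /* exponential moving average of tuple density, weighted by pages scanned */
    updated_density = old_density + (new_density - old_density) * multiplier;
    return updated_density * total_pages;
}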
/* Overall statistics about rel */
BlockNumber rel_pages; /* total number of pages */
BlockNumber scanned_pages; /* number of pages we examined */
- double scanned_tuples; /* counts only tuples on scanned pages */
+ double scanned_tuples; /* counts only tuples on scanned pages */
double old_rel_tuples; /* previous value of pg_class.reltuples */
double new_rel_tuples; /* new estimated total # of tuples */
BlockNumber pages_removed;
vac_update_relstats(onerel,
vacrelstats->rel_pages, vacrelstats->new_rel_tuples,
vacrelstats->hasindex,
- (vacrelstats->scanned_pages < vacrelstats->rel_pages) ?
+ (vacrelstats->scanned_pages < vacrelstats->rel_pages) ?
InvalidTransactionId :
FreezeLimit);
* of pages.
*
* Before entering the main loop, establish the invariant that
- * next_not_all_visible_block is the next block number >= blkno that's
- * not all-visible according to the visibility map, or nblocks if there's
- * no such block. Also, we set up the skipping_all_visible_blocks flag,
+ * next_not_all_visible_block is the next block number >= blkno that's not
+ * all-visible according to the visibility map, or nblocks if there's no
+ * such block. Also, we set up the skipping_all_visible_blocks flag,
* which is needed because we need hysteresis in the decision: once we've
* started skipping blocks, we may as well skip everything up to the next
* not-all-visible block.
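
A hedged sketch of the skipping decision that comment describes; the threshold policy and the exact bookkeeping are stand-ins, not the real lazy_scan_heap code:

/* Decide whether block blkno can be skipped, with the hysteresis described
 * above: once we start skipping an all-visible run, keep skipping until the
 * next not-all-visible block. */
static bool
should_skip_block(BlockNumber blkno,
                  BlockNumber next_not_all_visible_block,
                  bool *skipping_all_visible_blocks,
                  int skip_threshold)
{
    if (blkno >= next_not_all_visible_block)
    {
        /* reached a block we must scan; stop skipping */
        *skipping_all_visible_blocks = false;
        return false;
    }
    /* begin skipping only when the all-visible run ahead is long enough */
    if (!*skipping_all_visible_blocks &&
        next_not_all_visible_block - blkno >= skip_threshold)
        *skipping_all_visible_blocks = true;
    return *skipping_all_visible_blocks;
}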
/* now we can compute the new value for pg_class.reltuples */
vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
nblocks,
- vacrelstats->scanned_pages,
+ vacrelstats->scanned_pages,
num_tuples);
/* If any tuples need to be deleted, perform final vacuum cycle */
if (new_rel_pages != old_rel_pages)
{
/*
- * Note: we intentionally don't update vacrelstats->rel_pages with
- * the new rel size here. If we did, it would amount to assuming that
- * the new pages are empty, which is unlikely. Leaving the numbers
- * alone amounts to assuming that the new pages have the same tuple
- * density as existing ones, which is less unlikely.
+ * Note: we intentionally don't update vacrelstats->rel_pages with the
+ * new rel size here. If we did, it would amount to assuming that the
+ * new pages are empty, which is unlikely. Leaving the numbers alone
+ * amounts to assuming that the new pages have the same tuple density
+ * as existing ones, which is less unlikely.
*/
UnlockRelation(onerel, AccessExclusiveLock);
return;
*
* XXX Although canonicalizing seems like a good idea in the abstract, it
* breaks pre-9.1 JDBC drivers, which expect that if they send "UNICODE"
- * as the client_encoding setting then it will read back the same way.
- * As a workaround, don't replace the string if it's "UNICODE". Remove
- * that hack when pre-9.1 JDBC drivers are no longer in use.
+ * as the client_encoding setting then it will read back the same way. As
+ * a workaround, don't replace the string if it's "UNICODE". Remove that
+ * hack when pre-9.1 JDBC drivers are no longer in use.
*/
if (strcmp(*newval, canonical_name) != 0 &&
strcmp(*newval, "UNICODE") != 0)
/*
* We check for interrupts here because this corresponds to
- * where we'd fetch a row from a child plan node in other
- * join types.
+ * where we'd fetch a row from a child plan node in other join
+ * types.
*/
CHECK_FOR_INTERRUPTS();
/* Fetch major status message */
msg_ctx = 0;
gss_display_status(&lmin_s, maj_stat, GSS_C_GSS_CODE,
- GSS_C_NO_OID, &msg_ctx, &gmsg);
+ GSS_C_NO_OID, &msg_ctx, &gmsg);
strlcpy(msg_major, gmsg.value, sizeof(msg_major));
gss_release_buffer(&lmin_s, &gmsg);
/* Fetch mechanism minor status message */
msg_ctx = 0;
gss_display_status(&lmin_s, min_stat, GSS_C_MECH_CODE,
- GSS_C_NO_OID, &msg_ctx, &gmsg);
+ GSS_C_NO_OID, &msg_ctx, &gmsg);
strlcpy(msg_minor, gmsg.value, sizeof(msg_minor));
gss_release_buffer(&lmin_s, &gmsg);
if (errno == ENOSYS)
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("peer authentication is not supported on this platform")));
+ errmsg("peer authentication is not supported on this platform")));
else
ereport(LOG,
(errcode_for_socket_access(),
return true;
}
else if (strcmp(tok, role) == 0 ||
- (strcmp(tok, "replication\n") == 0 &&
- strcmp(role,"replication") ==0) ||
+ (strcmp(tok, "replication\n") == 0 &&
+ strcmp(role, "replication") == 0) ||
strcmp(tok, "all\n") == 0)
return true;
}
/* Allocate new memory because later getpwuid() calls can overwrite it. */
return strdup(pw->pw_name);
#else
- unsigned long namesize = 256 /* UNLEN */ + 1;
+ unsigned long namesize = 256 /* UNLEN */ + 1;
char *name;
name = malloc(namesize);
Pool *pool;
int pool_size,
number_generations;
+
#ifdef GEQO_DEBUG
int status_interval;
#endif
* evaluation of AND/OR? Probably *not*, because that would make the
* results depend on the clause ordering, and we are not in any position
* to expect that the current ordering of the clauses is the one that's
- * going to end up being used. The above per-RestrictInfo caching would
+ * going to end up being used. The above per-RestrictInfo caching would
* not mix well with trying to re-order clauses anyway.
*/
if (IsA(node, FuncExpr))
* dummy.
*
* Also, when called during GEQO join planning, we are in a short-lived
- * memory context. We must make sure that the dummy path attached to a
+ * memory context. We must make sure that the dummy path attached to a
* baserel survives the GEQO cycle, else the baserel is trashed for future
* GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
* we don't want the dummy path to clutter the main planning context. Upshot
* opposite nulls direction is redundant.
*
* We could probably consider sort keys with the same sortop and
- * different collations to be redundant too, but for the moment
- * treat them as not redundant. This will be needed if we ever
- * support collations with different notions of equality.
+ * different collations to be redundant too, but for the moment treat
+ * them as not redundant. This will be needed if we ever support
+ * collations with different notions of equality.
*/
if (sortColIdx[i] == colIdx &&
sortOperators[numCols] == sortOp &&
*
* We must convert the pathkey information into arrays of sort key column
* numbers, sort operator OIDs, collation OIDs, and nulls-first flags,
- * which is the representation the executor wants. These are returned into
+ * which is the representation the executor wants. These are returned into
* the output parameters *p_numsortkeys etc.
*
* If the pathkeys include expressions that aren't simple Vars, we will
if (parse->hasAggs)
{
/*
- * Collect statistics about aggregates for estimating costs.
- * Note: we do not attempt to detect duplicate aggregates here; a
+ * Collect statistics about aggregates for estimating costs. Note:
+ * we do not attempt to detect duplicate aggregates here; a
* somewhat-overestimated cost is okay for our present purposes.
*/
count_agg_clauses(root, (Node *) tlist, &agg_costs);
}
/*
- * Ensure the tlist entry's exposed collation matches the set-op.
- * This is necessary because plan_set_operations() reports the result
+ * Ensure the tlist entry's exposed collation matches the set-op. This
+ * is necessary because plan_set_operations() reports the result
* ordering as a list of SortGroupClauses, which don't carry collation
- * themselves but just refer to tlist entries. If we don't show the
+ * themselves but just refer to tlist entries. If we don't show the
* right collation then planner.c might do the wrong thing in
* higher-level queries.
*
static bool contain_agg_clause_walker(Node *node, void *context);
static bool pull_agg_clause_walker(Node *node, List **context);
static bool count_agg_clauses_walker(Node *node,
- count_agg_clauses_context *context);
+ count_agg_clauses_context *context);
static bool find_window_functions_walker(Node *node, WindowFuncLists *lists);
static bool expression_returns_set_rows_walker(Node *node, double *count);
static bool contain_subplans_walker(Node *node, void *context);
/*
* We can remove null constants from the list. For a non-null
* constant, if it has not been preceded by any other
- * non-null-constant expressions then it is the result.
- * Otherwise, it's the next argument, but we can drop following
- * arguments since they will never be reached.
+ * non-null-constant expressions then it is the result. Otherwise,
+ * it's the next argument, but we can drop following arguments
+ * since they will never be reached.
*/
if (IsA(e, Const))
{
* We must assign collations now because assign_query_collations
* doesn't process rangetable entries. We just assign all the
* collations independently in each row, and don't worry about
- * whether they are consistent vertically. The outer INSERT query
+ * whether they are consistent vertically. The outer INSERT query
* isn't going to care about the collations of the VALUES columns,
* so it's not worth the effort to identify a common collation for
* each one here. (But note this does have one user-visible
* doesn't process rangetable entries, and (2) we need to label the VALUES
* RTE with column collations for use in the outer query. We don't
* consider conflict of implicit collations to be an error here; instead
- * the column will just show InvalidOid as its collation, and you'll get
- * a failure later if that results in failure to resolve a collation.
+ * the column will just show InvalidOid as its collation, and you'll get a
+ * failure later if that results in failure to resolve a collation.
*
* Note we modify the per-column expression lists in-place.
*/
collations = NIL;
for (i = 0; i < sublist_length; i++)
{
- Oid coltype;
- Oid colcoll;
+ Oid coltype;
+ Oid colcoll;
coltype = select_common_type(pstate, colexprs[i], "VALUES", NULL);
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("VALUES must not contain table references"),
parser_errposition(pstate,
- locate_var_of_level((Node *) exprsLists, 0))));
+ locate_var_of_level((Node *) exprsLists, 0))));
/*
* Another thing we can't currently support is NEW/OLD references in rules
errmsg("VALUES must not contain OLD or NEW references"),
errhint("Use SELECT ... UNION ALL ... instead."),
parser_errposition(pstate,
- locate_var_of_level((Node *) exprsLists, 0))));
+ locate_var_of_level((Node *) exprsLists, 0))));
qry->rtable = pstate->p_rtable;
qry->jointree = makeFromExpr(pstate->p_joinlist, NULL);
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("cannot use aggregate function in VALUES"),
parser_errposition(pstate,
- locate_agg_of_level((Node *) exprsLists, 0))));
+ locate_agg_of_level((Node *) exprsLists, 0))));
if (pstate->p_hasWindowFuncs)
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
errmsg("cannot use window function in VALUES"),
parser_errposition(pstate,
- locate_windowfunc((Node *) exprsLists))));
+ locate_windowfunc((Node *) exprsLists))));
assign_query_collations(pstate, qry);
*
* These cases are unlike the ones above because the exposed type of
* the argument must be an actual array or enum type. In particular
- * the argument must *not* be an UNKNOWN constant. If it is, we just
+ * the argument must *not* be an UNKNOWN constant. If it is, we just
* fall through; below, we'll call anyarray_in or anyenum_in, which
* will produce an error. Also, if what we have is a domain over
* array or enum, we have to relabel it to its base type.
*
* Domains over arrays match ANYARRAY, and are immediately flattened to their
* base type. (Thus, for example, we will consider it a match if one ANYARRAY
- * argument is a domain over int4[] while another one is just int4[].) Also
+ * argument is a domain over int4[] while another one is just int4[].) Also
* notice that such a domain does *not* match ANYNONARRAY.
*
* If we have UNKNOWN input (ie, an untyped literal) for any polymorphic
* is an extra restriction if not.)
*
* Domains over arrays match ANYARRAY arguments, and are immediately flattened
- * to their base type. (In particular, if the return type is also ANYARRAY,
+ * to their base type. (In particular, if the return type is also ANYARRAY,
* we'll set it to the base type not the domain type.)
*
* When allow_poly is false, we are not expecting any of the actual_arg_types
stmt = (CreateStmt *) copyObject(stmt);
/*
- * Look up the creation namespace. This also checks permissions on the
+ * Look up the creation namespace. This also checks permissions on the
* target namespace, so that we throw any permissions error as early as
* possible.
*/
*/
if (stmt->if_not_exists)
{
- Oid existing_relid;
+ Oid existing_relid;
existing_relid = get_relname_relid(stmt->relation->relname,
namespaceid);
ereport(NOTICE,
(errcode(ERRCODE_DUPLICATE_TABLE),
errmsg("relation \"%s\" already exists, skipping",
- stmt->relation->relname)));
+ stmt->relation->relname)));
return NIL;
}
}
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(ctype);
LookupCollation(cxt->pstate,
- column->collClause->collname,
- column->collClause->location);
+ column->collClause->collname,
+ column->collClause->location);
/* Complain if COLLATE is applied to an uncollatable type */
if (!OidIsValid(typtup->typcollation))
ereport(ERROR,
"semaphore sets (SEMMNI), or the system wide maximum number of "
"semaphores (SEMMNS), would be exceeded. You need to raise the "
"respective kernel parameter. Alternatively, reduce PostgreSQL's "
- "consumption of semaphores by reducing its max_connections parameter.\n"
+ "consumption of semaphores by reducing its max_connections parameter.\n"
"The PostgreSQL documentation contains more information about "
"configuring your system for PostgreSQL.") : 0));
}
systemTicks = GetTickCount();
snprintf(dumpPath, _MAX_PATH,
- "crashdumps\\postgres-pid%0i-%0i.mdmp",
+ "crashdumps\\postgres-pid%0i-%0i.mdmp",
(int) selfPid, (int) systemTicks);
dumpPath[_MAX_PATH - 1] = '\0';
* The second argument to send() is defined by SUS to be a "const void *"
* and so we use the same signature here to keep compilers happy when
* handling callers.
- *
+ *
* But the buf member of a WSABUF struct is defined as "char *", so we cast
* the second argument to that here when assigning it, also to keep compilers
* happy.
DWORD rc;
HANDLE events[3];
HANDLE latchevent;
- HANDLE sockevent = WSA_INVALID_EVENT; /* silence compiler */
+ HANDLE sockevent = WSA_INVALID_EVENT; /* silence compiler */
int numevents;
int result = 0;
WalWriterPID = StartWalWriter();
/*
- * If we have lost the autovacuum launcher, try to start a new one.
- * We don't want autovacuum to run in binary upgrade mode because
- * autovacuum might update relfrozenxid for empty tables before
- * the physical files are put in place.
+ * If we have lost the autovacuum launcher, try to start a new one. We
+ * don't want autovacuum to run in binary upgrade mode because
+ * autovacuum might update relfrozenxid for empty tables before the
+ * physical files are put in place.
*/
if (!IsBinaryUpgrade && AutoVacPID == 0 &&
(AutoVacuumingActive() || start_autovac_launcher) &&
* several implementation strategies depending on the situation:
*
* 1. In C/POSIX collations, we use hard-wired code. We can't depend on
- * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
+ * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
* collations don't give a fig about multibyte characters.
*
* 2. In the "default" collation (which is supposed to obey LC_CTYPE):
*
* 2b. In all other encodings, or on machines that lack <wctype.h>, we use
* the <ctype.h> functions for pg_wchar values up to 255, and punt for values
- * above that. This is only 100% correct in single-byte encodings such as
- * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern
+ * above that. This is only 100% correct in single-byte encodings such as
+ * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern
* character sets for which the properties being tested here aren't very
- * relevant for higher code values anyway. The difficulty with using the
+ * relevant for higher code values anyway. The difficulty with using the
* <wctype.h> functions with non-Unicode multibyte encodings is that we can
* have no certainty that the platform's wchar_t representation matches
* what we do in pg_wchar conversions.
#define PG_ISSPACE 0x80
static const unsigned char pg_char_properties[128] = {
- /* NUL */ 0,
- /* ^A */ 0,
- /* ^B */ 0,
- /* ^C */ 0,
- /* ^D */ 0,
- /* ^E */ 0,
- /* ^F */ 0,
- /* ^G */ 0,
- /* ^H */ 0,
- /* ^I */ PG_ISSPACE,
- /* ^J */ PG_ISSPACE,
- /* ^K */ PG_ISSPACE,
- /* ^L */ PG_ISSPACE,
- /* ^M */ PG_ISSPACE,
- /* ^N */ 0,
- /* ^O */ 0,
- /* ^P */ 0,
- /* ^Q */ 0,
- /* ^R */ 0,
- /* ^S */ 0,
- /* ^T */ 0,
- /* ^U */ 0,
- /* ^V */ 0,
- /* ^W */ 0,
- /* ^X */ 0,
- /* ^Y */ 0,
- /* ^Z */ 0,
- /* ^[ */ 0,
- /* ^\ */ 0,
- /* ^] */ 0,
- /* ^^ */ 0,
- /* ^_ */ 0,
- /* */ PG_ISPRINT | PG_ISSPACE,
- /* ! */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* " */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* # */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* $ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* % */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* & */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ' */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ( */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ) */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* * */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* + */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* , */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* - */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* . */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* / */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* 0 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 1 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 2 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 3 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 4 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 5 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 6 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 7 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 8 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* 9 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
- /* : */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ; */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* < */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* = */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* > */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ? */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* @ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* A */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* B */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* C */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* D */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* E */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* F */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* G */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* H */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* I */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* J */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* K */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* L */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* M */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* N */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* O */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* P */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* Q */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* R */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* S */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* T */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* U */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* V */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* W */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* X */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* Y */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* Z */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
- /* [ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* \ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ] */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ^ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* _ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ` */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* a */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* b */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* c */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* d */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* e */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* f */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* g */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* h */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* i */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* j */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* k */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* l */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* m */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* n */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* o */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* p */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* q */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* r */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* s */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* t */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* u */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* v */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* w */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* x */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* y */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* z */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
- /* { */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* | */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* } */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* ~ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
- /* DEL */ 0
+ /* NUL */ 0,
+ /* ^A */ 0,
+ /* ^B */ 0,
+ /* ^C */ 0,
+ /* ^D */ 0,
+ /* ^E */ 0,
+ /* ^F */ 0,
+ /* ^G */ 0,
+ /* ^H */ 0,
+ /* ^I */ PG_ISSPACE,
+ /* ^J */ PG_ISSPACE,
+ /* ^K */ PG_ISSPACE,
+ /* ^L */ PG_ISSPACE,
+ /* ^M */ PG_ISSPACE,
+ /* ^N */ 0,
+ /* ^O */ 0,
+ /* ^P */ 0,
+ /* ^Q */ 0,
+ /* ^R */ 0,
+ /* ^S */ 0,
+ /* ^T */ 0,
+ /* ^U */ 0,
+ /* ^V */ 0,
+ /* ^W */ 0,
+ /* ^X */ 0,
+ /* ^Y */ 0,
+ /* ^Z */ 0,
+ /* ^[ */ 0,
+ /* ^\ */ 0,
+ /* ^] */ 0,
+ /* ^^ */ 0,
+ /* ^_ */ 0,
+ /* */ PG_ISPRINT | PG_ISSPACE,
+ /* ! */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* " */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* # */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* $ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* % */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* & */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ' */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ( */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ) */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* * */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* + */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* , */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* - */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* . */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* / */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* 0 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 1 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 2 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 3 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 4 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 5 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 6 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 7 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 8 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* 9 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+ /* : */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ; */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* < */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* = */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* > */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ? */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* @ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* A */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* B */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* C */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* D */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* E */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* F */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* G */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* H */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* I */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* J */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* K */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* L */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* M */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* N */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* O */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* P */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* Q */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* R */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* S */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* T */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* U */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* V */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* W */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* X */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* Y */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* Z */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+ /* [ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* \ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ] */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ^ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* _ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ` */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* a */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* b */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* c */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* d */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* e */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* f */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* g */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* h */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* i */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* j */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* k */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* l */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* m */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* n */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* o */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* p */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* q */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* r */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* s */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* t */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* u */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* v */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* w */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* x */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* y */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* z */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+ /* { */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* | */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* } */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* ~ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+ /* DEL */ 0
};
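
Illustrative only: the property table above is indexed by the 7-bit ASCII code point; a lookup might look like the following (the helper name is made up):

/* Hypothetical helper: true if an ASCII character has the given property
 * bit(s) set in pg_char_properties; non-ASCII input returns false here. */
static bool
ascii_char_has_property(unsigned char c, unsigned char prop)
{
    if (c >= 128)
        return false;           /* table only covers 7-bit ASCII */
    return (pg_char_properties[c] & prop) != 0;
}

/* e.g. ascii_char_has_property('3', PG_ISDIGIT) is true,
 *      ascii_char_has_property('\t', PG_ISSPACE) is true. */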
{
/*
* NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T;
- * the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does
- * not have to be considered below.
+ * the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not
+ * have to be considered below.
*/
pg_regex_locale = pg_newlocale_from_collation(collation);
}
/*
* If the postmaster dies, we'll probably never get an
- * acknowledgement, because all the wal sender processes will exit.
- * So just bail out.
+ * acknowledgement, because all the wal sender processes will exit. So
+ * just bail out.
*/
if (!PostmasterIsAlive(true))
{
if (action != NIL || is_instead)
{
InsertRule(rulename,
- event_type,
- event_relid,
- event_attno,
- is_instead,
- event_qual,
- action,
- replace);
+ event_type,
+ event_relid,
+ event_attno,
+ is_instead,
+ event_qual,
+ action,
+ replace);
/*
* Set pg_class 'relhasrules' field TRUE for event relation. If
}
/*
- * If the original query has any CTEs, copy them into the rule action.
- * But we don't need them for a utility action.
+ * If the original query has any CTEs, copy them into the rule action. But
+ * we don't need them for a utility action.
*/
if (parsetree->cteList != NIL && sub_action->commandType != CMD_UTILITY)
{
ListCell *lc;
/*
- * Annoying implementation restriction: because CTEs are identified
- * by name within a cteList, we can't merge a CTE from the original
- * query if it has the same name as any CTE in the rule action.
+ * Annoying implementation restriction: because CTEs are identified by
+ * name within a cteList, we can't merge a CTE from the original query
+ * if it has the same name as any CTE in the rule action.
*
* This could possibly be fixed by using some sort of internally
* generated ID, instead of names, to link CTE RTEs to their CTEs.
/*
* If the original query has a CTE list, and we generated more than one
- * non-utility result query, we have to fail because we'll have copied
- * the CTE list into each result query. That would break the expectation
- * of single evaluation of CTEs. This could possibly be fixed by
+ * non-utility result query, we have to fail because we'll have copied the
+ * CTE list into each result query. That would break the expectation of
+ * single evaluation of CTEs. This could possibly be fixed by
* restructuring so that a CTE list can be shared across multiple Query
* and PlannableStatement nodes.
*/
if (parsetree->cteList != NIL)
{
- int qcount = 0;
+ int qcount = 0;
foreach(lc1, rewritten)
{
int clen = pg_mblen(ptr);
wchar_t character[2];
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
- pg_locale_t mylocale = 0; /* TODO */
+ pg_locale_t mylocale = 0; /* TODO */
if (clen == 1 || lc_ctype_is_c(collation))
return isdigit(TOUCHAR(ptr));
int clen = pg_mblen(ptr);
wchar_t character[2];
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
- pg_locale_t mylocale = 0; /* TODO */
+ pg_locale_t mylocale = 0; /* TODO */
if (clen == 1 || lc_ctype_is_c(collation))
return isspace(TOUCHAR(ptr));
int clen = pg_mblen(ptr);
wchar_t character[2];
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
- pg_locale_t mylocale = 0; /* TODO */
+ pg_locale_t mylocale = 0; /* TODO */
if (clen == 1 || lc_ctype_is_c(collation))
return isalpha(TOUCHAR(ptr));
int clen = pg_mblen(ptr);
wchar_t character[2];
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
- pg_locale_t mylocale = 0; /* TODO */
+ pg_locale_t mylocale = 0; /* TODO */
if (clen == 1 || lc_ctype_is_c(collation))
return isprint(TOUCHAR(ptr));
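/*
 * Editor's sketch (not part of the patch): the dispatch pattern shared by
 * the t_is*() fragments above.  A single-byte character, or a database
 * whose LC_CTYPE is "C", can use plain <ctype.h>; otherwise the multibyte
 * character is converted to wchar_t and the wide classifier is used.  The
 * backend converts with its own char2wchar(); this sketch substitutes the
 * standard mbtowc() to stay self-contained.
 */
#include <ctype.h>
#include <stdbool.h>
#include <stdlib.h>
#include <wchar.h>
#include <wctype.h>

static bool
sketch_t_isalpha(const char *ptr, int clen, bool ctype_is_c)
{
	wchar_t		wc;

	if (clen == 1 || ctype_is_c)
		return isalpha((unsigned char) *ptr) != 0;

	if (mbtowc(&wc, ptr, (size_t) clen) <= 0)
		return false;			/* conversion failed; treat as non-alpha */
	return iswalpha((wint_t) wc) != 0;
}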
#ifdef USE_WIDE_UPPER_LOWER
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
- pg_locale_t mylocale = 0; /* TODO */
+ pg_locale_t mylocale = 0; /* TODO */
#endif
if (len == 0)
if (prs->charmaxlen > 1)
{
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
- pg_locale_t mylocale = 0; /* TODO */
+ pg_locale_t mylocale = 0; /* TODO */
prs->usewide = true;
if (lc_ctype_is_c(collation))
/* Compatible with postgresql < 8.4 when DateStyle = 'iso' */
case INTSTYLE_POSTGRES:
cp = AddPostgresIntPart(cp, year, "year", &is_zero, &is_before);
+
/*
- * Ideally we should spell out "month" like we do for "year"
- * and "day". However, for backward compatibility, we can't
- * easily fix this. bjm 2011-05-24
+ * Ideally we should spell out "month" like we do for "year" and
+ * "day". However, for backward compatibility, we can't easily
+ * fix this. bjm 2011-05-24
*/
cp = AddPostgresIntPart(cp, mon, "mon", &is_zero, &is_before);
cp = AddPostgresIntPart(cp, mday, "day", &is_zero, &is_before);
dst[len] = '\0';
if (encoding != PG_UTF8)
{
- char *convstr =
- (char *) pg_do_encoding_conversion((unsigned char *) dst,
- len, PG_UTF8, encoding);
+ char *convstr =
+ (char *) pg_do_encoding_conversion((unsigned char *) dst,
+ len, PG_UTF8, encoding);
if (dst != convstr)
{
#ifdef HAVE_WCSTOMBS_L
/* Use wcstombs_l for nondefault locales */
result = wcstombs_l(to, from, tolen, locale);
-#else /* !HAVE_WCSTOMBS_L */
+#else /* !HAVE_WCSTOMBS_L */
/* We have to temporarily set the locale as current ... ugh */
locale_t save_locale = uselocale(locale);
result = wcstombs(to, from, tolen);
uselocale(save_locale);
-#endif /* HAVE_WCSTOMBS_L */
-#else /* !HAVE_LOCALE_T */
+#endif /* HAVE_WCSTOMBS_L */
+#else /* !HAVE_LOCALE_T */
/* Can't have locale != 0 without HAVE_LOCALE_T */
elog(ERROR, "wcstombs_l is not available");
result = 0; /* keep compiler quiet */
-#endif /* HAVE_LOCALE_T */
+#endif /* HAVE_LOCALE_T */
}
return result;
#ifdef HAVE_WCSTOMBS_L
/* Use mbstowcs_l for nondefault locales */
result = mbstowcs_l(to, str, tolen, locale);
-#else /* !HAVE_WCSTOMBS_L */
+#else /* !HAVE_WCSTOMBS_L */
/* We have to temporarily set the locale as current ... ugh */
locale_t save_locale = uselocale(locale);
result = mbstowcs(to, str, tolen);
uselocale(save_locale);
-#endif /* HAVE_WCSTOMBS_L */
-#else /* !HAVE_LOCALE_T */
+#endif /* HAVE_WCSTOMBS_L */
+#else /* !HAVE_LOCALE_T */
/* Can't have locale != 0 without HAVE_LOCALE_T */
elog(ERROR, "mbstowcs_l is not available");
- result = 0; /* keep compiler quiet */
-#endif /* HAVE_LOCALE_T */
+ result = 0; /* keep compiler quiet */
+#endif /* HAVE_LOCALE_T */
}
pfree(str);
return result;
}
-#endif /* USE_WIDE_UPPER_LOWER */
+#endif /* USE_WIDE_UPPER_LOWER */
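/*
 * Editor's sketch (not part of the patch): the fallback shape used above
 * when the *_l conversion functions are missing.  With HAVE_WCSTOMBS_L the
 * locale is passed directly; otherwise uselocale() temporarily makes it the
 * thread's active locale around a plain wcstombs() call.  Assumes a
 * POSIX-2008 locale_t; HAVE_WCSTOMBS_L stands in for the configure result.
 */
#include <locale.h>
#include <stdlib.h>
#include <wchar.h>

static size_t
sketch_wcstombs_l(char *to, const wchar_t *from, size_t tolen, locale_t loc)
{
#ifdef HAVE_WCSTOMBS_L
	return wcstombs_l(to, from, tolen, loc);
#else
	locale_t	save_locale = uselocale(loc);
	size_t		result = wcstombs(to, from, tolen);

	uselocale(save_locale);
	return result;
#endif
}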
#define RIAttName(rel, attnum) NameStr(*attnumAttName(rel, attnum))
#define RIAttType(rel, attnum) attnumTypeId(rel, attnum)
-#define RIAttCollation(rel, attnum) attnumCollationId(rel, attnum)
+#define RIAttCollation(rel, attnum) attnumCollationId(rel, attnum)
#define RI_TRIGTYPE_INSERT 1
#define RI_TRIGTYPE_UPDATE 2
collname = NameStr(colltup->collname);
/*
- * We qualify the name always, for simplicity and to ensure the query
- * is not search-path-dependent.
+ * We qualify the name always, for simplicity and to ensure the query is
+ * not search-path-dependent.
*/
quoteOneName(onename, get_namespace_name(colltup->collnamespace));
appendStringInfo(buf, " COLLATE %s", onename);
}
/*
- * Apply the comparison operator. We assume it doesn't
- * care about collations.
+ * Apply the comparison operator. We assume it doesn't care about
+ * collations.
*/
return DatumGetBool(FunctionCall2(&entry->eq_opr_finfo,
oldvalue, newvalue));
if (caseexpr->arg)
{
/*
- * The parser should have produced WHEN clauses of
- * the form "CaseTestExpr = RHS", possibly with an
+ * The parser should have produced WHEN clauses of the
+ * form "CaseTestExpr = RHS", possibly with an
* implicit coercion inserted above the CaseTestExpr.
* For accurate decompilation of rules it's essential
* that we show just the RHS. However in an
/* be careful to apply operator right way 'round */
if (varonleft)
match = DatumGetBool(FunctionCall2Coll(&eqproc,
- DEFAULT_COLLATION_OID,
+ DEFAULT_COLLATION_OID,
values[i],
constval));
else
match = DatumGetBool(FunctionCall2Coll(&eqproc,
- DEFAULT_COLLATION_OID,
+ DEFAULT_COLLATION_OID,
constval,
values[i]));
if (match)
}
/*
- * Divide pattern into fixed prefix and remainder. XXX we have to assume
+ * Divide pattern into fixed prefix and remainder. XXX we have to assume
* default collation here, because we don't have access to the actual
* input collation for the operator. FIXME ...
*/
* before doing the division.
*
* Crude as the above is, it's completely useless if we don't have
- * reliable ndistinct values for both sides. Hence, if either nd1
- * or nd2 is default, punt and assume half of the uncertain rows
- * have join partners.
+ * reliable ndistinct values for both sides. Hence, if either nd1 or
+ * nd2 is default, punt and assume half of the uncertain rows have
+ * join partners.
*/
if (nd1 != DEFAULT_NUM_DISTINCT && nd2 != DEFAULT_NUM_DISTINCT)
{
* Check whether char is a letter (and, hence, subject to case-folding)
*
* In multibyte character sets, we can't use isalpha, and it does not seem
- * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
+ * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* any multibyte char is potentially case-varying.
*/
static int
int pos,
match_pos;
bool is_multibyte = (pg_database_encoding_max_length() > 1);
- pg_locale_t locale = 0;
+ pg_locale_t locale = 0;
bool locale_is_c = false;
/* the right-hand const is type text or bytea */
if (typeid == BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("case insensitive matching not supported on type bytea")));
+ errmsg("case insensitive matching not supported on type bytea")));
/* If case-insensitive, we need locale info */
if (lc_ctype_is_c(collation))
/* Stop if case-varying character (it's sort of a wildcard) */
if (case_insensitive &&
- pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
+ pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
break;
match[match_pos++] = patt[pos];
char *rest;
Oid typeid = patt_const->consttype;
bool is_multibyte = (pg_database_encoding_max_length() > 1);
- pg_locale_t locale = 0;
+ pg_locale_t locale = 0;
bool locale_is_c = false;
/*
/* Stop if case-varying character (it's sort of a wildcard) */
if (case_insensitive &&
- pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
+ pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
break;
/*
if (*cp < '0' || *cp > '9')
{
++arg;
- if (arg <= 0) /* overflow? */
+ if (arg <= 0) /* overflow? */
{
/*
* Should not happen, as you can't pass billions of arguments
arg = 0;
do
{
- int newarg = arg * 10 + (*cp - '0');
+ int newarg = arg * 10 + (*cp - '0');
- if (newarg / 10 != arg) /* overflow? */
+ if (newarg / 10 != arg) /* overflow? */
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("argument number is out of range")));
*
* Several seemingly-odd choices have been made to support use of the type
* cache by generic array and record handling routines, such as array_eq(),
- * record_cmp(), and hash_array(). Because those routines are used as index
+ * record_cmp(), and hash_array(). Because those routines are used as index
* support operations, they cannot leak memory. To allow them to execute
* efficiently, all information that they would like to re-use across calls
* is kept in the type cache.
if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
typentry->eq_opr == InvalidOid)
{
- Oid eq_opr = InvalidOid;
+ Oid eq_opr = InvalidOid;
if (typentry->btree_opf != InvalidOid)
eq_opr = get_opfamily_member(typentry->btree_opf,
HTEqualStrategyNumber);
/*
- * If the proposed equality operator is array_eq or record_eq,
- * check to see if the element type or column types support equality.
- * If not, array_eq or record_eq would fail at runtime, so we don't
- * want to report that the type has equality.
+ * If the proposed equality operator is array_eq or record_eq, check
+ * to see if the element type or column types support equality. If
+ * not, array_eq or record_eq would fail at runtime, so we don't want
+ * to report that the type has equality.
*/
if (eq_opr == ARRAY_EQ_OP &&
!array_element_has_equality(typentry))
}
if ((flags & TYPECACHE_LT_OPR) && typentry->lt_opr == InvalidOid)
{
- Oid lt_opr = InvalidOid;
+ Oid lt_opr = InvalidOid;
if (typentry->btree_opf != InvalidOid)
lt_opr = get_opfamily_member(typentry->btree_opf,
}
if ((flags & TYPECACHE_GT_OPR) && typentry->gt_opr == InvalidOid)
{
- Oid gt_opr = InvalidOid;
+ Oid gt_opr = InvalidOid;
if (typentry->btree_opf != InvalidOid)
gt_opr = get_opfamily_member(typentry->btree_opf,
if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
typentry->cmp_proc == InvalidOid)
{
- Oid cmp_proc = InvalidOid;
+ Oid cmp_proc = InvalidOid;
if (typentry->btree_opf != InvalidOid)
cmp_proc = get_opfamily_proc(typentry->btree_opf,
if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
typentry->hash_proc == InvalidOid)
{
- Oid hash_proc = InvalidOid;
+ Oid hash_proc = InvalidOid;
/*
* We insist that the eq_opr, if one has been determined, match the
{
Relation rel;
- if (!OidIsValid(typentry->typrelid)) /* should not happen */
+ if (!OidIsValid(typentry->typrelid)) /* should not happen */
elog(ERROR, "invalid typrelid for composite type %u",
typentry->type_id);
rel = relation_open(typentry->typrelid, AccessShareLock);
/*
* Link to the tupdesc and increment its refcount (we assert it's a
- * refcounted descriptor). We don't use IncrTupleDescRefCount() for
- * this, because the reference mustn't be entered in the current
- * resource owner; it can outlive the current query.
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
+ * because the reference mustn't be entered in the current resource owner;
+ * it can outlive the current query.
*/
typentry->tupDesc = RelationGetDescr(rel);
static void
cache_array_element_properties(TypeCacheEntry *typentry)
{
- Oid elem_type = get_base_element_type(typentry->type_id);
+ Oid elem_type = get_base_element_type(typentry->type_id);
if (OidIsValid(elem_type))
{
{
TupleDesc tupdesc;
int newflags;
- int i;
+ int i;
/* Fetch composite type's tupdesc if we don't have it already */
if (typentry->tupDesc == NULL)
Datum
DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
- Datum arg3)
+ Datum arg3)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4)
+ Datum arg3, Datum arg4)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
DirectFunctionCall5Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5)
+ Datum arg3, Datum arg4, Datum arg5)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
DirectFunctionCall6Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
DirectFunctionCall7Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
DirectFunctionCall9Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8,
- Datum arg9)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8,
+ Datum arg9)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
FunctionCall3Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
- Datum arg3)
+ Datum arg3)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
FunctionCall4Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4)
+ Datum arg3, Datum arg4)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
FunctionCall5Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5)
+ Datum arg3, Datum arg4, Datum arg5)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
FunctionCall6Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
FunctionCall7Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
FunctionCall8Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8,
- Datum arg9)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8,
+ Datum arg9)
{
FunctionCallInfoData fcinfo;
Datum result;
Datum
OidFunctionCall3Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
- Datum arg3)
+ Datum arg3)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
Datum
OidFunctionCall4Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4)
+ Datum arg3, Datum arg4)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
Datum
OidFunctionCall5Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5)
+ Datum arg3, Datum arg4, Datum arg5)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
Datum
OidFunctionCall6Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
Datum
OidFunctionCall7Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
Datum
OidFunctionCall8Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
Datum
OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8,
- Datum arg9)
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8,
+ Datum arg9)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
*/
if (IsBinaryUpgrade && !am_superuser)
{
- ereport(FATAL,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to connect in binary upgrade mode")));
+ ereport(FATAL,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser to connect in binary upgrade mode")));
}
/*
"@authcomment@",
strcmp(authmethod, "trust") ? "" : AUTHTRUST_WARNING);
- /* Replace username for replication */
+ /* Replace username for replication */
conflines = replace_token(conflines,
"@default_username@",
username);
*/
if (normalize_locale_name(alias, localebuf))
PG_CMD_PRINTF3("INSERT INTO tmp_pg_collation VALUES (E'%s', E'%s', %d);\n",
- escape_quotes(alias), quoted_locale, enc);
+ escape_quotes(alias), quoted_locale, enc);
}
/* Add an SQL-standard name */
" encoding, locale, locale "
" FROM tmp_pg_collation"
" WHERE NOT EXISTS (SELECT 1 FROM pg_collation WHERE collname = tmp_pg_collation.collname)"
- " ORDER BY collname, encoding, (collname = locale) DESC, locale;\n");
+ " ORDER BY collname, encoding, (collname = locale) DESC, locale;\n");
pclose(locale_a_handle);
PG_CMD_CLOSE;
#else /* not HAVE_LOCALE_T && not WIN32 */
printf(_("not supported on this platform\n"));
fflush(stdout);
-#endif /* not HAVE_LOCALE_T && not WIN32*/
+#endif /* not HAVE_LOCALE_T && not WIN32 */
}
/*
static void
strreplace(char *str, char *needle, char *replacement)
{
- char *s;
+ char *s;
s = strstr(str, needle);
if (s != NULL)
{
- int replacementlen = strlen(replacement);
- char *rest = s + strlen(needle);
+ int replacementlen = strlen(replacement);
+ char *rest = s + strlen(needle);
memcpy(s, replacement, replacementlen);
memmove(s + replacementlen, rest, strlen(rest) + 1);
}
}
-
-#endif /* WIN32 */
+#endif /* WIN32 */
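/*
 * Editor's sketch (not part of the patch): the memcpy/memmove technique in
 * strreplace() above, restated self-contained.  Note the ordering only
 * works when the replacement is no longer than the needle (as in the
 * locale-name mapping below); a longer replacement would clobber the tail
 * before memmove() copies it.
 */
#include <stdio.h>
#include <string.h>

static void
sketch_replace_once(char *str, const char *needle, const char *replacement)
{
	char	   *s = strstr(str, needle);

	if (s != NULL)
	{
		size_t		replacementlen = strlen(replacement);
		char	   *rest = s + strlen(needle);

		memcpy(s, replacement, replacementlen);
		memmove(s + replacementlen, rest, strlen(rest) + 1);
	}
}

int
main(void)
{
	char		locale[64] = "Chinese_Macau S.A.R..950";

	sketch_replace_once(locale, "Chinese_Macau S.A.R..950", "ZHM");
	printf("%s\n", locale);		/* prints "ZHM" */
	return 0;
}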
/*
* Windows has a problem with locale names that have a dot in the country
locale = xstrdup(locale);
#ifdef WIN32
+
/*
* Map the full country name to an abbreviation that setlocale() accepts.
*
/*
* The ISO-3166 country code for Macau S.A.R. is MAC, but Windows doesn't
- * seem to recognize that. And Macau isn't listed in the table of
- * accepted abbreviations linked above.
+ * seem to recognize that. And Macau isn't listed in the table of accepted
+ * abbreviations linked above.
*
- * Fortunately, "ZHM" seems to be accepted as an alias for
- * "Chinese (Traditional)_Macau S.A.R..950", so we use that. Note that
- * it's unlike HKG and ARE, ZHM is an alias for the whole locale name,
- * not just the country part. I'm not sure where that "ZHM" comes from,
- * must be some legacy naming scheme. But hey, it works.
+ * Fortunately, "ZHM" seems to be accepted as an alias for "Chinese
+ * (Traditional)_Macau S.A.R..950", so we use that. Note that, unlike HKG
+ * and ARE, ZHM is an alias for the whole locale name, not just the
+ * country part. I'm not sure where that "ZHM" comes from, must be some
+ * legacy naming scheme. But hey, it works.
*
* Some versions of Windows spell it "Macau", others "Macao".
*/
strreplace(locale, "Chinese_Macau S.A.R..950", "ZHM");
strreplace(locale, "Chinese (Traditional)_Macao S.A.R..950", "ZHM");
strreplace(locale, "Chinese_Macao S.A.R..950", "ZHM");
-#endif /* WIN32 */
+#endif /* WIN32 */
return locale;
}
else if (!pg_valid_server_encoding_id(ctype_enc))
{
/*
- * We recognized it, but it's not a legal server encoding.
- * On Windows, UTF-8 works with any locale, so we can fall back
- * to UTF-8.
+ * We recognized it, but it's not a legal server encoding. On
+ * Windows, UTF-8 works with any locale, so we can fall back to
+ * UTF-8.
*/
#ifdef WIN32
printf(_("Encoding %s implied by locale is not allowed as a server-side encoding.\n"
- "The default database encoding will be set to %s instead.\n"),
+ "The default database encoding will be set to %s instead.\n"),
pg_encoding_to_char(ctype_enc),
pg_encoding_to_char(PG_UTF8));
ctype_enc = PG_UTF8;
printf(_(" -Z, --compress=0-9 compress tar output with given compression level\n"));
printf(_("\nGeneral options:\n"));
printf(_(" -c, --checkpoint=fast|spread\n"
- " set fast or spread checkpointing\n"));
+ " set fast or spread checkpointing\n"));
printf(_(" -l, --label=LABEL set backup label\n"));
printf(_(" -P, --progress show progress information\n"));
printf(_(" -v, --verbose output verbose messages\n"));
#ifdef HAVE_LIBZ
compresslevel = Z_DEFAULT_COMPRESSION;
#else
- compresslevel = 1; /* will be rejected below */
+ compresslevel = 1; /* will be rejected below */
#endif
break;
case 'Z':
* Since there might be quotes to handle here, it is easier simply to pass
* everything to a shell to process them.
*
- * XXX it would be better to fork and exec so that we would know the
- * child postmaster's PID directly; then test_postmaster_connection could
- * use the PID without having to rely on reading it back from the pidfile.
+ * XXX it would be better to fork and exec so that we would know the child
+ * postmaster's PID directly; then test_postmaster_connection could use
+ * the PID without having to rely on reading it back from the pidfile.
*/
if (log_file != NULL)
snprintf(cmd, MAXPGPATH, SYSTEMQUOTE "\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1 &" SYSTEMQUOTE,
time_t pmstart;
/*
- * Make sanity checks. If it's for a standalone backend
+ * Make sanity checks. If it's for a standalone backend
* (negative PID), or the recorded start time is before
* pg_ctl started, then either we are looking at the wrong
* data directory, or this is a pre-existing pidfile that
if (pmpid <= 0 || pmstart < start_time - 2)
{
/*
- * Set flag to report stale pidfile if it doesn't
- * get overwritten before we give up waiting.
+ * Set flag to report stale pidfile if it doesn't get
+ * overwritten before we give up waiting.
*/
found_stale_pidfile = true;
}
* timeout first.
*/
snprintf(connstr, sizeof(connstr),
- "dbname=postgres port=%d host='%s' connect_timeout=5",
+ "dbname=postgres port=%d host='%s' connect_timeout=5",
portnum, host_str);
}
}
/*
* The postmaster should create postmaster.pid very soon after being
* started. If it's not there after we've waited 5 or more seconds,
- * assume startup failed and give up waiting. (Note this covers
- * both cases where the pidfile was never created, and where it was
- * created and then removed during postmaster exit.) Also, if there
- * *is* a file there but it appears stale, issue a suitable warning
- * and give up waiting.
+ * assume startup failed and give up waiting. (Note this covers both
+ * cases where the pidfile was never created, and where it was created
+ * and then removed during postmaster exit.) Also, if there *is* a
+ * file there but it appears stale, issue a suitable warning and give
+ * up waiting.
*/
if (i >= 5)
{
/*
* If we've been able to identify the child postmaster's PID, check
- * the process is still alive. This covers cases where the postmaster
+ * the process is still alive. This covers cases where the postmaster
* successfully created the pidfile but then crashed without removing
* it.
*/
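/*
 * Editor's sketch (not part of the patch): the waiting behaviour described
 * above, reduced to its shape.  pg_ctl polls postmaster.pid roughly once a
 * second; if no pidfile has appeared after about five seconds it assumes
 * startup failed.  The file name and helpers below are illustrative only.
 */
#include <stdio.h>
#include <unistd.h>

static long
sketch_read_pidfile(const char *path)
{
	FILE	   *f = fopen(path, "r");
	long		pid = 0;

	if (f != NULL)
	{
		if (fscanf(f, "%ld", &pid) != 1)
			pid = 0;
		fclose(f);
	}
	return pid;
}

static long
sketch_wait_for_postmaster(const char *pidpath, int wait_seconds)
{
	int			i;

	for (i = 0; i < wait_seconds; i++)
	{
		long		pid = sketch_read_pidfile(pidpath);

		if (pid > 0)
			return pid;			/* postmaster wrote its PID */
		if (i >= 5)
			break;				/* pidfile never showed up: give up */
		sleep(1);
	}
	return 0;
}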
* restore */
int use_setsessauth;/* Use SET SESSION AUTHORIZATION commands
* instead of OWNER TO */
- int no_security_labels; /* Skip security label entries */
+ int no_security_labels; /* Skip security label entries */
char *superuser; /* Username to use as superuser */
char *use_role; /* Issue SET ROLE to this */
int dataOnly;
* collation does not matter for those.
*/
appendPQExpBuffer(query, "SELECT a.attname, "
- "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
+ "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
"a.attlen, a.attalign, a.attisdropped, "
"CASE WHEN a.attcollation <> at.typcollation "
"THEN a.attcollation ELSE 0 END AS attcollation, "
"ct.typrelid "
"FROM pg_catalog.pg_type ct "
- "JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
- "LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
+ "JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
+ "LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
"WHERE ct.oid = '%u'::pg_catalog.oid "
"ORDER BY a.attnum ",
tyinfo->dobj.catId.oid);
* always be false.
*/
appendPQExpBuffer(query, "SELECT a.attname, "
- "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
+ "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
"a.attlen, a.attalign, a.attisdropped, "
"0 AS attcollation, "
"ct.typrelid "
- "FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a "
+ "FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a "
"WHERE ct.oid = '%u'::pg_catalog.oid "
"AND a.attrelid = ct.typrelid "
"ORDER BY a.attnum ",
{
/*
* This is a dropped attribute and we're in binary_upgrade mode.
- * Insert a placeholder for it in the CREATE TYPE command, and
- * set length and alignment with direct UPDATE to the catalogs
+ * Insert a placeholder for it in the CREATE TYPE command, and set
+ * length and alignment with direct UPDATE to the catalogs
* afterwards. See similar code in dumpTableSchema().
*/
appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
/* stash separately for insertion after the CREATE TYPE */
appendPQExpBuffer(dropped,
- "\n-- For binary upgrade, recreate dropped column.\n");
+ "\n-- For binary upgrade, recreate dropped column.\n");
appendPQExpBuffer(dropped, "UPDATE pg_catalog.pg_attribute\n"
"SET attlen = %s, "
"attalign = '%s', attbyval = false\n"
* However, for a language that belongs to an extension, we must not use
* the shouldDumpProcLangs heuristic, but just dump the language iff we're
* told to (via dobj.dump). Generally the support functions will belong
- * to the same extension and so have the same dump flags ... if they don't,
- * this might not work terribly nicely.
+ * to the same extension and so have the same dump flags ... if they
+ * don't, this might not work terribly nicely.
*/
useParams = (funcInfo != NULL &&
(inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
return;
/*
- * FDWs that belong to an extension are dumped based on their "dump" field.
- * Otherwise omit them if we are only dumping some specific object.
+ * FDWs that belong to an extension are dumped based on their "dump"
+ * field. Otherwise omit them if we are only dumping some specific object.
*/
if (!fdwinfo->dobj.ext_member)
if (!include_everything)
if (binary_upgrade)
binary_upgrade_set_type_oids_by_rel_oid(q,
- tbinfo->dobj.catId.oid);
+ tbinfo->dobj.catId.oid);
/* Is it a table or a view? */
if (tbinfo->relkind == RELKIND_VIEW)
"UNLOGGED " : "",
reltypename,
fmtId(tbinfo->dobj.name));
+
/*
* In case of a binary upgrade, we dump the table normally and attach
* it to the type afterward.
{
printfPQExpBuffer(&buf,
"SELECT conname,\n"
- " pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
+ " pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
"FROM pg_catalog.pg_constraint r\n"
"WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1",
oid);
printfPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
" t.typname as \"%s\",\n"
- " pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
+ " pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
" TRIM(LEADING\n",
gettext_noop("Schema"),
gettext_noop("Name"),
" COALESCE((SELECT ' collate ' || c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt\n"
" WHERE c.oid = t.typcollation AND bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation), '') ||\n");
appendPQExpBuffer(&buf,
- " CASE WHEN t.typnotnull THEN ' not null' ELSE '' END ||\n"
+ " CASE WHEN t.typnotnull THEN ' not null' ELSE '' END ||\n"
" CASE WHEN t.typdefault IS NOT NULL THEN ' default ' || t.typdefault ELSE '' END\n"
" ) as \"%s\",\n",
gettext_noop("Modifier"));
appendPQExpBuffer(&sql, ";\n");
- /*
- * Connect to the 'postgres' database by default, except have
- * the 'postgres' user use 'template1' so he can create the
- * 'postgres' database.
- */
+ /*
+ * Connect to the 'postgres' database by default, except have the
+ * 'postgres' user use 'template1' so he can create the 'postgres'
+ * database.
+ */
conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
host, port, username, prompt_password, progname);
appendPQExpBuffer(&sql, "DROP DATABASE %s;\n",
fmtId(dbname));
- /*
- * Connect to the 'postgres' database by default, except have
- * the 'postgres' user use 'template1' so he can drop the
- * 'postgres' database.
- */
+ /*
+ * Connect to the 'postgres' database by default, except have the
+ * 'postgres' user use 'template1' so he can drop the 'postgres' database.
+ */
conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
host, port, username, prompt_password, progname);
* are allowed to be NULL.
*/
extern Datum DirectFunctionCall1Coll(PGFunction func, Oid collation,
- Datum arg1);
+ Datum arg1);
extern Datum DirectFunctionCall2Coll(PGFunction func, Oid collation,
- Datum arg1, Datum arg2);
+ Datum arg1, Datum arg2);
extern Datum DirectFunctionCall3Coll(PGFunction func, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3);
+ Datum arg1, Datum arg2,
+ Datum arg3);
extern Datum DirectFunctionCall4Coll(PGFunction func, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4);
extern Datum DirectFunctionCall5Coll(PGFunction func, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5);
extern Datum DirectFunctionCall6Coll(PGFunction func, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6);
extern Datum DirectFunctionCall7Coll(PGFunction func, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7);
extern Datum DirectFunctionCall8Coll(PGFunction func, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8);
extern Datum DirectFunctionCall9Coll(PGFunction func, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8,
- Datum arg9);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8,
+ Datum arg9);
/* These are for invocation of a previously-looked-up function with a
* directly-computed parameter list. Note that neither arguments nor result
* are allowed to be NULL.
*/
extern Datum FunctionCall1Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1);
+ Datum arg1);
extern Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1, Datum arg2);
+ Datum arg1, Datum arg2);
extern Datum FunctionCall3Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3);
+ Datum arg1, Datum arg2,
+ Datum arg3);
extern Datum FunctionCall4Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4);
extern Datum FunctionCall5Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5);
extern Datum FunctionCall6Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6);
extern Datum FunctionCall7Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7);
extern Datum FunctionCall8Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8);
extern Datum FunctionCall9Coll(FmgrInfo *flinfo, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8,
- Datum arg9);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8,
+ Datum arg9);
/* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
*/
extern Datum OidFunctionCall0Coll(Oid functionId, Oid collation);
extern Datum OidFunctionCall1Coll(Oid functionId, Oid collation,
- Datum arg1);
+ Datum arg1);
extern Datum OidFunctionCall2Coll(Oid functionId, Oid collation,
- Datum arg1, Datum arg2);
+ Datum arg1, Datum arg2);
extern Datum OidFunctionCall3Coll(Oid functionId, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3);
+ Datum arg1, Datum arg2,
+ Datum arg3);
extern Datum OidFunctionCall4Coll(Oid functionId, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4);
extern Datum OidFunctionCall5Coll(Oid functionId, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5);
extern Datum OidFunctionCall6Coll(Oid functionId, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6);
extern Datum OidFunctionCall7Coll(Oid functionId, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7);
extern Datum OidFunctionCall8Coll(Oid functionId, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8);
extern Datum OidFunctionCall9Coll(Oid functionId, Oid collation,
- Datum arg1, Datum arg2,
- Datum arg3, Datum arg4, Datum arg5,
- Datum arg6, Datum arg7, Datum arg8,
- Datum arg9);
+ Datum arg1, Datum arg2,
+ Datum arg3, Datum arg4, Datum arg5,
+ Datum arg6, Datum arg7, Datum arg8,
+ Datum arg9);
/* These macros allow the collation argument to be omitted (with a default of
* InvalidOid, ie, no collation). They exist mostly for backwards
*
* If the function returns RECORD, funccoltypes lists the column types
* declared in the RTE's column type specification, funccoltypmods lists
- * their declared typmods, funccolcollations their collations. Otherwise,
+ * their declared typmods, funccolcollations their collations. Otherwise,
* those fields are NIL.
*/
Node *funcexpr; /* expression tree for func call */
extern bool contain_agg_clause(Node *clause);
extern List *pull_agg_clause(Node *clause);
extern void count_agg_clauses(PlannerInfo *root, Node *clause,
- AggClauseCosts *costs);
+ AggClauseCosts *costs);
extern bool contain_window_function(Node *clause);
extern WindowFuncLists *find_window_functions(Node *clause, Index maxWinRef);
#define HASH_CONTEXT 0x200 /* Set memory allocation context */
#define HASH_COMPARE 0x400 /* Set user defined comparison function */
#define HASH_KEYCOPY 0x800 /* Set user defined key-copying function */
-#define HASH_FIXED_SIZE 0x1000 /* Initial size is a hard limit */
+#define HASH_FIXED_SIZE 0x1000 /* Initial size is a hard limit */
/* max_dsize value to indicate expansible directory */
Const **prefix,
Const **rest);
extern Const *make_greater_string(const Const *str_const, FmgrInfo *ltproc,
- Oid collation);
+ Oid collation);
extern Datum eqsel(PG_FUNCTION_ARGS);
extern Datum neqsel(PG_FUNCTION_ARGS);
strcpy(fname, PQfname(res, i));
sqlda->sqlvar[i].sqlname = fname;
fname += strlen(sqlda->sqlvar[i].sqlname) + 1;
- /* this is reserved for future use, so we leave it empty for the time being */
- /* sqlda->sqlvar[i].sqlformat = (char *) (long) PQfformat(res, i);*/
+
+ /*
+ * this is reserved for future use, so we leave it empty for the time
+ * being
+ */
+ /* sqlda->sqlvar[i].sqlformat = (char *) (long) PQfformat(res, i); */
sqlda->sqlvar[i].sqlxid = PQftype(res, i);
sqlda->sqlvar[i].sqltypelen = PQfsize(res, i);
}
case 'G':
{
/* Keep compiler quiet - Don't use a literal format */
- const char *fmt = "%G";
+ const char *fmt = "%G";
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, fmt, tm);
case 'V':
{
/* Keep compiler quiet - Don't use a literal format */
- const char *fmt = "%V";
+ const char *fmt = "%V";
i = strftime(q, *pstr_len, fmt, tm);
if (i == 0)
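/*
 * Editor's sketch (not part of the patch): why the fragments above route
 * "%G" and "%V" through a variable.  Those conversions are later additions
 * to strftime(), and some compilers warn about them in a literal format
 * string; a const char * sidesteps the warning while still producing the
 * ISO-8601 week-based year and week number.
 */
#include <stdio.h>
#include <time.h>

int
main(void)
{
	char		buf[32];
	time_t		now = time(NULL);
	struct tm  *tm = localtime(&now);
	const char *fmt = "%G-W%V";	/* kept out of the literal to avoid warnings */

	if (strftime(buf, sizeof(buf), fmt, tm) > 0)
		printf("%s\n", buf);
	return 0;
}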
do
{
gss_display_status(&lmin_s, stat, type,
- GSS_C_NO_OID, &msg_ctx, &lmsg);
+ GSS_C_NO_OID, &msg_ctx, &lmsg);
appendPQExpBuffer(str, "%s: %s\n", mprefix, (char *) lmsg.value);
gss_release_buffer(&lmin_s, &lmsg);
} while (msg_ctx);
struct cmsghdr *cmsg;
union
{
- struct cmsghdr hdr;
- unsigned char buf[CMSG_SPACE(sizeof(struct cmsgcred))];
- } cmsgbuf;
+ struct cmsghdr hdr;
+ unsigned char buf[CMSG_SPACE(sizeof(struct cmsgcred))];
+ } cmsgbuf;
/*
* The backend doesn't care what we send here, but it wants exactly one
if ((conn->pghostaddr == NULL) &&
(conn->pghost == NULL || strcmp(conn->pghost, host_addr) != 0))
appendPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not connect to server: %s\n"
- "\tIs the server running on host \"%s\" (%s) and accepting\n"
- "\tTCP/IP connections on port %s?\n"),
+ libpq_gettext("could not connect to server: %s\n"
+ "\tIs the server running on host \"%s\" (%s) and accepting\n"
+ "\tTCP/IP connections on port %s?\n"),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
displayed_host,
host_addr,
conn->pgport);
else
appendPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not connect to server: %s\n"
- "\tIs the server running on host \"%s\" and accepting\n"
- "\tTCP/IP connections on port %s?\n"),
+ libpq_gettext("could not connect to server: %s\n"
+ "\tIs the server running on host \"%s\" and accepting\n"
+ "\tTCP/IP connections on port %s?\n"),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
displayed_host,
conn->pgport);
int packetlen;
#ifdef HAVE_UNIX_SOCKETS
+
/*
* Implement requirepeer check, if requested and it's a
* Unix-domain socket.
errno = 0;
if (getpeereid(conn->sock, &uid, &gid) != 0)
{
- /* Provide special error message if getpeereid is a stub */
+ /*
+ * Provide special error message if getpeereid is a
+ * stub
+ */
if (errno == ENOSYS)
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("requirepeer parameter is not supported on this platform\n"));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get peer credentials: %s\n"),
- pqStrerror(errno, sebuf, sizeof(sebuf)));
+ pqStrerror(errno, sebuf, sizeof(sebuf)));
goto error_return;
}
goto error_return;
}
}
-#endif /* HAVE_UNIX_SOCKETS */
+#endif /* HAVE_UNIX_SOCKETS */
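/*
 * Editor's sketch (not part of the patch): the requirepeer idea above in
 * miniature.  On a Unix-domain socket, getpeereid() reports the effective
 * uid/gid of the peer process; mapping the uid back to a name lets the
 * client verify whom it is talking to.  getpeereid() is native on the BSDs
 * and macOS; elsewhere libpq relies on a port replacement whose stub fails
 * with ENOSYS, hence the special error message above.
 */
#include <sys/types.h>
#include <errno.h>
#include <pwd.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>

static bool
sketch_peer_matches(int sock, const char *required_user)
{
	uid_t		uid;
	gid_t		gid;
	struct passwd *pw;

	errno = 0;
	if (getpeereid(sock, &uid, &gid) != 0)
		return false;			/* ENOSYS means "no support on this platform" */

	pw = getpwuid(uid);
	return pw != NULL && strcmp(pw->pw_name, required_user) == 0;
}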
#ifdef USE_SSL
if (!TDsv)
elog(ERROR, "couldn't fetch $_TD");
- save_item(TDsv); /* local $_TD */
+ save_item(TDsv); /* local $_TD */
sv_setsv(TDsv, td);
PUSHMARK(sp);
* does not appear that hashes track UTF-8-ness of keys at all in Perl
* 5.6.
*/
- hlen = - (int) strlen(hkey);
+ hlen = -(int) strlen(hkey);
ret = hv_store(hv, hkey, hlen, val, 0);
if (hkey != key)
GetDatabaseEncoding(), PG_UTF8);
/* See notes in hv_store_string */
- hlen = - (int) strlen(hkey);
+ hlen = -(int) strlen(hkey);
ret = hv_fetch(hv, hkey, hlen, 0);
if (hkey != key)
#undef vsnprintf
#endif
#ifdef __GNUC__
-#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__)
-#define snprintf(...) pg_snprintf(__VA_ARGS__)
+#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__)
+#define snprintf(...) pg_snprintf(__VA_ARGS__)
#else
-#define vsnprintf pg_vsnprintf
-#define snprintf pg_snprintf
-#endif /* __GNUC__ */
-#endif /* USE_REPL_SNPRINTF */
+#define vsnprintf pg_vsnprintf
+#define snprintf pg_snprintf
+#endif /* __GNUC__ */
+#endif /* USE_REPL_SNPRINTF */
/* perl version and platform portability */
#define NEED_eval_pv
default:
elog(ERROR, "unrecognized dtype: %d", datum->dtype);
- *typeid = InvalidOid; /* keep compiler quiet */
+ *typeid = InvalidOid; /* keep compiler quiet */
*typmod = -1;
*collation = InvalidOid;
break;
/*
* Sanity check, next < s if the line was all-whitespace, which should
- * never happen if Python reported a frame created on that line, but
- * check anyway.
+ * never happen if Python reported a frame created on that line, but check
+ * anyway.
*/
if (next < s)
return NULL;
&tbstr, "\n PL/Python function \"%s\", line %ld, in %s",
proname, plain_lineno - 1, fname);
- /* function code object was compiled with "<string>" as the filename */
+ /*
+ * function code object was compiled with "<string>" as the
+ * filename
+ */
if (PLy_curr_procedure && plain_filename != NULL &&
strcmp(plain_filename, "<string>") == 0)
{
#define BADARG (int)':'
#define EMSG ""
-int getopt(int nargc, char *const * nargv, const char * ostr);
+int getopt(int nargc, char *const * nargv, const char *ostr);
/*
* getopt
* returning -1.)
*/
int
-getopt(int nargc, char *const * nargv, const char * ostr)
+getopt(int nargc, char *const * nargv, const char *ostr)
{
static char *place = EMSG; /* option letter processing */
char *oli; /* option letter list index */
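/*
 * Editor's sketch (not part of the patch): typical use of the getopt()
 * interface declared above.  This assumes the standard POSIX behaviour the
 * replacement is meant to mimic: a trailing ':' in the option string means
 * the option takes an argument, delivered through optarg.
 */
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char *const argv[])
{
	int			c;

	while ((c = getopt(argc, argv, "ab:")) != -1)
	{
		switch (c)
		{
			case 'a':
				printf("flag -a set\n");
				break;
			case 'b':
				printf("-b argument: %s\n", optarg);
				break;
			default:			/* '?' on an unrecognized option */
				fprintf(stderr, "usage: prog [-a] [-b arg]\n");
				return 1;
		}
	}
	return 0;
}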
*gid = ucred_getegid(ucred);
ucred_free(ucred);
- if (*uid == (uid_t)(-1) || *gid == (gid_t)(-1))
+ if (*uid == (uid_t) (-1) || *gid == (gid_t) (-1))
return -1;
return 0;
#else
* We need to cover both the address family constants used by the PG inet
* type (PGSQL_AF_INET and PGSQL_AF_INET6) and those used by the system
* libraries (AF_INET and AF_INET6). We can safely assume PGSQL_AF_INET
- * == AF_INET, but the INET6 constants are very likely to be different.
- * If AF_INET6 isn't defined, silently ignore it.
+ * == AF_INET, but the INET6 constants are very likely to be different. If
+ * AF_INET6 isn't defined, silently ignore it.
*/
switch (af)
{
#if !defined(WIN32)
return (fcntl(sock, F_SETFL, O_NONBLOCK) != -1);
#else
- unsigned long ioctlsocket_ret = 1;
+ unsigned long ioctlsocket_ret = 1;
/* Returns non-0 on failure, while fcntl() returns -1 on failure */
return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
return false;
return true;
#else
- unsigned long ioctlsocket_ret = 0;
+ unsigned long ioctlsocket_ret = 0;
/* Returns non-0 on failure, while fcntl() returns -1 on failure */
return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
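/*
 * Editor's sketch (not part of the patch): the portable non-blocking toggle
 * used above.  On Unix the O_NONBLOCK flag is managed with fcntl() (the
 * fragments above set it directly; this sketch preserves the other flags);
 * on Windows, ioctlsocket(FIONBIO) takes 1 to enable and 0 to disable
 * non-blocking mode and returns non-zero on failure.
 */
#ifndef WIN32
#include <fcntl.h>
#include <stdbool.h>

static bool
sketch_set_nonblocking(int sock, bool nonblocking)
{
	int			flags = fcntl(sock, F_GETFL, 0);

	if (flags == -1)
		return false;
	if (nonblocking)
		flags |= O_NONBLOCK;
	else
		flags &= ~O_NONBLOCK;
	return fcntl(sock, F_SETFL, flags) != -1;
}
#else
#include <winsock2.h>
#include <stdbool.h>

static bool
sketch_set_nonblocking(SOCKET sock, bool nonblocking)
{
	unsigned long mode = nonblocking ? 1 : 0;

	return ioctlsocket(sock, FIONBIO, &mode) == 0;
}
#endif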
#ifndef WIN32_ONLY_COMPILER
snprintf(buf, sizeof(buf),
SYSTEMQUOTE "\"%s\" -C \"%s/%s\" DESTDIR=\"%s/install\" install >> \"%s/log/install.log\" 2>&1" SYSTEMQUOTE,
- makeprog, top_builddir, sl->str, temp_install, outputdir);
+ makeprog, top_builddir, sl->str, temp_install, outputdir);
#else
fprintf(stderr, _("\n%s: --extra-install option not supported on this platform\n"), progname);
exit_nicely(2);
* postgresql.conf, this code will not do what you might expect, namely
* call select_default_timezone() and install that value as the setting.
* Rather, the previously active setting --- typically the one from
- * postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR.
- * If we did try to install the "correct" default value, the effect would
- * be that each postmaster child would independently run an extremely
+ * postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR. If
+ * we did try to install the "correct" default value, the effect would be
+ * that each postmaster child would independently run an extremely
* expensive search of the timezone database, bringing the database to its
* knees for possibly multiple seconds. This is so unpleasant, and could
* so easily be triggered quite unintentionally, that it seems better to