Use sufficiently large buffer in SharedQueueWrite
Author: Tomas Vondra <[email protected]>
Fri, 12 Oct 2018 12:23:29 +0000 (14:23 +0200)
Committer: Tomas Vondra <[email protected]>
Fri, 12 Oct 2018 12:31:41 +0000 (14:31 +0200)
The sq_key alone may be up to 64 bytes, so we need more than that.
We could use dynamic memory instead, but 128 bytes should be enough
both for the sq_key and the other pieces.

src/backend/pgxc/squeue/squeue.c

index 2f782b92e893a81dd2d1139997739845635400ab..e87a77e149e1170a94cc47e0b88725f066eb538e 100644 (file)
@@ -902,15 +902,15 @@ SharedQueueWrite(SharedQueue squeue, int consumerIdx,
                if (*tuplestore == NULL)
                {
                        int                     ptrno;
-                       char            storename[64];
+                       char            storename[128];
 
 #ifdef SQUEUE_STAT
                        elog(DEBUG1, "Start buffering %s node %d, %d tuples in queue, %ld writes and %ld reads so far",
                                 squeue->sq_key, cstate->cs_node, cstate->cs_ntuples, cstate->stat_writes, cstate->stat_reads);
 #endif
                        *tuplestore = tuplestore_begin_datarow(false, work_mem, tmpcxt);
-                       /* We need is to be able to remember/restore the read position */
-                       snprintf(storename, 64, "%s node %d", squeue->sq_key, cstate->cs_node);
+                       /* We need to be able to remember/restore the read position. */
+                       snprintf(storename, 128, "%s node %d", squeue->sq_key, cstate->cs_node);
                        tuplestore_collect_stat(*tuplestore, storename);
                        /*
                         * Allocate a second read pointer to read from the store. We know