Use sufficiently large buffer in SharedQueueWrite
author Tomas Vondra <[email protected]>
Fri, 12 Oct 2018 12:23:29 +0000 (14:23 +0200)
committer Tomas Vondra <[email protected]>
Fri, 12 Oct 2018 12:54:37 +0000 (14:54 +0200)
The sq_key alone may be up to 64 bytes, so we need more than that.
We could use dynamic memory instead, but 128 bytes should be enough
both for the sq_key and the other pieces.

src/backend/pgxc/squeue/squeue.c

index a93857114707285f2c35fc399b7df9a9151751bc..63fdbcd5f5aa564f11740bba49e76047a30820f7 100644 (file)
@@ -857,15 +857,15 @@ SharedQueueWrite(SharedQueue squeue, int consumerIdx,
                if (*tuplestore == NULL)
                {
                        int                     ptrno;
-                       char            storename[64];
+                       char            storename[128];
 
 #ifdef SQUEUE_STAT
                        elog(DEBUG1, "Start buffering %s node %d, %d tuples in queue, %ld writes and %ld reads so far",
                                 squeue->sq_key, cstate->cs_node, cstate->cs_ntuples, cstate->stat_writes, cstate->stat_reads);
 #endif
                        *tuplestore = tuplestore_begin_datarow(false, work_mem, tmpcxt);
-                       /* We need is to be able to remember/restore the read position */
-                       snprintf(storename, 64, "%s node %d", squeue->sq_key, cstate->cs_node);
+                       /* We need to be able to remember/restore the read position. */
+                       snprintf(storename, 128, "%s node %d", squeue->sq_key, cstate->cs_node);
                        tuplestore_collect_stat(*tuplestore, storename);
                        /*
                         * Allocate a second read pointer to read from the store. We know