*/
  #define ParallelVacuumIsActive(lps) PointerIsValid(lps)
  
 +/* Phases of vacuum during which we report error context. */
 +typedef enum
 +{
 +   VACUUM_ERRCB_PHASE_UNKNOWN,
 +   VACUUM_ERRCB_PHASE_SCAN_HEAP,
 +   VACUUM_ERRCB_PHASE_VACUUM_INDEX,
 +   VACUUM_ERRCB_PHASE_VACUUM_HEAP,
 +   VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
 +   VACUUM_ERRCB_PHASE_TRUNCATE
 +} VacErrPhase;
 +
  /*
   * LVDeadTuples stores the dead tuple TIDs collected during the heap scan.
   * This is allocated in the DSM segment in parallel mode and in local memory
   
  typedef struct LVRelStats
  {
 +   char       *relnamespace;
 +   char       *relname;
     /* useindex = true means two-pass strategy; false means one-pass */
     bool        useindex;
     /* Overall statistics about rel */
      int         num_index_scans;
     TransactionId latestRemovedXid;
     bool        lock_waiter_detected;
 -} LVRelStats;
  
 +   /* Used for error callback */
 +   char       *indname;
 +   BlockNumber blkno;          /* used only for heap operations */
 +   VacErrPhase phase;
 +} LVRelStats;
  
  /* A few variables that don't seem worth passing around as parameters */
  static int elevel = -1;
                                      LVRelStats *vacrelstats, LVParallelState *lps,
                                     int nindexes);
  static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 -                             LVDeadTuples *dead_tuples, double reltuples);
 +                             LVDeadTuples *dead_tuples, double reltuples,
 +                             LVRelStats *vacrelstats);
  static void lazy_cleanup_index(Relation indrel,
                                IndexBulkDeleteResult **stats,
 -                              double reltuples, bool estimated_count);
 +                              double reltuples, bool estimated_count,
 +                              LVRelStats *vacrelstats);
  static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                              int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
  static bool should_attempt_truncation(VacuumParams *params,
                                           int nindexes);
  static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
                                   LVShared *lvshared, LVDeadTuples *dead_tuples,
 -                                 int nindexes);
 +                                 int nindexes, LVRelStats *vacrelstats);
  static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
                                   LVRelStats *vacrelstats, LVParallelState *lps,
                                   int nindexes);
  static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
                              LVShared *lvshared, LVSharedIndStats *shared_indstats,
 -                            LVDeadTuples *dead_tuples);
 +                            LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
  static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
                                      LVRelStats *vacrelstats, LVParallelState *lps,
                                      int nindexes);
                                  LVParallelState *lps, int nindexes);
  static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
  static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared);
 +static void vacuum_error_callback(void *arg);
 +static void update_vacuum_error_info(LVRelStats *errinfo, int phase,
 +                                    BlockNumber blkno, char *indname);
  
  
  /*
      double      new_live_tuples;
     TransactionId new_frozen_xid;
     MultiXactId new_min_multi;
 +   ErrorContextCallback errcallback;
  
     Assert(params != NULL);
     Assert(params->index_cleanup != VACOPT_TERNARY_DEFAULT);
   
     vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
  
 +   vacrelstats->relnamespace = get_namespace_name(RelationGetNamespace(onerel));
 +   vacrelstats->relname = pstrdup(RelationGetRelationName(onerel));
 +   vacrelstats->indname = NULL;
 +   vacrelstats->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
     vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
     vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
     vacrelstats->num_index_scans = 0;
      vacrelstats->useindex = (nindexes > 0 &&
                              params->index_cleanup == VACOPT_TERNARY_ENABLED);
  
 +   /*
 +    * Setup error traceback support for ereport().  The idea is to set up an
 +    * error context callback to display additional information on any error
 +    * during a vacuum.  During different phases of vacuum (heap scan, heap
 +    * vacuum, index vacuum, index cleanup, heap truncate), we update the
 +    * error context callback to display appropriate information.
 +    *
 +    * Note that the index vacuum and heap vacuum phases may be called
 +    * multiple times in the middle of the heap scan phase.  So the old phase
 +    * information is restored at the end of those phases.
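 +    *
 +    * For example, an error raised while scanning block 1234 of a table
 +    * "public.foo" would then be reported with an extra context line along
 +    * the lines of:
 +    *   CONTEXT:  while scanning block 1234 of relation "public.foo"
 +    * as constructed by vacuum_error_callback() below.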
 +    */
 +   errcallback.callback = vacuum_error_callback;
 +   errcallback.arg = vacrelstats;
 +   errcallback.previous = error_context_stack;
 +   error_context_stack = &errcallback;
 +
     /* Do the vacuuming */
     lazy_scan_heap(onerel, params, vacrelstats, Irel, nindexes, aggressive);
  
       * Optionally truncate the relation.
      */
     if (should_attempt_truncation(params, vacrelstats))
 +   {
 +       /*
 +        * Update error traceback information.  This is the last phase during
 +        * which we add context information to errors, so we don't need to
 +        * revert to the previous phase.
 +        */
 +       update_vacuum_error_info(vacrelstats, VACUUM_ERRCB_PHASE_TRUNCATE,
 +                                vacrelstats->nonempty_pages, NULL);
         lazy_truncate_heap(onerel, vacrelstats);
 +   }
 +
 +   /* Pop the error context stack */
 +   error_context_stack = errcallback.previous;
  
     /* Report that we are now doing final cleanup */
     pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
      BlockNumber nblocks,
                 blkno;
     HeapTupleData tuple;
 -   char       *relname;
     TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
     TransactionId relminmxid = onerel->rd_rel->relminmxid;
     BlockNumber empty_pages,
   
     pg_rusage_init(&ru0);
  
 -   relname = RelationGetRelationName(onerel);
     if (aggressive)
         ereport(elevel,
                 (errmsg("aggressively vacuuming \"%s.%s\"",
 -                       get_namespace_name(RelationGetNamespace(onerel)),
 -                       relname)));
 +                       vacrelstats->relnamespace,
 +                       vacrelstats->relname)));
     else
         ereport(elevel,
                 (errmsg("vacuuming \"%s.%s\"",
 -                       get_namespace_name(RelationGetNamespace(onerel)),
 -                       relname)));
 +                       vacrelstats->relnamespace,
 +                       vacrelstats->relname)));
  
     empty_pages = vacuumed_pages = 0;
     next_fsm_block_to_vacuum = (BlockNumber) 0;
   
         pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
  
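 +       /* Update error traceback information for the current block */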
 +       update_vacuum_error_info(vacrelstats, VACUUM_ERRCB_PHASE_SCAN_HEAP,
 +                                blkno, NULL);
 +
         if (blkno == next_unskippable_block)
         {
             /* Time to advance next_unskippable_block */
                   && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
         {
             elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
 -                relname, blkno);
 +                vacrelstats->relname, blkno);
             visibilitymap_clear(onerel, blkno, vmbuffer,
                                 VISIBILITYMAP_VALID_BITS);
         }
          else if (PageIsAllVisible(page) && has_dead_tuples)
         {
             elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
 -                relname, blkno);
 +                vacrelstats->relname, blkno);
             PageClearAllVisible(page);
             MarkBufferDirty(buf);
             visibilitymap_clear(onerel, blkno, vmbuffer,
   
         for (idx = 0; idx < nindexes; idx++)
             lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
 -                             vacrelstats->old_live_tuples);
 +                             vacrelstats->old_live_tuples, vacrelstats);
     }
  
     /* Increase and report the number of index scans */
      int         npages;
     PGRUsage    ru0;
     Buffer      vmbuffer = InvalidBuffer;
 +   LVRelStats  olderrinfo;
  
     /* Report that we are now vacuuming the heap */
     pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
                                  PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
  
 +   /* Update error traceback information */
 +   olderrinfo = *vacrelstats;
 +   update_vacuum_error_info(vacrelstats, VACUUM_ERRCB_PHASE_VACUUM_HEAP,
 +                            InvalidBlockNumber, NULL);
 +
     pg_rusage_init(&ru0);
     npages = 0;
  
          vacuum_delay_point();
  
         tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
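 +       /* Set the current block for the error context callback */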
 +       vacrelstats->blkno = tblk;
         buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
                                  vac_strategy);
         if (!ConditionalLockBufferForCleanup(buf))
                      RelationGetRelationName(onerel),
                     tupindex, npages),
              errdetail_internal("%s", pg_rusage_show(&ru0))));
 +
 +   /* Revert to the previous phase information for error traceback */
 +   update_vacuum_error_info(vacrelstats,
 +                            olderrinfo.phase,
 +                            olderrinfo.blkno,
 +                            olderrinfo.indname);
  }
  
  /*
      int         uncnt = 0;
     TransactionId visibility_cutoff_xid;
     bool        all_frozen;
 +   LVRelStats  olderrinfo;
  
     pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
  
 +   /* Update error traceback information */
 +   olderrinfo = *vacrelstats;
 +   update_vacuum_error_info(vacrelstats, VACUUM_ERRCB_PHASE_VACUUM_HEAP,
 +                            blkno, NULL);
 +
     START_CRIT_SECTION();
  
     for (; tupindex < dead_tuples->num_tuples; tupindex++)
                                *vmbuffer, visibility_cutoff_xid, flags);
     }
  
 +   /* Revert to the previous phase information for error traceback */
 +   update_vacuum_error_info(vacrelstats,
 +                            olderrinfo.phase,
 +                            olderrinfo.blkno,
 +                            olderrinfo.indname);
     return tupindex;
  }
  
       * indexes in the case where no workers are launched.
      */
     parallel_vacuum_index(Irel, stats, lps->lvshared,
 -                         vacrelstats->dead_tuples, nindexes);
 +                         vacrelstats->dead_tuples, nindexes, vacrelstats);
  
     /* Wait for all vacuum workers to finish */
     WaitForParallelWorkersToFinish(lps->pcxt);
   static void
  parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
                       LVShared *lvshared, LVDeadTuples *dead_tuples,
 -                     int nindexes)
 +                     int nindexes, LVRelStats *vacrelstats)
  {
     /*
      * Increment the active worker count if we are able to launch any worker.
   
         /* Do vacuum or cleanup of the index */
         vacuum_one_index(Irel[idx], &(stats[idx]), lvshared, shared_indstats,
 -                        dead_tuples);
 +                        dead_tuples, vacrelstats);
     }
  
     /*
          if (shared_indstats == NULL ||
             skip_parallel_vacuum_index(Irel[i], lps->lvshared))
             vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
 -                            shared_indstats, vacrelstats->dead_tuples);
 +                            shared_indstats, vacrelstats->dead_tuples,
 +                            vacrelstats);
     }
  
     /*
   static void
  vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
                  LVShared *lvshared, LVSharedIndStats *shared_indstats,
 -                LVDeadTuples *dead_tuples)
 +                LVDeadTuples *dead_tuples, LVRelStats *vacrelstats)
  {
     IndexBulkDeleteResult *bulkdelete_res = NULL;
  
      /* Do vacuum or cleanup of the index */
     if (lvshared->for_cleanup)
         lazy_cleanup_index(indrel, stats, lvshared->reltuples,
 -                          lvshared->estimated_count);
 +                          lvshared->estimated_count, vacrelstats);
     else
         lazy_vacuum_index(indrel, stats, dead_tuples,
 -                         lvshared->reltuples);
 +                         lvshared->reltuples, vacrelstats);
  
     /*
      * Copy the index bulk-deletion result returned from ambulkdelete and
          for (idx = 0; idx < nindexes; idx++)
             lazy_cleanup_index(Irel[idx], &stats[idx],
                                vacrelstats->new_rel_tuples,
 -                              vacrelstats->tupcount_pages < vacrelstats->rel_pages);
 +                              vacrelstats->tupcount_pages < vacrelstats->rel_pages,
 +                              vacrelstats);
     }
  }
  
    */
  static void
  lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 -                 LVDeadTuples *dead_tuples, double reltuples)
 +                 LVDeadTuples *dead_tuples, double reltuples,
 +                 LVRelStats *vacrelstats)
  {
     IndexVacuumInfo ivinfo;
     const char *msg;
     PGRUsage    ru0;
 +   LVRelStats  olderrinfo;
  
     pg_rusage_init(&ru0);
  
      ivinfo.num_heap_tuples = reltuples;
     ivinfo.strategy = vac_strategy;
  
 +   /* Update error traceback information */
 +   olderrinfo = *vacrelstats;
 +   update_vacuum_error_info(vacrelstats,
 +                            VACUUM_ERRCB_PHASE_VACUUM_INDEX,
 +                            InvalidBlockNumber,
 +                            RelationGetRelationName(indrel));
 +
     /* Do bulk deletion */
     *stats = index_bulk_delete(&ivinfo, *stats,
                                lazy_tid_reaped, (void *) dead_tuples);
                      RelationGetRelationName(indrel),
                     dead_tuples->num_tuples),
              errdetail_internal("%s", pg_rusage_show(&ru0))));
 +
 +   /* Revert to the previous phase information for error traceback */
 +   update_vacuum_error_info(vacrelstats,
 +                            olderrinfo.phase,
 +                            olderrinfo.blkno,
 +                            olderrinfo.indname);
  }
  
  /*
   static void
  lazy_cleanup_index(Relation indrel,
                    IndexBulkDeleteResult **stats,
 -                  double reltuples, bool estimated_count)
 +                  double reltuples, bool estimated_count,
 +                  LVRelStats *vacrelstats)
  {
     IndexVacuumInfo ivinfo;
     const char *msg;
     PGRUsage    ru0;
 +   LVRelStats  olderrinfo;
  
     pg_rusage_init(&ru0);
  
      ivinfo.num_heap_tuples = reltuples;
     ivinfo.strategy = vac_strategy;
  
 +   /* Update error traceback information */
 +   olderrinfo = *vacrelstats;
 +   update_vacuum_error_info(vacrelstats,
 +                            VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
 +                            InvalidBlockNumber,
 +                            RelationGetRelationName(indrel));
 +
     *stats = index_vacuum_cleanup(&ivinfo, *stats);
  
 +   /* Revert to the previous phase information for error traceback */
 +   update_vacuum_error_info(vacrelstats,
 +                            olderrinfo.phase,
 +                            olderrinfo.blkno,
 +                            olderrinfo.indname);
     if (!(*stats))
         return;
  
           * were vacuuming.
          */
         new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
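 +       /* Update error traceback information with the new target rel size */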
 +       vacrelstats->blkno = new_rel_pages;
  
         if (new_rel_pages >= old_rel_pages)
         {
      int         nindexes;
     char       *sharedquery;
     IndexBulkDeleteResult **stats;
 +   LVRelStats  vacrelstats;
 +   ErrorContextCallback errcallback;
  
     lvshared = (LVShared *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_SHARED,
                                            false);
      if (lvshared->maintenance_work_mem_worker > 0)
         maintenance_work_mem = lvshared->maintenance_work_mem_worker;
  
 +   /*
 +    * Initialize vacrelstats for use as error callback arg by parallel
 +    * worker.
 +    */
 +   vacrelstats.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
 +   vacrelstats.relname = pstrdup(RelationGetRelationName(onerel));
 +   vacrelstats.indname = NULL;
 +   vacrelstats.phase = VACUUM_ERRCB_PHASE_UNKNOWN; /* Not yet processing */
 +
 +   /* Setup error traceback support for ereport() */
 +   errcallback.callback = vacuum_error_callback;
 +   errcallback.arg = &vacrelstats;
 +   errcallback.previous = error_context_stack;
 +   error_context_stack = &errcallback;
 +
     /* Process indexes to perform vacuum/cleanup */
 -   parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes);
 +   parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes,
 +                         &vacrelstats);
 +
 +   /* Pop the error context stack */
 +   error_context_stack = errcallback.previous;
  
     vac_close_indexes(nindexes, indrels, RowExclusiveLock);
     table_close(onerel, ShareUpdateExclusiveLock);
     pfree(stats);
  }
 +
 +/*
 + * Error context callback for errors occurring during vacuum.
 + */
 +static void
 +vacuum_error_callback(void *arg)
 +{
 +   LVRelStats *errinfo = arg;
 +
 +   switch (errinfo->phase)
 +   {
 +       case VACUUM_ERRCB_PHASE_SCAN_HEAP:
 +           if (BlockNumberIsValid(errinfo->blkno))
 +               errcontext("while scanning block %u of relation \"%s.%s\"",
 +                          errinfo->blkno, errinfo->relnamespace, errinfo->relname);
 +           break;
 +
 +       case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
 +           if (BlockNumberIsValid(errinfo->blkno))
 +               errcontext("while vacuuming block %u of relation \"%s.%s\"",
 +                          errinfo->blkno, errinfo->relnamespace, errinfo->relname);
 +           break;
 +
 +       case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
 +           errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
 +                      errinfo->indname, errinfo->relnamespace, errinfo->relname);
 +           break;
 +
 +       case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
 +           errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
 +                      errinfo->indname, errinfo->relnamespace, errinfo->relname);
 +           break;
 +
 +       case VACUUM_ERRCB_PHASE_TRUNCATE:
 +           if (BlockNumberIsValid(errinfo->blkno))
 +               errcontext("while truncating relation \"%s.%s\" to %u blocks",
 +                          errinfo->relnamespace, errinfo->relname, errinfo->blkno);
 +           break;
 +
 +       case VACUUM_ERRCB_PHASE_UNKNOWN:
 +       default:
 +           return;             /* do nothing; the errinfo may not be
 +                                * initialized */
 +   }
 +}
 +
 +/*
 + * Update vacuum error callback for the current phase, block, and index.
 + */
 +static void
 +update_vacuum_error_info(LVRelStats *errinfo, int phase, BlockNumber blkno,
 +                        char *indname)
 +{
 +   errinfo->blkno = blkno;
 +   errinfo->phase = phase;
 +
 +   /* Free index name from any previous phase */
 +   if (errinfo->indname)
 +       pfree(errinfo->indname);
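 +
 +   /*
 +    * Note: callers that take a shallow copy of this struct to save and
 +    * later restore the error info must only do so while indname is NULL;
 +    * otherwise the pfree() above would leave their saved copy holding a
 +    * dangling pointer.
 +    */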
 +
 +   /* For index phases, save the name of the current index for the callback */
 +   errinfo->indname = indname ? pstrdup(indname) : NULL;
 +}