*/
     if (StartPos / XLOG_BLCKSZ != EndPos / XLOG_BLCKSZ)
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
         /* advance global request to include new block(s) */
 -       if (xlogctl->LogwrtRqst.Write < EndPos)
 -           xlogctl->LogwrtRqst.Write = EndPos;
 +       if (XLogCtl->LogwrtRqst.Write < EndPos)
 +           XLogCtl->LogwrtRqst.Write = EndPos;
         /* update local result copy while I have the chance */
 -       LogwrtResult = xlogctl->LogwrtResult;
 -       SpinLockRelease(&xlogctl->info_lck);
 +       LogwrtResult = XLogCtl->LogwrtResult;
 +       SpinLockRelease(&XLogCtl->info_lck);
     }
  
     /*
   ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos,
                           XLogRecPtr *PrevPtr)
  {
 -   volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
 +   XLogCtlInsert *Insert = &XLogCtl->Insert;
     uint64      startbytepos;
     uint64      endbytepos;
     uint64      prevbytepos;
   static bool
  ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
  {
 -   volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
 +   XLogCtlInsert *Insert = &XLogCtl->Insert;
     uint64      startbytepos;
     uint64      endbytepos;
     uint64      prevbytepos;
      uint64      bytepos;
     XLogRecPtr  reservedUpto;
     XLogRecPtr  finishedUpto;
 -   volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
 +   XLogCtlInsert *Insert = &XLogCtl->Insert;
     int         i;
  
     if (MyProc == NULL)
                  break;
  
             /* Before waiting, get info_lck and update LogwrtResult */
 -           {
 -               /* use volatile pointer to prevent code rearrangement */
 -               volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -               SpinLockAcquire(&xlogctl->info_lck);
 -               if (xlogctl->LogwrtRqst.Write < OldPageRqstPtr)
 -                   xlogctl->LogwrtRqst.Write = OldPageRqstPtr;
 -               LogwrtResult = xlogctl->LogwrtResult;
 -               SpinLockRelease(&xlogctl->info_lck);
 -           }
 +           SpinLockAcquire(&XLogCtl->info_lck);
 +           if (XLogCtl->LogwrtRqst.Write < OldPageRqstPtr)
 +               XLogCtl->LogwrtRqst.Write = OldPageRqstPtr;
 +           LogwrtResult = XLogCtl->LogwrtResult;
 +           SpinLockRelease(&XLogCtl->info_lck);
  
             /*
              * Now that we have an up-to-date LogwrtResult value, see if we
       * code in a couple of places.
      */
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       xlogctl->LogwrtResult = LogwrtResult;
 -       if (xlogctl->LogwrtRqst.Write < LogwrtResult.Write)
 -           xlogctl->LogwrtRqst.Write = LogwrtResult.Write;
 -       if (xlogctl->LogwrtRqst.Flush < LogwrtResult.Flush)
 -           xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush;
 -       SpinLockRelease(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       XLogCtl->LogwrtResult = LogwrtResult;
 +       if (XLogCtl->LogwrtRqst.Write < LogwrtResult.Write)
 +           XLogCtl->LogwrtRqst.Write = LogwrtResult.Write;
 +       if (XLogCtl->LogwrtRqst.Flush < LogwrtResult.Flush)
 +           XLogCtl->LogwrtRqst.Flush = LogwrtResult.Flush;
 +       SpinLockRelease(&XLogCtl->info_lck);
     }
  }
  
      XLogRecPtr  WriteRqstPtr = asyncXactLSN;
     bool        sleeping;
  
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   LogwrtResult = xlogctl->LogwrtResult;
 -   sleeping = xlogctl->WalWriterSleeping;
 -   if (xlogctl->asyncXactLSN < asyncXactLSN)
 -       xlogctl->asyncXactLSN = asyncXactLSN;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   LogwrtResult = XLogCtl->LogwrtResult;
 +   sleeping = XLogCtl->WalWriterSleeping;
 +   if (XLogCtl->asyncXactLSN < asyncXactLSN)
 +       XLogCtl->asyncXactLSN = asyncXactLSN;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /*
      * If the WALWriter is sleeping, we should kick it to make it come out of
   void
  XLogSetReplicationSlotMinimumLSN(XLogRecPtr lsn)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xlogctl->replicationSlotMinLSN = lsn;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->replicationSlotMinLSN = lsn;
 +   SpinLockRelease(&XLogCtl->info_lck);
  }
  
  
   static XLogRecPtr
  XLogGetReplicationSlotMinimumLSN(void)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  retval;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   retval = xlogctl->replicationSlotMinLSN;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   retval = XLogCtl->replicationSlotMinLSN;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     return retval;
  }
          updateMinRecoveryPoint = false;
     else if (force || minRecoveryPoint < lsn)
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
         XLogRecPtr  newMinRecoveryPoint;
         TimeLineID  newMinRecoveryPointTLI;
  
           * all.  Instead, we just log a warning and continue with recovery.
          * (See also the comments about corrupt LSNs in XLogFlush.)
          */
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       newMinRecoveryPoint = xlogctl->replayEndRecPtr;
 -       newMinRecoveryPointTLI = xlogctl->replayEndTLI;
 -       SpinLockRelease(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       newMinRecoveryPoint = XLogCtl->replayEndRecPtr;
 +       newMinRecoveryPointTLI = XLogCtl->replayEndTLI;
 +       SpinLockRelease(&XLogCtl->info_lck);
  
         if (!force && newMinRecoveryPoint < lsn)
             elog(WARNING,
       */
     for (;;)
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
         XLogRecPtr  insertpos;
  
         /* read LogwrtResult and update local state */
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       if (WriteRqstPtr < xlogctl->LogwrtRqst.Write)
 -           WriteRqstPtr = xlogctl->LogwrtRqst.Write;
 -       LogwrtResult = xlogctl->LogwrtResult;
 -       SpinLockRelease(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       if (WriteRqstPtr < XLogCtl->LogwrtRqst.Write)
 +           WriteRqstPtr = XLogCtl->LogwrtRqst.Write;
 +       LogwrtResult = XLogCtl->LogwrtResult;
 +       SpinLockRelease(&XLogCtl->info_lck);
  
         /* done already? */
         if (record <= LogwrtResult.Flush)
          return false;
  
     /* read LogwrtResult and update local state */
 -   {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       LogwrtResult = xlogctl->LogwrtResult;
 -       WriteRqstPtr = xlogctl->LogwrtRqst.Write;
 -       SpinLockRelease(&xlogctl->info_lck);
 -   }
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   LogwrtResult = XLogCtl->LogwrtResult;
 +   WriteRqstPtr = XLogCtl->LogwrtRqst.Write;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /* back off to last completed page boundary */
     WriteRqstPtr -= WriteRqstPtr % XLOG_BLCKSZ;
      /* if we have already flushed that far, consider async commit records */
     if (WriteRqstPtr <= LogwrtResult.Flush)
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       WriteRqstPtr = xlogctl->asyncXactLSN;
 -       SpinLockRelease(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       WriteRqstPtr = XLogCtl->asyncXactLSN;
 +       SpinLockRelease(&XLogCtl->info_lck);
         flexible = false;       /* ensure it all gets written */
     }
  
          return false;
  
     /* read LogwrtResult and update local state */
 -   {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       LogwrtResult = xlogctl->LogwrtResult;
 -       SpinLockRelease(&xlogctl->info_lck);
 -   }
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   LogwrtResult = XLogCtl->LogwrtResult;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /* check again */
     if (record <= LogwrtResult.Flush)
   void
  CheckXLogRemoved(XLogSegNo segno, TimeLineID tli)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     XLogSegNo   lastRemovedSegNo;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   lastRemovedSegNo = xlogctl->lastRemovedSegNo;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     if (segno <= lastRemovedSegNo)
     {
   XLogSegNo
  XLogGetLastRemovedSegno(void)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     XLogSegNo   lastRemovedSegNo;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   lastRemovedSegNo = xlogctl->lastRemovedSegNo;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     return lastRemovedSegNo;
  }
   static void
  UpdateLastRemovedPtr(char *filename)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     uint32      tli;
     XLogSegNo   segno;
  
     XLogFromFileName(filename, &tli, &segno);
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   if (segno > xlogctl->lastRemovedSegNo)
 -       xlogctl->lastRemovedSegNo = segno;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   if (segno > XLogCtl->lastRemovedSegNo)
 +       XLogCtl->lastRemovedSegNo = segno;
 +   SpinLockRelease(&XLogCtl->info_lck);
  }
  
  /*
   {
     XLogRecPtr  nextUnloggedLSN;
  
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
     /* increment the unloggedLSN counter, need SpinLock */
 -   SpinLockAcquire(&xlogctl->ulsn_lck);
 -   nextUnloggedLSN = xlogctl->unloggedLSN++;
 -   SpinLockRelease(&xlogctl->ulsn_lck);
 +   SpinLockAcquire(&XLogCtl->ulsn_lck);
 +   nextUnloggedLSN = XLogCtl->unloggedLSN++;
 +   SpinLockRelease(&XLogCtl->ulsn_lck);
  
     return nextUnloggedLSN;
  }
   bool
  RecoveryIsPaused(void)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     bool        recoveryPause;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   recoveryPause = xlogctl->recoveryPause;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   recoveryPause = XLogCtl->recoveryPause;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     return recoveryPause;
  }
   void
  SetRecoveryPause(bool recoveryPause)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xlogctl->recoveryPause = recoveryPause;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->recoveryPause = recoveryPause;
 +   SpinLockRelease(&XLogCtl->info_lck);
  }
  
  /*
   static void
  SetLatestXTime(TimestampTz xtime)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xlogctl->recoveryLastXTime = xtime;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->recoveryLastXTime = xtime;
 +   SpinLockRelease(&XLogCtl->info_lck);
  }
  
  /*
   TimestampTz
  GetLatestXTime(void)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     TimestampTz xtime;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xtime = xlogctl->recoveryLastXTime;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   xtime = XLogCtl->recoveryLastXTime;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     return xtime;
  }
   static void
  SetCurrentChunkStartTime(TimestampTz xtime)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xlogctl->currentChunkStartTime = xtime;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->currentChunkStartTime = xtime;
 +   SpinLockRelease(&XLogCtl->info_lck);
  }
  
  /*
   TimestampTz
  GetCurrentChunkReplayStartTime(void)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     TimestampTz xtime;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xtime = xlogctl->currentChunkStartTime;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   xtime = XLogCtl->currentChunkStartTime;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     return xtime;
  }
      {
         int         rmid;
  
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
         /*
          * Update pg_control to show that we are recovering and to show the
          * selected checkpoint as the place we are starting from. We also mark
           * if we had just replayed the record before the REDO location (or the
          * checkpoint record itself, if it's a shutdown checkpoint).
          */
 -       SpinLockAcquire(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
         if (checkPoint.redo < RecPtr)
 -           xlogctl->replayEndRecPtr = checkPoint.redo;
 +           XLogCtl->replayEndRecPtr = checkPoint.redo;
         else
 -           xlogctl->replayEndRecPtr = EndRecPtr;
 -       xlogctl->replayEndTLI = ThisTimeLineID;
 -       xlogctl->lastReplayedEndRecPtr = xlogctl->replayEndRecPtr;
 -       xlogctl->lastReplayedTLI = xlogctl->replayEndTLI;
 -       xlogctl->recoveryLastXTime = 0;
 -       xlogctl->currentChunkStartTime = 0;
 -       xlogctl->recoveryPause = false;
 -       SpinLockRelease(&xlogctl->info_lck);
 +           XLogCtl->replayEndRecPtr = EndRecPtr;
 +       XLogCtl->replayEndTLI = ThisTimeLineID;
 +       XLogCtl->lastReplayedEndRecPtr = XLogCtl->replayEndRecPtr;
 +       XLogCtl->lastReplayedTLI = XLogCtl->replayEndTLI;
 +       XLogCtl->recoveryLastXTime = 0;
 +       XLogCtl->currentChunkStartTime = 0;
 +       XLogCtl->recoveryPause = false;
 +       SpinLockRelease(&XLogCtl->info_lck);
  
         /* Also ensure XLogReceiptTime has a sane value */
         XLogReceiptTime = GetCurrentTimestamp();
                   * otherwise would is a minor issue, so it doesn't seem worth
                  * adding another spinlock cycle to prevent that.
                  */
 -               if (xlogctl->recoveryPause)
 +               if (((volatile XLogCtlData *) XLogCtl)->recoveryPause)
                     recoveryPausesHere();
  
                 /*
                       * here otherwise pausing during the delay-wait wouldn't
                      * work.
                      */
 -                   if (xlogctl->recoveryPause)
 +                   if (((volatile XLogCtlData *) XLogCtl)->recoveryPause)
                         recoveryPausesHere();
                 }
  
                   * Update shared replayEndRecPtr before replaying this record,
                  * so that XLogFlush will update minRecoveryPoint correctly.
                  */
 -               SpinLockAcquire(&xlogctl->info_lck);
 -               xlogctl->replayEndRecPtr = EndRecPtr;
 -               xlogctl->replayEndTLI = ThisTimeLineID;
 -               SpinLockRelease(&xlogctl->info_lck);
 +               SpinLockAcquire(&XLogCtl->info_lck);
 +               XLogCtl->replayEndRecPtr = EndRecPtr;
 +               XLogCtl->replayEndTLI = ThisTimeLineID;
 +               SpinLockRelease(&XLogCtl->info_lck);
  
                 /*
                  * If we are attempting to enter Hot Standby mode, process
                   * Update lastReplayedEndRecPtr after this record has been
                  * successfully replayed.
                  */
 -               SpinLockAcquire(&xlogctl->info_lck);
 -               xlogctl->lastReplayedEndRecPtr = EndRecPtr;
 -               xlogctl->lastReplayedTLI = ThisTimeLineID;
 -               SpinLockRelease(&xlogctl->info_lck);
 +               SpinLockAcquire(&XLogCtl->info_lck);
 +               XLogCtl->lastReplayedEndRecPtr = EndRecPtr;
 +               XLogCtl->lastReplayedTLI = ThisTimeLineID;
 +               SpinLockRelease(&XLogCtl->info_lck);
  
                 /* Remember this record as the last-applied one */
                 LastRec = ReadRecPtr;
       * there are no race conditions concerning visibility of other recent
      * updates to shared memory.)
      */
 -   {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       xlogctl->SharedRecoveryInProgress = false;
 -       SpinLockRelease(&xlogctl->info_lck);
 -   }
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->SharedRecoveryInProgress = false;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /*
      * If there were cascading standby servers connected to us, nudge any wal
          reachedConsistency &&
         IsUnderPostmaster)
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       xlogctl->SharedHotStandbyActive = true;
 -       SpinLockRelease(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       XLogCtl->SharedHotStandbyActive = true;
 +       SpinLockRelease(&XLogCtl->info_lck);
  
         LocalHotStandbyActive = true;
  
          return true;
     else
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
         /* spinlock is essential on machines with weak memory ordering! */
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       LocalHotStandbyActive = xlogctl->SharedHotStandbyActive;
 -       SpinLockRelease(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       LocalHotStandbyActive = XLogCtl->SharedHotStandbyActive;
 +       SpinLockRelease(&XLogCtl->info_lck);
  
         return LocalHotStandbyActive;
     }
   XLogRecPtr
  GetRedoRecPtr(void)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  ptr;
  
     /*
       * grabbed a WAL insertion lock to read the master copy, someone might
      * update it just after we've released the lock.
      */
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   ptr = xlogctl->RedoRecPtr;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   ptr = XLogCtl->RedoRecPtr;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     if (RedoRecPtr < ptr)
         RedoRecPtr = ptr;
   XLogRecPtr
  GetInsertRecPtr(void)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  recptr;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   recptr = xlogctl->LogwrtRqst.Write;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   recptr = XLogCtl->LogwrtRqst.Write;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     return recptr;
  }
   XLogRecPtr
  GetFlushRecPtr(void)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  recptr;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   recptr = xlogctl->LogwrtResult.Flush;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   recptr = XLogCtl->LogwrtResult.Flush;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     return recptr;
  }
      TransactionId nextXid;
  
     /* Must read checkpoint info first, else have race condition */
 -   {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       ckptXidEpoch = xlogctl->ckptXidEpoch;
 -       ckptXid = xlogctl->ckptXid;
 -       SpinLockRelease(&xlogctl->info_lck);
 -   }
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   ckptXidEpoch = XLogCtl->ckptXidEpoch;
 +   ckptXid = XLogCtl->ckptXid;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /* Now fetch current nextXid */
     nextXid = ReadNewTransactionId();
   void
  CreateCheckPoint(int flags)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     bool        shutdown;
     CheckPoint  checkPoint;
     XLogRecPtr  recptr;
       * XLogInserts that happen while we are dumping buffers must assume that
      * their buffer changes are not included in the checkpoint.
      */
 -   RedoRecPtr = xlogctl->Insert.RedoRecPtr = checkPoint.redo;
 +   RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
  
     /*
      * Now we can release the WAL insertion locks, allowing other xacts to
      WALInsertLockRelease();
  
     /* Update the info_lck-protected copy of RedoRecPtr as well */
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xlogctl->RedoRecPtr = checkPoint.redo;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->RedoRecPtr = checkPoint.redo;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /*
      * If enabled, log checkpoint start.  We postpone this until now so as not
      LWLockRelease(ControlFileLock);
  
     /* Update shared-memory copy of checkpoint XID/epoch */
 -   {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
 -       xlogctl->ckptXid = checkPoint.nextXid;
 -       SpinLockRelease(&xlogctl->info_lck);
 -   }
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
 +   XLogCtl->ckptXid = checkPoint.nextXid;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /*
      * We are now done with critical updates; no need for system panic if we
   static void
  RecoveryRestartPoint(const CheckPoint *checkPoint)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
     /*
      * Also refrain from creating a restartpoint if we have seen any
      * references to non-existent pages. Restarting recovery from the
       * Copy the checkpoint record to shared memory, so that checkpointer can
      * work out the next time it wants to perform a restartpoint.
      */
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xlogctl->lastCheckPointRecPtr = ReadRecPtr;
 -   xlogctl->lastCheckPoint = *checkPoint;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
 +   XLogCtl->lastCheckPoint = *checkPoint;
 +   SpinLockRelease(&XLogCtl->info_lck);
  }
  
  /*
      XLogSegNo   _logSegNo;
     TimestampTz xtime;
  
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
     /*
      * Acquire CheckpointLock to ensure only one restartpoint or checkpoint
      * happens at a time.
      LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);
  
     /* Get a local copy of the last safe checkpoint record. */
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   lastCheckPointRecPtr = xlogctl->lastCheckPointRecPtr;
 -   lastCheckPoint = xlogctl->lastCheckPoint;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   lastCheckPointRecPtr = XLogCtl->lastCheckPointRecPtr;
 +   lastCheckPoint = XLogCtl->lastCheckPoint;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /*
      * Check that we're still in recovery mode. It's ok if we exit recovery
       * happening.
      */
     WALInsertLockAcquireExclusive();
 -   xlogctl->Insert.RedoRecPtr = lastCheckPoint.redo;
 +   XLogCtl->Insert.RedoRecPtr = lastCheckPoint.redo;
     WALInsertLockRelease();
  
     /* Also update the info_lck-protected copy */
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xlogctl->RedoRecPtr = lastCheckPoint.redo;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->RedoRecPtr = lastCheckPoint.redo;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     /*
      * Prepare to accumulate statistics.
          ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;
  
         /* Update shared-memory copy of checkpoint XID/epoch */
 -       {
 -           /* use volatile pointer to prevent code rearrangement */
 -           volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -           SpinLockAcquire(&xlogctl->info_lck);
 -           xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
 -           xlogctl->ckptXid = checkPoint.nextXid;
 -           SpinLockRelease(&xlogctl->info_lck);
 -       }
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
 +       XLogCtl->ckptXid = checkPoint.nextXid;
 +       SpinLockRelease(&XLogCtl->info_lck);
  
         /*
          * We should've already switched to the new TLI before replaying this
          ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;
  
         /* Update shared-memory copy of checkpoint XID/epoch */
 -       {
 -           /* use volatile pointer to prevent code rearrangement */
 -           volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -           SpinLockAcquire(&xlogctl->info_lck);
 -           xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
 -           xlogctl->ckptXid = checkPoint.nextXid;
 -           SpinLockRelease(&xlogctl->info_lck);
 -       }
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
 +       XLogCtl->ckptXid = checkPoint.nextXid;
 +       SpinLockRelease(&XLogCtl->info_lck);
  
         /* TLI should not change in an on-line checkpoint */
         if (checkPoint.ThisTimeLineID != ThisTimeLineID)
      }
     else if (info == XLOG_FPW_CHANGE)
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
         bool        fpw;
  
         memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
           */
         if (!fpw)
         {
 -           SpinLockAcquire(&xlogctl->info_lck);
 -           if (xlogctl->lastFpwDisableRecPtr < ReadRecPtr)
 -               xlogctl->lastFpwDisableRecPtr = ReadRecPtr;
 -           SpinLockRelease(&xlogctl->info_lck);
 +           SpinLockAcquire(&XLogCtl->info_lck);
 +           if (XLogCtl->lastFpwDisableRecPtr < ReadRecPtr)
 +               XLogCtl->lastFpwDisableRecPtr = ReadRecPtr;
 +           SpinLockRelease(&XLogCtl->info_lck);
         }
  
         /* Keep track of full_page_writes */
   
             if (backup_started_in_recovery)
             {
 -               /* use volatile pointer to prevent code rearrangement */
 -               volatile XLogCtlData *xlogctl = XLogCtl;
                 XLogRecPtr  recptr;
  
                 /*
                   * (i.e., since last restartpoint used as backup starting
                  * checkpoint) contain full-page writes.
                  */
 -               SpinLockAcquire(&xlogctl->info_lck);
 -               recptr = xlogctl->lastFpwDisableRecPtr;
 -               SpinLockRelease(&xlogctl->info_lck);
 +               SpinLockAcquire(&XLogCtl->info_lck);
 +               recptr = XLogCtl->lastFpwDisableRecPtr;
 +               SpinLockRelease(&XLogCtl->info_lck);
  
                 if (!checkpointfpw || startpoint <= recptr)
                     ereport(ERROR,
       */
     if (backup_started_in_recovery)
     {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
         XLogRecPtr  recptr;
  
         /*
          * Check to see if all WAL replayed during online backup contain
          * full-page writes.
          */
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       recptr = xlogctl->lastFpwDisableRecPtr;
 -       SpinLockRelease(&xlogctl->info_lck);
 +       SpinLockAcquire(&XLogCtl->info_lck);
 +       recptr = XLogCtl->lastFpwDisableRecPtr;
 +       SpinLockRelease(&XLogCtl->info_lck);
  
         if (startpoint <= recptr)
             ereport(ERROR,
   XLogRecPtr
  GetXLogReplayRecPtr(TimeLineID *replayTLI)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  recptr;
     TimeLineID  tli;
  
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   recptr = xlogctl->lastReplayedEndRecPtr;
 -   tli = xlogctl->lastReplayedTLI;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   recptr = XLogCtl->lastReplayedEndRecPtr;
 +   tli = XLogCtl->lastReplayedTLI;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     if (replayTLI)
         *replayTLI = tli;
   XLogRecPtr
  GetXLogInsertRecPtr(void)
  {
 -   volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
 +   XLogCtlInsert *Insert = &XLogCtl->Insert;
     uint64      current_bytepos;
  
     SpinLockAcquire(&Insert->insertpos_lck);
   XLogRecPtr
  GetXLogWriteRecPtr(void)
  {
 -   {
 -       /* use volatile pointer to prevent code rearrangement */
 -       volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -       SpinLockAcquire(&xlogctl->info_lck);
 -       LogwrtResult = xlogctl->LogwrtResult;
 -       SpinLockRelease(&xlogctl->info_lck);
 -   }
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   LogwrtResult = XLogCtl->LogwrtResult;
 +   SpinLockRelease(&XLogCtl->info_lck);
  
     return LogwrtResult.Write;
  }
   void
  SetWalWriterSleeping(bool sleeping)
  {
 -   /* use volatile pointer to prevent code rearrangement */
 -   volatile XLogCtlData *xlogctl = XLogCtl;
 -
 -   SpinLockAcquire(&xlogctl->info_lck);
 -   xlogctl->WalWriterSleeping = sleeping;
 -   SpinLockRelease(&xlogctl->info_lck);
 +   SpinLockAcquire(&XLogCtl->info_lck);
 +   XLogCtl->WalWriterSleeping = sleeping;
 +   SpinLockRelease(&XLogCtl->info_lck);
  }
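
For context, here is a minimal standalone sketch of the access pattern this patch changes. It is illustrative only and not part of the patch: SomeShared, Shared, and counter are invented names, and the SpinLock calls are left as comments so the example compiles without PostgreSQL headers. The premise is that spinlock acquire/release used to give no compiler-barrier guarantee, so spinlock-protected shared state was accessed through a volatile-qualified local pointer; with the spinlock primitives now acting as compiler barriers, plain XLogCtl-> accesses inside the critical section are safe, and volatile survives only as a cast at the site of an intentionally unlocked read, as in the recoveryPause checks above.

#include <stdint.h>
#include <stdio.h>

typedef struct SomeShared
{
    /* slock_t   lck; */        /* spinlock protecting counter, in the real code */
    uint64_t    counter;
} SomeShared;

static SomeShared SharedData;
static SomeShared *Shared = &SharedData;    /* stand-in for a shared-memory pointer */

/*
 * Old idiom: spinlock acquire/release were not compiler barriers, so shared
 * fields were read through a volatile-qualified local pointer to keep the
 * compiler from caching or reordering the accesses across the lock calls.
 */
static uint64_t
read_counter_old(void)
{
    volatile SomeShared *shared = Shared;
    uint64_t    val;

    /* SpinLockAcquire(&shared->lck); */
    val = shared->counter;
    /* SpinLockRelease(&shared->lck); */
    return val;
}

/*
 * New idiom: SpinLockAcquire/SpinLockRelease themselves act as compiler
 * barriers, so plain pointer access inside the critical section is enough.
 */
static uint64_t
read_counter_new(void)
{
    uint64_t    val;

    /* SpinLockAcquire(&Shared->lck); */
    val = Shared->counter;
    /* SpinLockRelease(&Shared->lck); */
    return val;
}

int
main(void)
{
    Shared->counter = 42;
    printf("%llu %llu\n",
           (unsigned long long) read_counter_old(),
           (unsigned long long) read_counter_new());

    /*
     * Deliberately unlocked reads, like the recoveryPause checks above, still
     * force a fresh load by casting to volatile at the point of use:
     *
     *     if (((volatile SomeShared *) Shared)->counter > 0)
     *         ...
     */
    return 0;
}

Keeping the volatile cast local to the unlocked read, rather than qualifying the whole pointer, makes it visible exactly where the lock (and its barrier) is being skipped on purpose.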