Re: 2.3.42 alpha updates

From: Richard Henderson (rth@cygnus.com)
Date: Sat Feb 05 2000 - 15:06:26 EST


On Sat, Feb 05, 2000 at 11:57:34AM +0100, Manfred Spraul wrote:
> It seems that Alpha is broken, because it relies on "current->active_mm"
> to check if a flush is required.

Indeed it is broken. What do you think of the following?

r~
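
To make the race concrete: between switch_mm and switch_to the mmu is
already running on the incoming mm, but current still points at the
outgoing task, so the "mm == current->active_mm" test in the ipi
handlers is evaluated against a stale view and can skip a flush the new
context needs. The idea below is to have the handler always leave a
crumb and have the switch path re-test once current is settled. Here
is a minimal user-space sketch of just that ordering (illustration
only, not part of the patch; the types and the single cpu slot are
stand-ins):

#include <stdio.h>

struct mm_struct { const char *name; };

static struct mm_struct init_mm = { "init" }, user_mm = { "user" };

/* Stand-ins for current->active_mm and the per-cpu crumb. */
static struct mm_struct *active_mm = &init_mm;
static struct mm_struct *delayed_flush;

static void flush_tlb_current(struct mm_struct *mm)
{
        printf("flushing %s\n", mm->name);
}

/* IPI handler: the active_mm test can be evaluated mid-switch, so
   unconditionally leave the crumb for the switch path. */
static void ipi_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == active_mm)
                flush_tlb_current(mm);
        delayed_flush = mm;
}

/* Tail of switch_to: re-test now that active_mm is authoritative. */
static void finish_switch(void)
{
        struct mm_struct *mm = delayed_flush;

        delayed_flush = 0;
        if (mm && mm == active_mm)
                flush_tlb_current(mm);
}

int main(void)
{
        delayed_flush = 0;              /* prepare_to_switch() */

        /* An IPI for the incoming mm lands mid-switch: the handler's
           test misses because active_mm still says init, but the
           crumb survives.  */
        ipi_flush_tlb_mm(&user_mm);

        active_mm = &user_mm;           /* the switch completes */
        finish_switch();                /* the crumb catches the miss */
        return 0;
}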

Index: include/asm-alpha/pgalloc.h
===================================================================
RCS file: /disk1/u4/cvs/linux/include/asm-alpha/pgalloc.h,v
retrieving revision 1.2
diff -c -p -d -r1.2 pgalloc.h
*** include/asm-alpha/pgalloc.h 2000/02/03 10:13:24 1.2
--- include/asm-alpha/pgalloc.h 2000/02/05 15:44:42
*************** ev5_flush_tlb_other(struct mm_struct *mm
*** 42,49 ****
  
  /*
   * Flush just one page in the current TLB set.
!  * We need to be very careful about the icache here, there
!  * is no way to invalidate a specific icache page..
   */
  
  __EXTERN_INLINE void
--- 42,51 ----
  
  /*
   * Flush just one page in the current TLB set.
!  *
!  * Do a tbisd (type = 2) normally, and a tbis (type = 3) if it is an
!  * executable mapping. We want to avoid the itlb flush, because that
!  * potentially also does an icache flush.
   */
  
  __EXTERN_INLINE void
*************** static inline void flush_tlb_mm(struct m
*** 118,128 ****
  
  /*
   * Page-granular tlb flush.
-  *
-  * do a tbisd (type = 2) normally, and a tbis (type = 3)
-  * if it is an executable mapping. We want to avoid the
-  * itlb flush, because that potentially also does a
-  * icache flush.
   */
  static inline void flush_tlb_page(struct vm_area_struct *vma,
          unsigned long addr)
--- 120,125 ----
*************** static inline void flush_tlb_page(struct
*** 136,143 ****
  }
  
  /*
!  * Flush a specified range of user mapping: on the
!  * Alpha we flush the whole user tlb.
   */
  static inline void flush_tlb_range(struct mm_struct *mm,
          unsigned long start, unsigned long end)
--- 133,140 ----
  }
  
  /*
!  * Flush a specified range of user mapping: on the Alpha we flush the
!  * whole user tlb.
   */
  static inline void flush_tlb_range(struct mm_struct *mm,
          unsigned long start, unsigned long end)
Index: include/asm-alpha/smp.h
===================================================================
RCS file: /disk1/u4/cvs/linux/include/asm-alpha/smp.h,v
retrieving revision 1.14
diff -c -p -d -r1.14 smp.h
*** include/asm-alpha/smp.h 2000/02/03 04:33:58 1.14
--- include/asm-alpha/smp.h 2000/02/05 15:44:42
*************** struct cpuinfo_alpha {
*** 34,39 ****
--- 34,40 ----
          unsigned char mcheck_expected;
          unsigned char mcheck_taken;
          unsigned char mcheck_extra;
+         struct mm_struct *delayed_flush;
  } __attribute__((aligned(64)));
  
  extern struct cpuinfo_alpha cpu_data[NR_CPUS];
Index: include/asm-alpha/system.h
===================================================================
RCS file: /disk1/u4/cvs/linux/include/asm-alpha/system.h,v
retrieving revision 1.34
diff -c -p -d -r1.34 system.h
*** include/asm-alpha/system.h 1999/12/20 04:57:59 1.34
--- include/asm-alpha/system.h 2000/02/05 15:44:43
*************** struct el_common_EV6_mcheck {
*** 112,124 ****
--- 112,141 ----
  
  extern void halt(void) __attribute__((noreturn));
  
+ #ifdef __SMP__
+ #define prepare_to_switch() cpu_data[smp_processor_id()].delayed_flush = 0
+ #define __delayed_flush() \
+ do { \
+         struct mm_struct *mm; \
+         /* Note that we're synchronizing only with the current \
+            processor, not with other processors. */ \
+         mm = cpu_data[smp_processor_id()].delayed_flush; \
+         cpu_data[smp_processor_id()].delayed_flush = 0; \
+         if (current->active_mm == mm) \
+                 flush_tlb_current(mm); \
+ } while (0)
+ #else
  #define prepare_to_switch() do { } while(0)
+ #define __delayed_flush() do { } while(0)
+ #endif
+
  #define switch_to(prev,next,last) \
  do { \
          unsigned long pcbb; \
          current = (next); \
          pcbb = virt_to_phys(&current->thread); \
          (last) = alpha_switch_to(pcbb, (prev)); \
+         __delayed_flush(); \
  } while (0)
  
  extern struct task_struct* alpha_switch_to(unsigned long, struct task_struct*);
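
For orientation, the new hooks bracket the mm switch in schedule() like
this (heavily abridged gloss with stub declarations, just to show the
ordering; pick_next and the *_stub names are made up):

struct mm_struct;
struct task_struct { struct mm_struct *mm, *active_mm; };

extern struct task_struct *current;
extern struct task_struct *pick_next(void);
extern void switch_mm_stub(struct mm_struct *, struct mm_struct *);
extern void switch_to_stub(struct task_struct *, struct task_struct *);
extern void prepare_to_switch_stub(void);

void schedule_gloss(void)
{
        struct task_struct *prev = current, *next = pick_next();

        prepare_to_switch_stub();       /* crumb cleared, prev still runs */
        if (next->mm != prev->active_mm)
                switch_mm_stub(prev->active_mm, next->mm);
        /* window: the mmu is on next's mm, current is still prev --
           an ipi landing here can only leave the crumb */
        switch_to_stub(prev, next);     /* tail runs __delayed_flush() */
}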
Index: arch/alpha/kernel/smp.c
===================================================================
RCS file: /disk1/u4/cvs/linux/arch/alpha/kernel/smp.c,v
retrieving revision 1.27
diff -c -p -d -r1.27 smp.c
*** arch/alpha/kernel/smp.c 2000/02/03 04:34:02 1.27
--- arch/alpha/kernel/smp.c 2000/02/05 15:45:06
*************** struct smp_call_struct {
*** 693,730 ****
  };
  
  static struct smp_call_struct *smp_call_function_data;
!
! /* Atomicly drop data into a shared pointer. The pointer is free if
!    it is initially locked. If retry, spin until free. */
!
! static inline int
! pointer_lock (void *lock, void *data, int retry)
! {
!         void *old, *tmp;
!
!         mb();
! again:
!         /* Compare and swap with zero. */
!         asm volatile (
!         "1:     ldq_l   %0,%1\n"
!         "       mov     %3,%2\n"
!         "       bne     %0,2f\n"
!         "       stq_c   %2,%1\n"
!         "       beq     %2,1b\n"
!         "2:"
!         : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
!         : "r"(data)
!         : "memory");
!
!         if (old == 0)
!                 return 0;
!         if (! retry)
!                 return -EBUSY;
!
!         while (*(void **)lock)
!                 schedule();
!         goto again;
! }
  
  void
  handle_ipi(struct pt_regs *regs)
--- 693,699 ----
  };
  
  static struct smp_call_struct *smp_call_function_data;
! static spinlock_t smp_call_function_lock = SPIN_LOCK_UNLOCKED;
  
  void
  handle_ipi(struct pt_regs *regs)
*************** smp_call_function (void (*func) (void *i
*** 840,849 ****
          atomic_set(&data.unfinished_count, smp_num_cpus - 1);
  
          /* Aquire the smp_call_function_data mutex. */
!         if (pointer_lock(&smp_call_function_data, &data, retry))
!                 return -EBUSY;
  
          /* Send a message to all other CPUs. */
          send_ipi_message(to_whom, IPI_CALL_FUNC);
  
          /* Wait for a minimal response. */
--- 809,826 ----
          atomic_set(&data.unfinished_count, smp_num_cpus - 1);
  
          /* Aquire the smp_call_function_data mutex. */
!         local_bh_disable();
!         if (retry) {
!                 spin_lock(&smp_call_function_lock);
!         } else {
!                 if (! spin_trylock(&smp_call_function_lock)) {
!                         local_bh_enable();
!                         return -EBUSY;
!                 }
!         }
  
          /* Send a message to all other CPUs. */
+         smp_call_function_data = &data;
          send_ipi_message(to_whom, IPI_CALL_FUNC);
  
          /* Wait for a minimal response. */
*************** smp_call_function (void (*func) (void *i
*** 855,860 ****
--- 832,839 ----
          /* We either got one or timed out -- clear the lock. */
          mb();
          smp_call_function_data = 0;
+         spin_unlock(&smp_call_function_lock);
+         local_bh_enable();
          if (atomic_read (&data.unstarted_count) > 0)
                  return -ETIMEDOUT;
  
*************** static void
*** 889,896 ****
--- 868,880 ----
  ipi_flush_tlb_mm(void *x)
  {
          struct mm_struct *mm = (struct mm_struct *) x;
+
          if (mm == current->active_mm)
                  flush_tlb_current(mm);
+
+         /* We may be between switch_mm and switch_to. Leave a crumb
+            so that we can check after the fact if a flush is required. */
+         cpu_data[smp_processor_id()].delayed_flush = mm;
  }
  
  void
*************** static void
*** 918,925 ****
--- 902,914 ----
  ipi_flush_tlb_page(void *x)
  {
          struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
+
          if (data->mm == current->active_mm)
                  flush_tlb_current_page(data->mm, data->vma, data->addr);
+
+         /* We may be between switch_mm and switch_to. Leave a crumb
+            so that we can check after the fact if a flush is required. */
+         cpu_data[smp_processor_id()].delayed_flush = data->mm;
  }
  
  void
*************** flush_tlb_page(struct vm_area_struct *vm
*** 932,939 ****
                  flush_tlb_current_page(mm, vma, addr);
                  if (atomic_read(&mm->mm_users) <= 1)
                          return;
!         } else
                  flush_tlb_other(mm);
  
          data.vma = vma;
          data.mm = mm;
--- 921,929 ----
                  flush_tlb_current_page(mm, vma, addr);
                  if (atomic_read(&mm->mm_users) <= 1)
                          return;
!         } else {
                  flush_tlb_other(mm);
+         }
  
          data.vma = vma;
          data.mm = mm;
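
On the locking rework in smp_call_function: taking local_bh_disable
first keeps a bottom half on this cpu from grabbing the same lock
underneath us, and the retry argument keeps its old pointer_lock
meaning (nonzero means wait for the slot; zero means fail with -EBUSY).
A sketch of the protocol with a C11 atomic flag standing in for the
kernel spinlock (illustration only; the helper names are made up, and
the release pair is what the timeout path must also run):

#include <stdatomic.h>
#include <errno.h>

static atomic_flag call_lock = ATOMIC_FLAG_INIT;

static void local_bh_disable_stub(void) { /* stub */ }
static void local_bh_enable_stub(void)  { /* stub */ }

static int acquire_call_lock(int retry)
{
        local_bh_disable_stub();
        if (retry) {
                /* wait until the slot is free */
                while (atomic_flag_test_and_set(&call_lock))
                        continue;
        } else {
                /* fail rather than wait */
                if (atomic_flag_test_and_set(&call_lock)) {
                        local_bh_enable_stub();
                        return -EBUSY;
                }
        }
        return 0;
}

static void release_call_lock(void)
{
        atomic_flag_clear(&call_lock);
        local_bh_enable_stub();
}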
