Print this page
    
8620 pcplusmp shouldn't support x2APIC mode
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
    
      
        | Split | 
	Close | 
      
      | Expand all | 
      | Collapse all | 
    
    
          --- old/usr/src/uts/i86pc/io/pcplusmp/apic.c
          +++ new/usr/src/uts/i86pc/io/pcplusmp/apic.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  24   24   */
  25   25  /*
  26   26   * Copyright (c) 2010, Intel Corporation.
  27   27   * All rights reserved.
  28   28   */
  29   29  /*
  30   30   * Copyright (c) 2017, Joyent, Inc.  All rights reserved.
  31   31   */
  32   32  
  33   33  /*
  34   34   * To understand how the pcplusmp module interacts with the interrupt subsystem
  35   35   * read the theory statement in uts/i86pc/os/intr.c.
  36   36   */
  37   37  
  38   38  /*
  39   39   * PSMI 1.1 extensions are supported only in 2.6 and later versions.
  40   40   * PSMI 1.2 extensions are supported only in 2.7 and later versions.
  41   41   * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
  42   42   * PSMI 1.5 extensions are supported in Solaris Nevada.
  43   43   * PSMI 1.6 extensions are supported in Solaris Nevada.
  44   44   * PSMI 1.7 extensions are supported in Solaris Nevada.
  45   45   */
  46   46  #define PSMI_1_7
  47   47  
  48   48  #include <sys/processor.h>
  49   49  #include <sys/time.h>
  50   50  #include <sys/psm.h>
  51   51  #include <sys/smp_impldefs.h>
  52   52  #include <sys/cram.h>
  53   53  #include <sys/acpi/acpi.h>
  54   54  #include <sys/acpica.h>
  55   55  #include <sys/psm_common.h>
  56   56  #include <sys/apic.h>
  57   57  #include <sys/pit.h>
  58   58  #include <sys/ddi.h>
  59   59  #include <sys/sunddi.h>
  60   60  #include <sys/ddi_impldefs.h>
  61   61  #include <sys/pci.h>
  62   62  #include <sys/promif.h>
  63   63  #include <sys/x86_archext.h>
  64   64  #include <sys/cpc_impl.h>
  65   65  #include <sys/uadmin.h>
  66   66  #include <sys/panic.h>
  67   67  #include <sys/debug.h>
  68   68  #include <sys/archsystm.h>
  69   69  #include <sys/trap.h>
  70   70  #include <sys/machsystm.h>
  71   71  #include <sys/sysmacros.h>
  72   72  #include <sys/cpuvar.h>
  73   73  #include <sys/rm_platter.h>
  74   74  #include <sys/privregs.h>
  75   75  #include <sys/note.h>
  76   76  #include <sys/pci_intr_lib.h>
  77   77  #include <sys/spl.h>
  78   78  #include <sys/clock.h>
  79   79  #include <sys/cyclic.h>
  80   80  #include <sys/dditypes.h>
  81   81  #include <sys/sunddi.h>
  82   82  #include <sys/x_call.h>
  83   83  #include <sys/reboot.h>
  84   84  #include <sys/hpet.h>
  85   85  #include <sys/apic_common.h>
  86   86  #include <sys/apic_timer.h>
  87   87  
/*
 *	Local Function Prototypes
 */
static void apic_init_intr(void);

/*
 *	standard MP entries (the psm_ops vector below points at these)
 */
static int	apic_probe(void);
static int	apic_getclkirq(int ipl);
static void	apic_init(void);
static void	apic_picinit(void);
static int	apic_post_cpu_start(void);
static int	apic_intr_enter(int ipl, int *vect);
static void	apic_setspl(int ipl);
static int	apic_addspl(int ipl, int vector, int min_ipl, int max_ipl);
static int	apic_delspl(int ipl, int vector, int min_ipl, int max_ipl);
static int	apic_disable_intr(processorid_t cpun);
static void	apic_enable_intr(processorid_t cpun);
static int	apic_get_ipivect(int ipl, int type);
static void	apic_post_cyclic_setup(void *arg);
 110  109  
 111  110  /*
 112  111   * The following vector assignments influence the value of ipltopri and
 113  112   * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
 114  113   * idle to 0 and IPL 0 to 0xf to differentiate idle in case
 115  114   * we care to do so in future. Note some IPLs which are rarely used
 116  115   * will share the vector ranges and heavily used IPLs (5 and 6) have
 117  116   * a wide range.
 118  117   *
 119  118   * This array is used to initialize apic_ipls[] (in apic_init()).
 120  119   *
 121  120   *      IPL             Vector range.           as passed to intr_enter
 122  121   *      0               none.
 123  122   *      1,2,3           0x20-0x2f               0x0-0xf
 124  123   *      4               0x30-0x3f               0x10-0x1f
 125  124   *      5               0x40-0x5f               0x20-0x3f
 126  125   *      6               0x60-0x7f               0x40-0x5f
 127  126   *      7,8,9           0x80-0x8f               0x60-0x6f
 128  127   *      10              0x90-0x9f               0x70-0x7f
 129  128   *      11              0xa0-0xaf               0x80-0x8f
 130  129   *      ...             ...
 131  130   *      15              0xe0-0xef               0xc0-0xcf
 132  131   *      15              0xf0-0xff               0xd0-0xdf
 133  132   */
uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
	3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
};
	/*
	 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4].
	 * NOTE that this is the vector as passed into intr_enter, which is
	 * the programmed vector - 0x20 (APIC_BASE_VECT).
	 */

uchar_t apic_ipltopri[MAXIPL + 1];	/* unix ipl to apic pri */
	/* The taskpri to be programmed into apic to mask given ipl */

/*
 * Correlation of the hardware vector to the IPL in use, initialized
 * from apic_vectortoipl[] in apic_init().  The final IPLs may not correlate
 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 * connected to errata-stricken IOAPICs.
 */
uchar_t apic_ipls[APIC_AVAIL_VECTOR];

/*
 * Patchable global variables.
 */
int	apic_enable_hwsoftint = 0;	/* 0 - disable, 1 - enable	*/
int	apic_enable_bind_log = 1;	/* 1 - display interrupt binding log */
 159  158  
/*
 *	Local static data
 */
/*
 * psm_ops is a positional initializer: each slot corresponds to a fixed
 * entry point in the PSM framework, so ordering must not change.
 */
static struct	psm_ops apic_ops = {
	apic_probe,

	apic_init,
	apic_picinit,
	apic_intr_enter,
	apic_intr_exit,
	apic_setspl,
	apic_addspl,
	apic_delspl,
	apic_disable_intr,
	apic_enable_intr,
	(int (*)(int))NULL,		/* psm_softlvl_to_irq */
	(void (*)(int))NULL,		/* psm_set_softintr */

	apic_set_idlecpu,
	apic_unset_idlecpu,

	apic_clkinit,
	apic_getclkirq,
	(void (*)(void))NULL,		/* psm_hrtimeinit */
	apic_gethrtime,

	apic_get_next_processorid,
	apic_cpu_start,
	apic_post_cpu_start,
	apic_shutdown,
	apic_get_ipivect,
	apic_send_ipi,

	(int (*)(dev_info_t *, int))NULL,	/* psm_translate_irq */
	(void (*)(int, char *))NULL,	/* psm_notify_error */
	(void (*)(int))NULL,		/* psm_notify_func */
	apic_timer_reprogram,
	apic_timer_enable,
	apic_timer_disable,
	apic_post_cyclic_setup,
	apic_preshutdown,
	apic_intr_ops,			/* Advanced DDI Interrupt framework */
	apic_state,			/* save, restore apic state for S3 */
	apic_cpu_ops,			/* CPU control interface. */
};

struct psm_ops *psmops = &apic_ops;

static struct	psm_info apic_psm_info = {
	PSM_INFO_VER01_7,			/* version */
	PSM_OWN_EXCLUSIVE,			/* ownership */
	(struct psm_ops *)&apic_ops,		/* operation */
	APIC_PCPLUSMP_NAME,			/* machine name */
	"pcplusmp v1.4 compatible",
};

/* opaque handle returned by psm_mod_init(); used by _fini()/_info() */
static void *apic_hdlp;

/* to gather intr data and redistribute */
static void apic_redistribute_compute(void);
 220  219  
 221  220  /*
 222  221   *      This is the loadable module wrapper
 223  222   */
 224  223  
 225  224  int
 226  225  _init(void)
 227  226  {
 228  227          if (apic_coarse_hrtime)
 229  228                  apic_ops.psm_gethrtime = &apic_gettime;
 230  229          return (psm_mod_init(&apic_hdlp, &apic_psm_info));
 231  230  }
 232  231  
 233  232  int
 234  233  _fini(void)
 235  234  {
 236  235          return (psm_mod_fini(&apic_hdlp, &apic_psm_info));
 237  236  }
 238  237  
 239  238  int
 240  239  _info(struct modinfo *modinfop)
 241  240  {
 242  241          return (psm_mod_info(&apic_hdlp, &apic_psm_info, modinfop));
 243  242  }
 244  243  
 245  244  static int
 246  245  apic_probe(void)
 247  246  {
 248  247          /* check if apix is initialized */
 249  248          if (apix_enable && apix_loaded())
 250  249                  return (PSM_FAILURE);
 251  250  
 252  251          /*
 253  252           * Check whether x2APIC mode was activated by BIOS. We don't support
 254  253           * that in pcplusmp as apix normally handles that.
 255  254           */
 256  255          if (apic_local_mode() == LOCAL_X2APIC)
 257  256                  return (PSM_FAILURE);
 258  257  
 259  258          /* continue using pcplusmp PSM */
 260  259          apix_enable = 0;
 261  260  
 262  261          return (apic_probe_common(apic_psm_info.p_mach_idstring));
 263  262  }
 264  263  
 265  264  static uchar_t
 266  265  apic_xlate_vector_by_irq(uchar_t irq)
 267  266  {
 268  267          if (apic_irq_table[irq] == NULL)
 269  268                  return (0);
 270  269  
 271  270          return (apic_irq_table[irq]->airq_vector);
 272  271  }
 273  272  
 274  273  void
 275  274  apic_init(void)
 276  275  {
 277  276          int i;
 278  277          int     j = 1;
 279  278  
 280  279          psm_get_ioapicid = apic_get_ioapicid;
 281  280          psm_get_localapicid = apic_get_localapicid;
 282  281          psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;
 283  282  
 284  283          apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
 285  284          for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
 286  285                  if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
 287  286                      (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
 288  287                          /* get to highest vector at the same ipl */
 289  288                          continue;
 290  289                  for (; j <= apic_vectortoipl[i]; j++) {
 291  290                          apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
 292  291                              APIC_BASE_VECT;
 293  292                  }
 294  293          }
 295  294          for (; j < MAXIPL + 1; j++)
 296  295                  /* fill up any empty ipltopri slots */
 297  296                  apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
 298  297          apic_init_common();
 299  298  
 300  299  #if !defined(__amd64)
 301  300          if (cpuid_have_cr8access(CPU))
 302  301                  apic_have_32bit_cr8 = 1;
 303  302  #endif
 304  303  }
  
    | 
      ↓ open down ↓ | 
    191 lines elided | 
    
      ↑ open up ↑ | 
  
 305  304  
 306  305  static void
 307  306  apic_init_intr(void)
 308  307  {
 309  308          processorid_t   cpun = psm_get_cpu_id();
 310  309          uint_t nlvt;
 311  310          uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
 312  311  
 313  312          apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
 314  313  
 315      -        if (apic_mode == LOCAL_APIC) {
 316      -                /*
 317      -                 * We are running APIC in MMIO mode.
 318      -                 */
 319      -                if (apic_flat_model) {
 320      -                        apic_reg_ops->apic_write(APIC_FORMAT_REG,
 321      -                            APIC_FLAT_MODEL);
 322      -                } else {
 323      -                        apic_reg_ops->apic_write(APIC_FORMAT_REG,
 324      -                            APIC_CLUSTER_MODEL);
 325      -                }
      314 +        ASSERT(apic_mode == LOCAL_APIC);
 326  315  
 327      -                apic_reg_ops->apic_write(APIC_DEST_REG,
 328      -                    AV_HIGH_ORDER >> cpun);
      316 +        /*
      317 +         * We are running APIC in MMIO mode.
      318 +         */
      319 +        if (apic_flat_model) {
      320 +                apic_reg_ops->apic_write(APIC_FORMAT_REG, APIC_FLAT_MODEL);
      321 +        } else {
      322 +                apic_reg_ops->apic_write(APIC_FORMAT_REG, APIC_CLUSTER_MODEL);
 329  323          }
 330  324  
      325 +        apic_reg_ops->apic_write(APIC_DEST_REG, AV_HIGH_ORDER >> cpun);
      326 +
 331  327          if (apic_directed_EOI_supported()) {
 332  328                  /*
 333  329                   * Setting the 12th bit in the Spurious Interrupt Vector
 334  330                   * Register suppresses broadcast EOIs generated by the local
 335  331                   * APIC. The suppression of broadcast EOIs happens only when
 336  332                   * interrupts are level-triggered.
 337  333                   */
 338  334                  svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
 339  335          }
 340  336  
 341  337          /* need to enable APIC before unmasking NMI */
 342  338          apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);
 343  339  
 344  340          /*
 345  341           * Presence of an invalid vector with delivery mode AV_FIXED can
 346  342           * cause an error interrupt, even if the entry is masked...so
 347  343           * write a valid vector to LVT entries along with the mask bit
 348  344           */
 349  345  
 350  346          /* All APICs have timer and LINT0/1 */
 351  347          apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK|APIC_RESV_IRQ);
 352  348          apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK|APIC_RESV_IRQ);
 353  349          apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI);       /* enable NMI */
 354  350  
 355  351          /*
 356  352           * On integrated APICs, the number of LVT entries is
 357  353           * 'Max LVT entry' + 1; on 82489DX's (non-integrated
 358  354           * APICs), nlvt is "3" (LINT0, LINT1, and timer)
 359  355           */
 360  356  
 361  357          if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
 362  358                  nlvt = 3;
 363  359          } else {
 364  360                  nlvt = ((apic_reg_ops->apic_read(APIC_VERS_REG) >> 16) &
 365  361                      0xFF) + 1;
 366  362          }
 367  363  
 368  364          if (nlvt >= 5) {
 369  365                  /* Enable performance counter overflow interrupt */
 370  366  
 371  367                  if (!is_x86_feature(x86_featureset, X86FSET_MSR))
 372  368                          apic_enable_cpcovf_intr = 0;
 373  369                  if (apic_enable_cpcovf_intr) {
 374  370                          if (apic_cpcovf_vect == 0) {
 375  371                                  int ipl = APIC_PCINT_IPL;
 376  372                                  int irq = apic_get_ipivect(ipl, -1);
 377  373  
 378  374                                  ASSERT(irq != -1);
 379  375                                  apic_cpcovf_vect =
 380  376                                      apic_irq_table[irq]->airq_vector;
 381  377                                  ASSERT(apic_cpcovf_vect);
 382  378                                  (void) add_avintr(NULL, ipl,
 383  379                                      (avfunc)kcpc_hw_overflow_intr,
 384  380                                      "apic pcint", irq, NULL, NULL, NULL, NULL);
 385  381                                  kcpc_hw_overflow_intr_installed = 1;
 386  382                                  kcpc_hw_enable_cpc_intr =
 387  383                                      apic_cpcovf_mask_clear;
 388  384                          }
 389  385                          apic_reg_ops->apic_write(APIC_PCINT_VECT,
 390  386                              apic_cpcovf_vect);
 391  387                  }
 392  388          }
 393  389  
 394  390          if (nlvt >= 6) {
 395  391                  /* Only mask TM intr if the BIOS apparently doesn't use it */
 396  392  
 397  393                  uint32_t lvtval;
 398  394  
 399  395                  lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
 400  396                  if (((lvtval & AV_MASK) == AV_MASK) ||
 401  397                      ((lvtval & AV_DELIV_MODE) != AV_SMI)) {
 402  398                          apic_reg_ops->apic_write(APIC_THERM_VECT,
 403  399                              AV_MASK|APIC_RESV_IRQ);
 404  400                  }
 405  401          }
 406  402  
 407  403          /* Enable error interrupt */
 408  404  
 409  405          if (nlvt >= 4 && apic_enable_error_intr) {
 410  406                  if (apic_errvect == 0) {
 411  407                          int ipl = 0xf;  /* get highest priority intr */
 412  408                          int irq = apic_get_ipivect(ipl, -1);
 413  409  
 414  410                          ASSERT(irq != -1);
 415  411                          apic_errvect = apic_irq_table[irq]->airq_vector;
 416  412                          ASSERT(apic_errvect);
 417  413                          /*
 418  414                           * Not PSMI compliant, but we are going to merge
 419  415                           * with ON anyway
 420  416                           */
 421  417                          (void) add_avintr((void *)NULL, ipl,
 422  418                              (avfunc)apic_error_intr, "apic error intr",
 423  419                              irq, NULL, NULL, NULL, NULL);
 424  420                  }
 425  421                  apic_reg_ops->apic_write(APIC_ERR_VECT, apic_errvect);
 426  422                  apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
 427  423                  apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
 428  424          }
 429  425  
 430  426          /* Enable CMCI interrupt */
 431  427          if (cmi_enable_cmci) {
 432  428  
 433  429                  mutex_enter(&cmci_cpu_setup_lock);
 434  430                  if (cmci_cpu_setup_registered == 0) {
 435  431                          mutex_enter(&cpu_lock);
 436  432                          register_cpu_setup_func(cmci_cpu_setup, NULL);
 437  433                          mutex_exit(&cpu_lock);
 438  434                          cmci_cpu_setup_registered = 1;
 439  435                  }
 440  436                  mutex_exit(&cmci_cpu_setup_lock);
 441  437  
 442  438                  if (apic_cmci_vect == 0) {
 443  439                          int ipl = 0x2;
 444  440                          int irq = apic_get_ipivect(ipl, -1);
 445  441  
 446  442                          ASSERT(irq != -1);
 447  443                          apic_cmci_vect = apic_irq_table[irq]->airq_vector;
 448  444                          ASSERT(apic_cmci_vect);
 449  445  
 450  446                          (void) add_avintr(NULL, ipl,
 451  447                              (avfunc)cmi_cmci_trap,
 452  448                              "apic cmci intr", irq, NULL, NULL, NULL, NULL);
 453  449                  }
 454  450                  apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
 455  451          }
 456  452  }
 457  453  
 458  454  static void
 459  455  apic_picinit(void)
 460  456  {
 461  457          int i, j;
 462  458          uint_t isr;
 463  459  
 464  460          /*
 465  461           * Initialize and enable interrupt remapping before apic
 466  462           * hardware initialization
 467  463           */
 468  464          apic_intrmap_init(apic_mode);
 469  465  
 470  466          /*
 471  467           * On UniSys Model 6520, the BIOS leaves vector 0x20 isr
 472  468           * bit on without clearing it with EOI.  Since softint
 473  469           * uses vector 0x20 to interrupt itself, so softint will
 474  470           * not work on this machine.  In order to fix this problem
 475  471           * a check is made to verify all the isr bits are clear.
 476  472           * If not, EOIs are issued to clear the bits.
 477  473           */
 478  474          for (i = 7; i >= 1; i--) {
 479  475                  isr = apic_reg_ops->apic_read(APIC_ISR_REG + (i * 4));
 480  476                  if (isr != 0)
 481  477                          for (j = 0; ((j < 32) && (isr != 0)); j++)
 482  478                                  if (isr & (1 << j)) {
 483  479                                          apic_reg_ops->apic_write(
 484  480                                              APIC_EOI_REG, 0);
 485  481                                          isr &= ~(1 << j);
 486  482                                          apic_error |= APIC_ERR_BOOT_EOI;
 487  483                                  }
 488  484          }
 489  485  
 490  486          /* set a flag so we know we have run apic_picinit() */
 491  487          apic_picinit_called = 1;
 492  488          LOCK_INIT_CLEAR(&apic_gethrtime_lock);
 493  489          LOCK_INIT_CLEAR(&apic_ioapic_lock);
 494  490          LOCK_INIT_CLEAR(&apic_error_lock);
 495  491          LOCK_INIT_CLEAR(&apic_mode_switch_lock);
 496  492  
 497  493          picsetup();      /* initialise the 8259 */
 498  494  
 499  495          /* add nmi handler - least priority nmi handler */
 500  496          LOCK_INIT_CLEAR(&apic_nmi_lock);
 501  497  
 502  498          if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr,
 503  499              "pcplusmp NMI handler", (caddr_t)NULL))
 504  500                  cmn_err(CE_WARN, "pcplusmp: Unable to add nmi handler");
 505  501  
 506  502          /*
 507  503           * Check for directed-EOI capability in the local APIC.
 508  504           */
 509  505          if (apic_directed_EOI_supported() == 1) {
 510  506                  apic_set_directed_EOI_handler();
 511  507          }
 512  508  
 513  509          apic_init_intr();
 514  510  
 515  511          /* enable apic mode if imcr present */
 516  512          if (apic_imcrp) {
 517  513                  outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
 518  514                  outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);
 519  515          }
 520  516  
 521  517          ioapic_init_intr(IOAPIC_MASK);
 522  518  }
 523  519  
 524  520  #ifdef  DEBUG
 525  521  void
 526  522  apic_break(void)
 527  523  {
 528  524  }
 529  525  #endif /* DEBUG */
 530  526  
 531  527  /*
 532  528   * platform_intr_enter
 533  529   *
 534  530   *      Called at the beginning of the interrupt service routine to
 535  531   *      mask all level equal to and below the interrupt priority
 536  532   *      of the interrupting vector.  An EOI should be given to
 537  533   *      the interrupt controller to enable other HW interrupts.
 538  534   *
 539  535   *      Return -1 for spurious interrupts
 540  536   *
 541  537   */
 542  538  /*ARGSUSED*/
 543  539  static int
 544  540  apic_intr_enter(int ipl, int *vectorp)
 545  541  {
 546  542          uchar_t vector;
 547  543          int nipl;
 548  544          int irq;
 549  545          ulong_t iflag;
 550  546          apic_cpus_info_t *cpu_infop;
 551  547  
 552  548          /*
 553  549           * The real vector delivered is (*vectorp + 0x20), but our caller
 554  550           * subtracts 0x20 from the vector before passing it to us.
 555  551           * (That's why APIC_BASE_VECT is 0x20.)
 556  552           */
 557  553          vector = (uchar_t)*vectorp;
 558  554  
 559  555          /* if interrupted by the clock, increment apic_nsec_since_boot */
 560  556          if (vector == apic_clkvect) {
 561  557                  if (!apic_oneshot) {
 562  558                          /* NOTE: this is not MT aware */
 563  559                          apic_hrtime_stamp++;
 564  560                          apic_nsec_since_boot += apic_nsec_per_intr;
 565  561                          apic_hrtime_stamp++;
 566  562                          last_count_read = apic_hertz_count;
 567  563                          apic_redistribute_compute();
 568  564                  }
 569  565  
 570  566                  /* We will avoid all the book keeping overhead for clock */
 571  567                  nipl = apic_ipls[vector];
 572  568  
 573  569                  *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
 574  570  
 575  571                  apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
 576  572                  apic_reg_ops->apic_send_eoi(0);
 577  573  
 578  574                  return (nipl);
 579  575          }
 580  576  
 581  577          cpu_infop = &apic_cpus[psm_get_cpu_id()];
 582  578  
 583  579          if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
 584  580                  cpu_infop->aci_spur_cnt++;
 585  581                  return (APIC_INT_SPURIOUS);
 586  582          }
 587  583  
 588  584          /* Check if the vector we got is really what we need */
 589  585          if (apic_revector_pending) {
 590  586                  /*
 591  587                   * Disable interrupts for the duration of
 592  588                   * the vector translation to prevent a self-race for
 593  589                   * the apic_revector_lock.  This cannot be done
 594  590                   * in apic_xlate_vector because it is recursive and
 595  591                   * we want the vector translation to be atomic with
 596  592                   * respect to other (higher-priority) interrupts.
 597  593                   */
 598  594                  iflag = intr_clear();
 599  595                  vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
 600  596                      APIC_BASE_VECT;
 601  597                  intr_restore(iflag);
 602  598          }
 603  599  
 604  600          nipl = apic_ipls[vector];
 605  601          *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];
 606  602  
 607  603          apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
 608  604  
 609  605          cpu_infop->aci_current[nipl] = (uchar_t)irq;
 610  606          cpu_infop->aci_curipl = (uchar_t)nipl;
 611  607          cpu_infop->aci_ISR_in_progress |= 1 << nipl;
 612  608  
 613  609          /*
 614  610           * apic_level_intr could have been assimilated into the irq struct.
 615  611           * but, having it as a character array is more efficient in terms of
 616  612           * cache usage. So, we leave it as is.
 617  613           */
 618  614          if (!apic_level_intr[irq]) {
 619  615                  apic_reg_ops->apic_send_eoi(0);
 620  616          }
 621  617  
 622  618  #ifdef  DEBUG
 623  619          APIC_DEBUG_BUF_PUT(vector);
 624  620          APIC_DEBUG_BUF_PUT(irq);
 625  621          APIC_DEBUG_BUF_PUT(nipl);
  
    | 
      ↓ open down ↓ | 
    285 lines elided | 
    
      ↑ open up ↑ | 
  
 626  622          APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
 627  623          if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
 628  624                  drv_usecwait(apic_stretch_interrupts);
 629  625  
 630  626          if (apic_break_on_cpu == psm_get_cpu_id())
 631  627                  apic_break();
 632  628  #endif /* DEBUG */
 633  629          return (nipl);
 634  630  }
 635  631  
 636      -/*
 637      - * This macro is a common code used by MMIO local apic and X2APIC
 638      - * local apic.
 639      - */
 640      -#define APIC_INTR_EXIT() \
 641      -{ \
 642      -        cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
 643      -        if (apic_level_intr[irq]) \
 644      -                apic_reg_ops->apic_send_eoi(irq); \
 645      -        cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
 646      -        /* ISR above current pri could not be in progress */ \
 647      -        cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
 648      -}
 649      -
 650      -/*
 651      - * Any changes made to this function must also change X2APIC
 652      - * version of intr_exit.
 653      - */
 654  632  void
 655  633  apic_intr_exit(int prev_ipl, int irq)
 656  634  {
 657  635          apic_cpus_info_t *cpu_infop;
 658  636  
 659  637          apic_reg_ops->apic_write_task_reg(apic_ipltopri[prev_ipl]);
 660  638  
 661      -        APIC_INTR_EXIT();
      639 +        cpu_infop = &apic_cpus[psm_get_cpu_id()];
      640 +        if (apic_level_intr[irq])
      641 +                apic_reg_ops->apic_send_eoi(irq);
      642 +        cpu_infop->aci_curipl = (uchar_t)prev_ipl;
      643 +        /* ISR above current pri could not be in progress */
      644 +        cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1;
 662  645  }
 663  646  
 664      -/*
 665      - * Same as apic_intr_exit() except it uses MSR rather than MMIO
 666      - * to access local apic registers.
 667      - */
 668      -void
 669      -x2apic_intr_exit(int prev_ipl, int irq)
 670      -{
 671      -        apic_cpus_info_t *cpu_infop;
 672      -
 673      -        X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
 674      -        APIC_INTR_EXIT();
 675      -}
 676      -
 677  647  intr_exit_fn_t
 678  648  psm_intr_exit_fn(void)
 679  649  {
 680      -        if (apic_mode == LOCAL_X2APIC)
 681      -                return (x2apic_intr_exit);
 682      -
 683  650          return (apic_intr_exit);
 684  651  }
 685  652  
 686  653  /*
 687  654   * Mask all interrupts below or equal to the given IPL.
 688      - * Any changes made to this function must also change X2APIC
 689      - * version of setspl.
 690  655   */
 691  656  static void
 692  657  apic_setspl(int ipl)
 693  658  {
 694  659          apic_reg_ops->apic_write_task_reg(apic_ipltopri[ipl]);
 695  660  
 696  661          /* interrupts at ipl above this cannot be in progress */
 697  662          apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
 698  663          /*
 699  664           * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
 700  665           * have enough time to come in before the priority is raised again
 701  666           * during the idle() loop.
 702  667           */
 703  668          if (apic_setspl_delay)
 704  669                  (void) apic_reg_ops->apic_get_pri();
 705  670  }
 706  671  
 707      -/*
 708      - * X2APIC version of setspl.
 709      - * Mask all interrupts below or equal to the given IPL
 710      - */
 711      -static void
 712      -x2apic_setspl(int ipl)
 713      -{
 714      -        X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);
 715      -
 716      -        /* interrupts at ipl above this cannot be in progress */
 717      -        apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
 718      -}
 719      -
 720  672  /*ARGSUSED*/
 721  673  static int
 722  674  apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
 723  675  {
 724  676          return (apic_addspl_common(irqno, ipl, min_ipl, max_ipl));
 725  677  }
 726  678  
 727  679  static int
 728  680  apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
 729  681  {
 730  682          return (apic_delspl_common(irqno, ipl, min_ipl,  max_ipl));
 731  683  }
  
    | 
      ↓ open down ↓ | 
    2 lines elided | 
    
      ↑ open up ↑ | 
  
 732  684  
 733  685  static int
 734  686  apic_post_cpu_start(void)
 735  687  {
 736  688          int cpun;
 737  689          static int cpus_started = 1;
 738  690  
 739  691          /* We know this CPU + BSP  started successfully. */
 740  692          cpus_started++;
 741  693  
 742      -        /*
 743      -         * On BSP we would have enabled X2APIC, if supported by processor,
 744      -         * in acpi_probe(), but on AP we do it here.
 745      -         *
 746      -         * We enable X2APIC mode only if BSP is running in X2APIC & the
 747      -         * local APIC mode of the current CPU is MMIO (xAPIC).
 748      -         */
 749      -        if (apic_mode == LOCAL_X2APIC && apic_detect_x2apic() &&
 750      -            apic_local_mode() == LOCAL_APIC) {
 751      -                apic_enable_x2apic();
 752      -        }
 753      -
 754      -        /*
 755      -         * Switch back to x2apic IPI sending method for performance when target
 756      -         * CPU has entered x2apic mode.
 757      -         */
 758      -        if (apic_mode == LOCAL_X2APIC) {
 759      -                apic_switch_ipi_callback(B_FALSE);
 760      -        }
 761      -
 762  694          splx(ipltospl(LOCK_LEVEL));
 763  695          apic_init_intr();
 764  696  
 765  697          /*
 766  698           * since some systems don't enable the internal cache on the non-boot
 767  699           * cpus, so we have to enable them here
 768  700           */
 769  701          setcr0(getcr0() & ~(CR0_CD | CR0_NW));
 770  702  
 771      -#ifdef  DEBUG
 772  703          APIC_AV_PENDING_SET();
 773      -#else
 774      -        if (apic_mode == LOCAL_APIC)
 775      -                APIC_AV_PENDING_SET();
 776      -#endif  /* DEBUG */
 777  704  
 778  705          /*
 779  706           * We may be booting, or resuming from suspend; aci_status will
 780  707           * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
 781  708           * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
 782  709           */
 783  710          cpun = psm_get_cpu_id();
 784  711          apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;
 785  712  
 786  713          apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
 787  714          return (PSM_SUCCESS);
 788  715  }
 789  716  
 790  717  /*
 791  718   * type == -1 indicates it is an internal request. Do not change
 792  719   * resv_vector for these requests
 793  720   */
 794  721  static int
 795  722  apic_get_ipivect(int ipl, int type)
 796  723  {
 797  724          uchar_t vector;
 798  725          int irq;
 799  726  
 800  727          if ((irq = apic_allocate_irq(APIC_VECTOR(ipl))) != -1) {
 801  728                  if ((vector = apic_allocate_vector(ipl, irq, 1))) {
 802  729                          apic_irq_table[irq]->airq_mps_intr_index =
 803  730                              RESERVE_INDEX;
 804  731                          apic_irq_table[irq]->airq_vector = vector;
 805  732                          if (type != -1) {
 806  733                                  apic_resv_vector[ipl] = vector;
 807  734                          }
 808  735                          return (irq);
 809  736                  }
 810  737          }
 811  738          apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
 812  739          return (-1);    /* shouldn't happen */
 813  740  }
 814  741  
 815  742  static int
 816  743  apic_getclkirq(int ipl)
 817  744  {
 818  745          int     irq;
 819  746  
 820  747          if ((irq = apic_get_ipivect(ipl, -1)) == -1)
 821  748                  return (-1);
 822  749          /*
 823  750           * Note the vector in apic_clkvect for per clock handling.
 824  751           */
 825  752          apic_clkvect = apic_irq_table[irq]->airq_vector - APIC_BASE_VECT;
 826  753          APIC_VERBOSE_IOAPIC((CE_NOTE, "get_clkirq: vector = %x\n",
 827  754              apic_clkvect));
 828  755          return (irq);
 829  756  }
 830  757  
 831  758  /*
 832  759   * Try and disable all interrupts. We just assign interrupts to other
 833  760   * processors based on policy. If any were bound by user request, we
 834  761   * let them continue and return failure. We do not bother to check
 835  762   * for cache affinity while rebinding.
 836  763   */
 837  764  
 838  765  static int
 839  766  apic_disable_intr(processorid_t cpun)
 840  767  {
 841  768          int bind_cpu = 0, i, hardbound = 0;
 842  769          apic_irq_t *irq_ptr;
 843  770          ulong_t iflag;
 844  771  
 845  772          iflag = intr_clear();
 846  773          lock_set(&apic_ioapic_lock);
 847  774  
 848  775          for (i = 0; i <= APIC_MAX_VECTOR; i++) {
 849  776                  if (apic_reprogram_info[i].done == B_FALSE) {
 850  777                          if (apic_reprogram_info[i].bindcpu == cpun) {
 851  778                                  /*
 852  779                                   * CPU is busy -- it's the target of
 853  780                                   * a pending reprogramming attempt
 854  781                                   */
 855  782                                  lock_clear(&apic_ioapic_lock);
 856  783                                  intr_restore(iflag);
 857  784                                  return (PSM_FAILURE);
 858  785                          }
 859  786                  }
 860  787          }
 861  788  
 862  789          apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE;
 863  790  
 864  791          apic_cpus[cpun].aci_curipl = 0;
 865  792  
 866  793          i = apic_min_device_irq;
 867  794          for (; i <= apic_max_device_irq; i++) {
 868  795                  /*
 869  796                   * If there are bound interrupts on this cpu, then
 870  797                   * rebind them to other processors.
 871  798                   */
 872  799                  if ((irq_ptr = apic_irq_table[i]) != NULL) {
 873  800                          ASSERT((irq_ptr->airq_temp_cpu == IRQ_UNBOUND) ||
 874  801                              (irq_ptr->airq_temp_cpu == IRQ_UNINIT) ||
 875  802                              (apic_cpu_in_range(irq_ptr->airq_temp_cpu)));
 876  803  
 877  804                          if (irq_ptr->airq_temp_cpu == (cpun | IRQ_USER_BOUND)) {
 878  805                                  hardbound = 1;
 879  806                                  continue;
 880  807                          }
 881  808  
 882  809                          if (irq_ptr->airq_temp_cpu == cpun) {
 883  810                                  do {
 884  811                                          bind_cpu =
 885  812                                              apic_find_cpu(APIC_CPU_INTR_ENABLE);
 886  813                                  } while (apic_rebind_all(irq_ptr, bind_cpu));
 887  814                          }
 888  815                  }
 889  816          }
 890  817  
 891  818          lock_clear(&apic_ioapic_lock);
 892  819          intr_restore(iflag);
 893  820  
 894  821          if (hardbound) {
 895  822                  cmn_err(CE_WARN, "Could not disable interrupts on %d"
 896  823                      "due to user bound interrupts", cpun);
 897  824                  return (PSM_FAILURE);
 898  825          }
 899  826          else
 900  827                  return (PSM_SUCCESS);
 901  828  }
 902  829  
 903  830  /*
 904  831   * Bind interrupts to the CPU's local APIC.
 905  832   * Interrupts should not be bound to a CPU's local APIC until the CPU
 906  833   * is ready to receive interrupts.
 907  834   */
 908  835  static void
 909  836  apic_enable_intr(processorid_t cpun)
 910  837  {
 911  838          int     i;
 912  839          apic_irq_t *irq_ptr;
 913  840          ulong_t iflag;
 914  841  
 915  842          iflag = intr_clear();
 916  843          lock_set(&apic_ioapic_lock);
 917  844  
 918  845          apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE;
 919  846  
 920  847          i = apic_min_device_irq;
 921  848          for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
 922  849                  if ((irq_ptr = apic_irq_table[i]) != NULL) {
 923  850                          if ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) == cpun) {
 924  851                                  (void) apic_rebind_all(irq_ptr,
 925  852                                      irq_ptr->airq_cpu);
 926  853                          }
 927  854                  }
 928  855          }
 929  856  
 930  857          if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND)
 931  858                  apic_cpus[cpun].aci_status &= ~APIC_CPU_SUSPEND;
 932  859  
 933  860          lock_clear(&apic_ioapic_lock);
 934  861          intr_restore(iflag);
 935  862  }
 936  863  
/*
 * If this module needs a periodic handler for the interrupt distribution, it
 * can be added here. The argument to the periodic handler is not currently
 * used, but is reserved for future.
 */
static void
apic_post_cyclic_setup(void *arg)
{
_NOTE(ARGUNUSED(arg))

	cyc_handler_t cyh;
	cyc_time_t cyt;

	/* cpu_lock is held */
	/* set up a periodic handler for intr redistribution */

	/*
	 * In periodic mode intr redistribution processing is done in
	 * apic_intr_enter during clk intr processing, so there is nothing
	 * to set up here.
	 */
	if (!apic_oneshot)
		return;

	/*
	 * Register a periodical handler for the redistribution processing.
	 * Though we would generally prefer to use the DDI interface for
	 * periodic handler invocation, ddi_periodic_add(9F), we are
	 * unfortunately already holding cpu_lock, which ddi_periodic_add will
	 * attempt to take for us.  Thus, we add our own cyclic directly:
	 */
	cyh.cyh_func = (void (*)(void *))apic_redistribute_compute;
	cyh.cyh_arg = NULL;
	cyh.cyh_level = CY_LOW_LEVEL;

	cyt.cyt_when = 0;
	cyt.cyt_interval = apic_redistribute_sample_interval;

	apic_cyclic_id = cyclic_add(&cyh, &cyt);
}
 976  903  
/*
 * Periodic sampler for dynamic interrupt migration: accumulate per-CPU and
 * per-irq busy counts, and every apic_sample_factor_redistribution ticks
 * decide whether to invoke apic_intr_redistribute().
 */
static void
apic_redistribute_compute(void)
{
	int	i, j, max_busy;

	if (apic_enable_dynamic_migration) {
		if (++apic_nticks == apic_sample_factor_redistribution) {
			/*
			 * Time to call apic_intr_redistribute().
			 * reset apic_nticks. This will cause max_busy
			 * to be calculated below and if it is more than
			 * apic_int_busy, we will do the whole thing
			 */
			apic_nticks = 0;
		}
		max_busy = 0;
		for (i = 0; i < apic_nproc; i++) {
			if (!apic_cpu_in_range(i))
				continue;

			/*
			 * Check if curipl is non zero & if ISR is in
			 * progress; if so, charge this sample to the CPU
			 * and to the irq it is currently servicing.
			 */
			if (((j = apic_cpus[i].aci_curipl) != 0) &&
			    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {

				int	irq;
				apic_cpus[i].aci_busy++;
				irq = apic_cpus[i].aci_current[j];
				apic_irq_table[irq]->airq_busy++;
			}

			/* Track the busiest CPU only on decision ticks. */
			if (!apic_nticks &&
			    (apic_cpus[i].aci_busy > max_busy))
				max_busy = apic_cpus[i].aci_busy;
		}
		if (!apic_nticks) {
			if (max_busy > apic_int_busy_mark) {
			/*
			 * We could make the following check be
			 * skipped > 1 in which case, we get a
			 * redistribution at half the busy mark (due to
			 * double interval). Need to be able to collect
			 * more empirical data to decide if that is a
			 * good strategy. Punt for now.
			 */
				if (apic_skipped_redistribute) {
					apic_cleanup_busy();
					apic_skipped_redistribute = 0;
				} else {
					apic_intr_redistribute();
				}
			} else
				apic_skipped_redistribute++;
		}
	}
}
1035  962  
1036  963  
1037  964  /*
1038  965   * The following functions are in the platform specific file so that they
1039  966   * can be different functions depending on whether we are running on
1040  967   * bare metal or a hypervisor.
1041  968   */
1042  969  
1043  970  /*
1044  971   * Check to make sure there are enough irq slots
1045  972   */
1046  973  int
1047  974  apic_check_free_irqs(int count)
1048  975  {
1049  976          int i, avail;
1050  977  
1051  978          avail = 0;
1052  979          for (i = APIC_FIRST_FREE_IRQ; i < APIC_RESV_IRQ; i++) {
1053  980                  if ((apic_irq_table[i] == NULL) ||
1054  981                      apic_irq_table[i]->airq_mps_intr_index == FREE_INDEX) {
1055  982                          if (++avail >= count)
1056  983                                  return (PSM_SUCCESS);
1057  984                  }
1058  985          }
1059  986          return (PSM_FAILURE);
1060  987  }
1061  988  
1062  989  /*
1063  990   * This function allocates "count" MSI vector(s) for the given "dip/pri/type"
1064  991   */
1065  992  int
1066  993  apic_alloc_msi_vectors(dev_info_t *dip, int inum, int count, int pri,
1067  994      int behavior)
1068  995  {
1069  996          int     rcount, i;
1070  997          uchar_t start, irqno;
1071  998          uint32_t cpu;
1072  999          major_t major;
1073 1000          apic_irq_t      *irqptr;
1074 1001  
1075 1002          DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: dip=0x%p "
1076 1003              "inum=0x%x  pri=0x%x count=0x%x behavior=%d\n",
1077 1004              (void *)dip, inum, pri, count, behavior));
1078 1005  
1079 1006          if (count > 1) {
1080 1007                  if (behavior == DDI_INTR_ALLOC_STRICT &&
1081 1008                      apic_multi_msi_enable == 0)
1082 1009                          return (0);
1083 1010                  if (apic_multi_msi_enable == 0)
1084 1011                          count = 1;
1085 1012          }
1086 1013  
1087 1014          if ((rcount = apic_navail_vector(dip, pri)) > count)
1088 1015                  rcount = count;
1089 1016          else if (rcount == 0 || (rcount < count &&
1090 1017              behavior == DDI_INTR_ALLOC_STRICT))
1091 1018                  return (0);
1092 1019  
1093 1020          /* if not ISP2, then round it down */
1094 1021          if (!ISP2(rcount))
1095 1022                  rcount = 1 << (highbit(rcount) - 1);
1096 1023  
1097 1024          mutex_enter(&airq_mutex);
1098 1025  
1099 1026          for (start = 0; rcount > 0; rcount >>= 1) {
1100 1027                  if ((start = apic_find_multi_vectors(pri, rcount)) != 0 ||
1101 1028                      behavior == DDI_INTR_ALLOC_STRICT)
1102 1029                          break;
1103 1030          }
1104 1031  
1105 1032          if (start == 0) {
1106 1033                  /* no vector available */
1107 1034                  mutex_exit(&airq_mutex);
1108 1035                  return (0);
1109 1036          }
1110 1037  
1111 1038          if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
1112 1039                  /* not enough free irq slots available */
1113 1040                  mutex_exit(&airq_mutex);
1114 1041                  return (0);
1115 1042          }
1116 1043  
1117 1044          major = (dip != NULL) ? ddi_driver_major(dip) : 0;
1118 1045          for (i = 0; i < rcount; i++) {
1119 1046                  if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
1120 1047                      (uchar_t)-1) {
1121 1048                          /*
1122 1049                           * shouldn't happen because of the
1123 1050                           * apic_check_free_irqs() check earlier
1124 1051                           */
1125 1052                          mutex_exit(&airq_mutex);
1126 1053                          DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
1127 1054                              "apic_allocate_irq failed\n"));
1128 1055                          return (i);
1129 1056                  }
1130 1057                  apic_max_device_irq = max(irqno, apic_max_device_irq);
1131 1058                  apic_min_device_irq = min(irqno, apic_min_device_irq);
1132 1059                  irqptr = apic_irq_table[irqno];
1133 1060  #ifdef  DEBUG
1134 1061                  if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
1135 1062                          DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
1136 1063                              "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
1137 1064  #endif
1138 1065                  apic_vector_to_irq[start + i] = (uchar_t)irqno;
1139 1066  
1140 1067                  irqptr->airq_vector = (uchar_t)(start + i);
1141 1068                  irqptr->airq_ioapicindex = (uchar_t)inum;       /* start */
1142 1069                  irqptr->airq_intin_no = (uchar_t)rcount;
1143 1070                  irqptr->airq_ipl = pri;
1144 1071                  irqptr->airq_vector = start + i;
1145 1072                  irqptr->airq_origirq = (uchar_t)(inum + i);
1146 1073                  irqptr->airq_share_id = 0;
1147 1074                  irqptr->airq_mps_intr_index = MSI_INDEX;
1148 1075                  irqptr->airq_dip = dip;
1149 1076                  irqptr->airq_major = major;
1150 1077                  if (i == 0) /* they all bound to the same cpu */
1151 1078                          cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
1152 1079                              0xff, 0xff);
1153 1080                  else
1154 1081                          irqptr->airq_cpu = cpu;
1155 1082                  DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: irq=0x%x "
1156 1083                      "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
1157 1084                      (void *)irqptr->airq_dip, irqptr->airq_vector,
1158 1085                      irqptr->airq_origirq, pri));
1159 1086          }
1160 1087          mutex_exit(&airq_mutex);
1161 1088          return (rcount);
1162 1089  }
1163 1090  
/*
 * This function allocates "count" MSI-X vector(s) for the given "dip/pri/type"
 *
 * Unlike MSI, MSI-X vectors need not be contiguous, so each one is
 * allocated individually with apic_allocate_vector().  Returns the number
 * of vectors actually allocated (0 on failure).
 */
int
apic_alloc_msix_vectors(dev_info_t *dip, int inum, int count, int pri,
    int behavior)
{
	int	rcount, i;
	major_t major;

	mutex_enter(&airq_mutex);

	/* Trim the request to what is available (strict callers get 0). */
	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT)) {
		rcount = 0;
		goto out;
	}

	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
		/* not enough free irq slots available */
		rcount = 0;
		goto out;
	}

	major = (dip != NULL) ? ddi_driver_major(dip) : 0;
	for (i = 0; i < rcount; i++) {
		uchar_t vector, irqno;
		apic_irq_t	*irqptr;

		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			/*
			 * shouldn't happen because of the
			 * apic_check_free_irqs() check earlier
			 */
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
			    "apic_allocate_irq failed\n"));
			rcount = i;
			goto out;
		}
		if ((vector = apic_allocate_vector(pri, irqno, 1)) == 0) {
			/*
			 * shouldn't happen because of the
			 * apic_navail_vector() call earlier
			 */
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
			    "apic_allocate_vector failed\n"));
			rcount = i;
			goto out;
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
		irqptr->airq_vector = (uchar_t)vector;
		irqptr->airq_ipl = pri;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSIX_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		irqptr->airq_cpu = apic_bind_intr(dip, irqno, 0xff, 0xff);
	}
out:
	mutex_exit(&airq_mutex);
	return (rcount);
}
1232 1159  
1233 1160  /*
1234 1161   * Allocate a free vector for irq at ipl. Takes care of merging of multiple
1235 1162   * IPLs into a single APIC level as well as stretching some IPLs onto multiple
1236 1163   * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority
1237 1164   * requests and allocated only when pri is set.
1238 1165   */
1239 1166  uchar_t
1240 1167  apic_allocate_vector(int ipl, int irq, int pri)
1241 1168  {
1242 1169          int     lowest, highest, i;
1243 1170  
1244 1171          highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK;
1245 1172          lowest = apic_ipltopri[ipl - 1] + APIC_VECTOR_PER_IPL;
1246 1173  
1247 1174          if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
1248 1175                  lowest -= APIC_VECTOR_PER_IPL;
1249 1176  
1250 1177  #ifdef  DEBUG
1251 1178          if (apic_restrict_vector)       /* for testing shared interrupt logic */
1252 1179                  highest = lowest + apic_restrict_vector + APIC_HI_PRI_VECTS;
1253 1180  #endif /* DEBUG */
1254 1181          if (pri == 0)
1255 1182                  highest -= APIC_HI_PRI_VECTS;
1256 1183  
1257 1184          for (i = lowest; i <= highest; i++) {
1258 1185                  if (APIC_CHECK_RESERVE_VECTORS(i))
1259 1186                          continue;
1260 1187                  if (apic_vector_to_irq[i] == APIC_RESV_IRQ) {
1261 1188                          apic_vector_to_irq[i] = (uchar_t)irq;
1262 1189                          return (i);
1263 1190                  }
1264 1191          }
1265 1192  
1266 1193          return (0);
1267 1194  }
1268 1195  
1269 1196  /* Mark vector as not being used by any irq */
1270 1197  void
1271 1198  apic_free_vector(uchar_t vector)
1272 1199  {
1273 1200          apic_vector_to_irq[vector] = APIC_RESV_IRQ;
1274 1201  }
1275 1202  
/*
 * Call rebind to do the actual programming.
 * Must be called with interrupts disabled and apic_ioapic_lock held
 * 'p' is polymorphic -- if this function is called to process a deferred
 * reprogramming, p is of type 'struct ioapic_reprogram_data *', from which
 * the irq pointer is retrieved.  If not doing deferred reprogramming,
 * p is of the type 'apic_irq_t *'.
 *
 * apic_ioapic_lock must be held across this call, as it protects apic_rebind
 * and it protects apic_get_next_bind_cpu() from a race in which a CPU can be
 * taken offline after a cpu is selected, but before apic_rebind is called to
 * bind interrupts to it.
 *
 * Returns 0 on success; non-zero if the rebind (including the fallback to
 * the first available CPU) could not be completed.
 */
int
apic_setup_io_intr(void *p, int irq, boolean_t deferred)
{
	apic_irq_t *irqptr;
	struct ioapic_reprogram_data *drep = NULL;
	int rv;

	/* Recover the irq pointer from whichever form 'p' takes. */
	if (deferred) {
		drep = (struct ioapic_reprogram_data *)p;
		ASSERT(drep != NULL);
		irqptr = drep->irqp;
	} else
		irqptr = (apic_irq_t *)p;

	ASSERT(irqptr != NULL);

	rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, drep);
	if (rv) {
		/*
		 * CPU is not up or interrupts are disabled. Fall back to
		 * the first available CPU
		 */
		rv = apic_rebind(irqptr, apic_find_cpu(APIC_CPU_INTR_ENABLE),
		    drep);
	}

	return (rv);
}
1317 1244  
1318 1245  
1319 1246  uchar_t
1320 1247  apic_modify_vector(uchar_t vector, int irq)
1321 1248  {
1322 1249          apic_vector_to_irq[vector] = (uchar_t)irq;
  
    | 
      ↓ open down ↓ | 
    536 lines elided | 
    
      ↑ open up ↑ | 
  
1323 1250          return (vector);
1324 1251  }
1325 1252  
1326 1253  char *
1327 1254  apic_get_apic_type(void)
1328 1255  {
1329 1256          return (apic_psm_info.p_mach_idstring);
1330 1257  }
1331 1258  
1332 1259  void
1333      -x2apic_update_psm(void)
     1260 +apic_switch_ipi_callback(boolean_t enter)
1334 1261  {
1335      -        struct psm_ops *pops = &apic_ops;
     1262 +        ASSERT(enter == B_TRUE);
     1263 +}
1336 1264  
1337      -        ASSERT(pops != NULL);
     1265 +int
     1266 +apic_detect_x2apic(void)
     1267 +{
     1268 +        return (0);
     1269 +}
1338 1270  
1339      -        pops->psm_intr_exit = x2apic_intr_exit;
1340      -        pops->psm_setspl = x2apic_setspl;
     1271 +void
     1272 +apic_enable_x2apic(void)
     1273 +{
     1274 +        cmn_err(CE_PANIC, "apic_enable_x2apic() called in pcplusmp");
     1275 +}
1341 1276  
1342      -        pops->psm_send_ipi =  x2apic_send_ipi;
1343      -        send_dirintf = pops->psm_send_ipi;
1344      -
1345      -        apic_mode = LOCAL_X2APIC;
1346      -        apic_change_ops();
     1277 +void
     1278 +x2apic_update_psm(void)
     1279 +{
     1280 +        cmn_err(CE_PANIC, "x2apic_update_psm() called in pcplusmp");
1347 1281  }
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX