/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013 Pluribus Networks, Inc.
 */

/*
 * apic_introp.c:
 *      Contains the code supporting the advanced DDI interrupt framework.
 */

#include <sys/cpuvar.h>
#include <sys/psm.h>
#include <sys/archsystm.h>
#include <sys/apic.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/mach_intr.h>
#include <sys/sysmacros.h>
#include <sys/trap.h>
#include <sys/pci.h>
#include <sys/pci_intr_lib.h>
#include <sys/apic_common.h>

extern struct av_head autovect[];

/*
 *      Local Function Prototypes
 */
apic_irq_t      *apic_find_irq(dev_info_t *, struct intrspec *, int);

/*
 * apic_pci_msi_enable_vector:
 *      Set the address/data fields in the MSI/X capability structure
 *      XXX: MSI-X support
 */
/* ARGSUSED */
void
apic_pci_msi_enable_vector(apic_irq_t *irq_ptr, int type, int inum, int vector,
    int count, int target_apic_id)
{
        uint64_t                msi_addr, msi_data;
        ushort_t                msi_ctrl;
        dev_info_t              *dip = irq_ptr->airq_dip;
        int                     cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
        ddi_acc_handle_t        handle = i_ddi_get_pci_config_handle(dip);
        msi_regs_t              msi_regs;
        int                     irqno, i;
        void                    *intrmap_tbl[PCI_MSI_MAX_INTRS];

        DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: dip=0x%p\n"
            "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
            ddi_driver_name(dip), inum, vector, target_apic_id));

        ASSERT((handle != NULL) && (cap_ptr != 0));

        msi_regs.mr_data = vector;
        msi_regs.mr_addr = target_apic_id;

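        /*
         * Collect the interrupt remapping private data for every vector
         * in this block, have the remapping ops allocate entries for the
         * whole group, and then store the results back into the IRQ
         * table entries.
         */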
        for (i = 0; i < count; i++) {
                irqno = apic_vector_to_irq[vector + i];
                intrmap_tbl[i] = apic_irq_table[irqno]->airq_intrmap_private;
        }
        apic_vt_ops->apic_intrmap_alloc_entry(intrmap_tbl, dip, type,
            count, 0xff);
        for (i = 0; i < count; i++) {
                irqno = apic_vector_to_irq[vector + i];
                apic_irq_table[irqno]->airq_intrmap_private =
                    intrmap_tbl[i];
        }

        apic_vt_ops->apic_intrmap_map_entry(irq_ptr->airq_intrmap_private,
            (void *)&msi_regs, type, count);
        apic_vt_ops->apic_intrmap_record_msi(irq_ptr->airq_intrmap_private,
            &msi_regs);

        /* MSI Address */
        msi_addr = msi_regs.mr_addr;

        /* MSI Data: MSI is edge triggered according to spec */
        msi_data = msi_regs.mr_data;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: addr=0x%lx "
            "data=0x%lx\n", (long)msi_addr, (long)msi_data));

        if (type == DDI_INTR_TYPE_MSI) {
                msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

                /* Set the bits to inform how many MSIs are enabled */
                msi_ctrl |= ((highbit(count) - 1) << PCI_MSI_MME_SHIFT);
                pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

                /* Write the MSI address into the capability structure */
                pci_config_put32(handle,
                    cap_ptr + PCI_MSI_ADDR_OFFSET, msi_addr);

                if (msi_ctrl & PCI_MSI_64BIT_MASK) {
                        pci_config_put32(handle,
                            cap_ptr + PCI_MSI_ADDR_OFFSET + 4, msi_addr >> 32);
                        pci_config_put16(handle,
                            cap_ptr + PCI_MSI_64BIT_DATA, msi_data);
                } else {
                        pci_config_put16(handle,
                            cap_ptr + PCI_MSI_32BIT_DATA, msi_data);
                }

        } else if (type == DDI_INTR_TYPE_MSIX) {
                uintptr_t       off;
                ddi_intr_msix_t *msix_p = i_ddi_get_msix(dip);

                ASSERT(msix_p != NULL);

                /* Offset into the "inum"th entry in the MSI-X table */
                off = (uintptr_t)msix_p->msix_tbl_addr +
                    (inum * PCI_MSIX_VECTOR_SIZE);

                ddi_put32(msix_p->msix_tbl_hdl,
                    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), msi_data);
                ddi_put32(msix_p->msix_tbl_hdl,
                    (uint32_t *)(off + PCI_MSIX_LOWER_ADDR_OFFSET), msi_addr);
                ddi_put32(msix_p->msix_tbl_hdl,
                    (uint32_t *)(off + PCI_MSIX_UPPER_ADDR_OFFSET),
                    msi_addr >> 32);
        }
}

/*
 * This function returns the number of vectors available at the given
 * priority.  dip is not used at this moment; if it is really not needed,
 * it will be removed.
 */
/*ARGSUSED*/
int
apic_navail_vector(dev_info_t *dip, int pri)
{
        int     lowest, highest, i, navail, count;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_navail_vector: dip: %p, pri: %x\n",
            (void *)dip, pri));

        highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
        lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
        navail = count = 0;

        if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
                lowest -= APIC_VECTOR_PER_IPL;

        /* It has to be contiguous */
        for (i = lowest; i <= highest; i++) {
                count = 0;
                while ((i <= highest) &&
                    (apic_vector_to_irq[i] == APIC_RESV_IRQ)) {
                        if (APIC_CHECK_RESERVE_VECTORS(i))
                                break;
                        count++;
                        i++;
                }
                if (count > navail)
                        navail = count;
        }
        return (navail);
}

/*
 * Finds "count" contiguous MSI vectors starting at the proper alignment
 * for "pri".
 * The caller must make sure that count is a power of 2 and is not < 1.
 */
uchar_t
apic_find_multi_vectors(int pri, int count)
{
        int     lowest, highest, i, navail, start, msibits;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_find_mult: pri: %x, count: %x\n",
            pri, count));

        highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
        lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
        navail = 0;

        if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
                lowest -= APIC_VECTOR_PER_IPL;

        /*
         * msibits is the number of lower order message data bits for the
         * allocated MSI vectors and is used to calculate the aligned
         * starting vector
         */
        msibits = count - 1;

        /* It has to be contiguous */
        for (i = lowest; i <= highest; i++) {
                navail = 0;

                /*
                 * starting vector has to be aligned accordingly for
                 * multiple MSIs
                 */
                if (msibits)
                        i = (i + msibits) & ~msibits;
                start = i;
                while ((i <= highest) &&
                    (apic_vector_to_irq[i] == APIC_RESV_IRQ)) {
                        if (APIC_CHECK_RESERVE_VECTORS(i))
                                break;
                        navail++;
                        if (navail >= count)
                                return (start);
                        i++;
                }
        }
        return (0);
}


/*
 * Finds the apic_irq_t associated with the dip, ispec and type.
 */
apic_irq_t *
apic_find_irq(dev_info_t *dip, struct intrspec *ispec, int type)
{
        apic_irq_t      *irqp;
        int i;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: dip=0x%p vec=0x%x "
            "ipl=0x%x type=0x%x\n", (void *)dip, ispec->intrspec_vec,
            ispec->intrspec_pri, type));

        for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
                for (irqp = apic_irq_table[i]; irqp; irqp = irqp->airq_next) {
                        if ((irqp->airq_dip == dip) &&
                            (irqp->airq_origirq == ispec->intrspec_vec) &&
                            (irqp->airq_ipl == ispec->intrspec_pri)) {
                                if (type == DDI_INTR_TYPE_MSI) {
                                        if (irqp->airq_mps_intr_index ==
                                            MSI_INDEX)
                                                return (irqp);
                                } else if (type == DDI_INTR_TYPE_MSIX) {
                                        if (irqp->airq_mps_intr_index ==
                                            MSIX_INDEX)
                                                return (irqp);
                                } else
                                        return (irqp);
                        }
                }
        }
        DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: return NULL\n"));
        return (NULL);
}

/*
 * This function returns the pending bit of the irqp.
 * It comes either from the IRR register of the local APIC or from the
 * RDT entry of the I/O APIC.
 * For the IRR read to be meaningful, it must be done on the CPU the
 * interrupt is bound to.
 */
static int
apic_get_pending(apic_irq_t *irqp, int type)
{
        int                     bit, index, irr, pending;
        int                     intin_no;
        int                     apic_ix;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_get_pending: irqp: %p, cpuid: %x "
            "type: %x\n", (void *)irqp, irqp->airq_cpu & ~IRQ_USER_BOUND,
            type));

        /* need to get on the bound cpu */
        mutex_enter(&cpu_lock);
        affinity_set(irqp->airq_cpu & ~IRQ_USER_BOUND);

        index = irqp->airq_vector / 32;
        bit = irqp->airq_vector % 32;
        irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);

        affinity_clear();
        mutex_exit(&cpu_lock);

        pending = (irr & (1 << bit)) ? 1 : 0;
        if (!pending && (type == DDI_INTR_TYPE_FIXED)) {
                /* check I/O APIC for fixed interrupt */
                intin_no = irqp->airq_intin_no;
                apic_ix = irqp->airq_ioapicindex;
                pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no) &
                    AV_PENDING) ? 1 : 0;
        }
        return (pending);
}


/*
 * This function will clear the mask for the interrupt on the I/O APIC
 */
static void
apic_clear_mask(apic_irq_t *irqp)
{
        int                     intin_no;
        ulong_t                 iflag;
        int32_t                 rdt_entry;
        int                     apic_ix;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_clear_mask: irqp: %p\n",
            (void *)irqp));

        intin_no = irqp->airq_intin_no;
        apic_ix = irqp->airq_ioapicindex;

        iflag = intr_clear();
        lock_set(&apic_ioapic_lock);

        rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no);

        /* clear mask */
        WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no,
            ((~AV_MASK) & rdt_entry));

        lock_clear(&apic_ioapic_lock);
        intr_restore(iflag);
}


/*
 * This function will mask the interrupt on the I/O APIC
 */
static void
apic_set_mask(apic_irq_t *irqp)
{
        int                     intin_no;
        int                     apic_ix;
        ulong_t                 iflag;
        int32_t                 rdt_entry;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_set_mask: irqp: %p\n", (void *)irqp));

        intin_no = irqp->airq_intin_no;
        apic_ix = irqp->airq_ioapicindex;

        iflag = intr_clear();

        lock_set(&apic_ioapic_lock);

        rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no);

        /* mask it */
        WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no,
            (AV_MASK | rdt_entry));

        lock_clear(&apic_ioapic_lock);
        intr_restore(iflag);
}


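/*
 * apic_free_vectors:
 *      Free the MSI/MSI-X vectors that were allocated for "count"
 *      consecutive inums of the device, marking each IRQ table entry
 *      free and returning its vector to the available pool.
 */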
void
apic_free_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
        int i;
        apic_irq_t *irqptr;
        struct intrspec ispec;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: dip: %p inum: %x "
            "count: %x pri: %x type: %x\n",
            (void *)dip, inum, count, pri, type));

        /* for MSI/X only */
        if (!DDI_INTR_IS_MSI_OR_MSIX(type))
                return;

        for (i = 0; i < count; i++) {
                DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: inum=0x%x "
                    "pri=0x%x count=0x%x\n", inum, pri, count));
                ispec.intrspec_vec = inum + i;
                ispec.intrspec_pri = pri;
                if ((irqptr = apic_find_irq(dip, &ispec, type)) == NULL) {
                        DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: "
                            "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
                            "failed\n", (void *)dip, inum, pri));
                        continue;
                }
                irqptr->airq_mps_intr_index = FREE_INDEX;
                apic_vector_to_irq[irqptr->airq_vector] = APIC_RESV_IRQ;
        }
}

/*
 * apic_pci_msi_enable_mode:
 *      Enable the MSI capability, or unmask the given MSI-X vector and
 *      enable the MSI-X capability, for the device.
 */
void
apic_pci_msi_enable_mode(dev_info_t *rdip, int type, int inum)
{
        ushort_t                msi_ctrl;
        int                     cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
        ddi_acc_handle_t        handle = i_ddi_get_pci_config_handle(rdip);

        ASSERT((handle != NULL) && (cap_ptr != 0));

        if (type == DDI_INTR_TYPE_MSI) {
                msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
                if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
                        return;

                msi_ctrl |= PCI_MSI_ENABLE_BIT;
                pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

        } else if (type == DDI_INTR_TYPE_MSIX) {
                uintptr_t       off;
                uint32_t        mask;
                ddi_intr_msix_t *msix_p;

                msix_p = i_ddi_get_msix(rdip);

                ASSERT(msix_p != NULL);

                /* Offset into "inum"th entry in the MSI-X table & clear mask */
                off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
                    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;

                mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);

                ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask & ~1));

                msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);

                if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT)) {
                        msi_ctrl |= PCI_MSIX_ENABLE_BIT;
                        pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
                            msi_ctrl);
                }
        }
}

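/*
 * apic_set_cpu:
 *      Rebind the interrupt identified by "irqno" to the given CPU.
 *      *result is set to 0 on success or to an errno value on failure;
 *      the return value is PSM_SUCCESS or PSM_FAILURE accordingly.
 */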
static int
apic_set_cpu(int irqno, int cpu, int *result)
{
        apic_irq_t *irqp;
        ulong_t iflag;
        int ret;

        DDI_INTR_IMPLDBG((CE_CONT, "APIC_SET_CPU\n"));

        mutex_enter(&airq_mutex);
        irqp = apic_irq_table[irqno];
        mutex_exit(&airq_mutex);

        if (irqp == NULL) {
                *result = ENXIO;
                return (PSM_FAILURE);
        }

        /* Fail if this is an MSI intr and is part of a group. */
        if ((irqp->airq_mps_intr_index == MSI_INDEX) &&
            (irqp->airq_intin_no > 1)) {
                *result = ENXIO;
                return (PSM_FAILURE);
        }

        iflag = intr_clear();
        lock_set(&apic_ioapic_lock);

        ret = apic_rebind_all(irqp, cpu);

        lock_clear(&apic_ioapic_lock);
        intr_restore(iflag);

        if (ret) {
                *result = EIO;
                return (PSM_FAILURE);
        }
        /*
         * keep track of the default interrupt CPU binding
         */
        irqp->airq_cpu = cpu;

        *result = 0;
        return (PSM_SUCCESS);
}

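/*
 * apic_grp_set_cpu:
 *      Rebind a whole block of MSI vectors, identified by the IRQ of its
 *      base vector, to the given CPU.  If the device supports per-vector
 *      masking, the vectors are masked while the rebind is in progress.
 */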
static int
apic_grp_set_cpu(int irqno, int new_cpu, int *result)
{
        dev_info_t *orig_dip;
        uint32_t orig_cpu;
        ulong_t iflag;
        apic_irq_t *irqps[PCI_MSI_MAX_INTRS];
        int i;
        int cap_ptr;
        int msi_mask_off;
        ushort_t msi_ctrl;
        uint32_t msi_pvm;
        ddi_acc_handle_t handle;
        int num_vectors = 0;
        uint32_t vector;

        DDI_INTR_IMPLDBG((CE_CONT, "APIC_GRP_SET_CPU\n"));

        /*
         * Take the mutex to ensure that the table doesn't change out from
         * underneath us while we're working with it.
         */
        mutex_enter(&airq_mutex);
        irqps[0] = apic_irq_table[irqno];
        orig_cpu = irqps[0]->airq_temp_cpu;
        orig_dip = irqps[0]->airq_dip;
        num_vectors = irqps[0]->airq_intin_no;
        vector = irqps[0]->airq_vector;

        /* A "group" of 1 */
        if (num_vectors == 1) {
                mutex_exit(&airq_mutex);
                return (apic_set_cpu(irqno, new_cpu, result));
        }

        *result = ENXIO;

        if (irqps[0]->airq_mps_intr_index != MSI_INDEX) {
                mutex_exit(&airq_mutex);
                DDI_INTR_IMPLDBG((CE_CONT, "set_grp: intr not MSI\n"));
                goto set_grp_intr_done;
        }
        if ((num_vectors < 1) || ((num_vectors - 1) & vector)) {
                mutex_exit(&airq_mutex);
                DDI_INTR_IMPLDBG((CE_CONT,
                    "set_grp: base vec not part of a grp or not aligned: "
                    "vec:0x%x, num_vec:0x%x\n", vector, num_vectors));
                goto set_grp_intr_done;
        }
        DDI_INTR_IMPLDBG((CE_CONT, "set_grp: num intrs in grp: %d\n",
            num_vectors));

        ASSERT((num_vectors + vector) < APIC_MAX_VECTOR);

        *result = EIO;

        /*
         * None of the IRQ entries in the table for the given device are
         * shared.  Since they are not shared, the dip in the table will
         * be true to the device of interest.
         */
        for (i = 1; i < num_vectors; i++) {
                irqps[i] = apic_irq_table[apic_vector_to_irq[vector + i]];
                if (irqps[i] == NULL) {
                        mutex_exit(&airq_mutex);
                        goto set_grp_intr_done;
                }
#ifdef DEBUG
                /* Sanity check: CPU and dip are the same for all entries. */
                if ((irqps[i]->airq_dip != orig_dip) ||
                    (irqps[i]->airq_temp_cpu != orig_cpu)) {
                        mutex_exit(&airq_mutex);
                        DDI_INTR_IMPLDBG((CE_CONT,
                            "set_grp: cpu or dip for vec 0x%x differs from "
                            "that for vec 0x%x\n", vector, vector + i));
                        DDI_INTR_IMPLDBG((CE_CONT,
                            "  cpu: %d vs %d, dip: 0x%p vs 0x%p\n", orig_cpu,
                            irqps[i]->airq_temp_cpu, (void *)orig_dip,
                            (void *)irqps[i]->airq_dip));
                        goto set_grp_intr_done;
                }
#endif /* DEBUG */
        }
        mutex_exit(&airq_mutex);

        cap_ptr = i_ddi_get_msi_msix_cap_ptr(orig_dip);
        handle = i_ddi_get_pci_config_handle(orig_dip);
        msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

        /* If MSI per-vector masking is supported, mask the whole group. */
        if (msi_ctrl & PCI_MSI_PVM_MASK) {
                if (msi_ctrl & PCI_MSI_64BIT_MASK)
                        msi_mask_off = cap_ptr + PCI_MSI_64BIT_MASKBITS;
                else
                        msi_mask_off = cap_ptr + PCI_MSI_32BIT_MASK;
                msi_pvm = pci_config_get32(handle, msi_mask_off);
                pci_config_put32(handle, msi_mask_off, (uint32_t)-1);
                DDI_INTR_IMPLDBG((CE_CONT,
                    "set_grp: pvm supported.  Mask set to 0x%x\n",
                    pci_config_get32(handle, msi_mask_off)));
        }

        iflag = intr_clear();
        lock_set(&apic_ioapic_lock);

        /*
         * Do the first rebind and check for errors.  apic_rebind_all returns
         * an error if the CPU is not accepting interrupts.  If the first one
         * succeeds they all will.
         */
        if (apic_rebind_all(irqps[0], new_cpu))
                (void) apic_rebind_all(irqps[0], orig_cpu);
        else {
                irqps[0]->airq_cpu = new_cpu;

                for (i = 1; i < num_vectors; i++) {
                        (void) apic_rebind_all(irqps[i], new_cpu);
                        irqps[i]->airq_cpu = new_cpu;
                }
                *result = 0;    /* SUCCESS */
        }

        lock_clear(&apic_ioapic_lock);
        intr_restore(iflag);

        /* Re-enable vectors if per-vector masking is supported. */
        if (msi_ctrl & PCI_MSI_PVM_MASK) {
                pci_config_put32(handle, msi_mask_off, msi_pvm);
                DDI_INTR_IMPLDBG((CE_CONT,
                    "set_grp: pvm supported.  Mask restored to 0x%x\n",
                    pci_config_get32(handle, msi_mask_off)));
        }

set_grp_intr_done:
        if (*result != 0)
                return (PSM_FAILURE);

        return (PSM_SUCCESS);
}

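/*
 * apic_get_vector_intr_info:
 *      Fill in the apic_get_intr_t pointed to by intr_params_p with the
 *      requested information (bound CPU, vector, number of devices and
 *      dip list) for the interrupt identified by "vecirq", which is
 *      either a vector or an IRQ depending on the request flags.
 */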
int
apic_get_vector_intr_info(int vecirq, apic_get_intr_t *intr_params_p)
{
        struct autovec *av_dev;
        uchar_t irqno;
        int i;
        apic_irq_t *irq_p;

        /* Sanity check the vector/irq argument. */
        ASSERT((vecirq >= 0) && (vecirq <= APIC_MAX_VECTOR));

        mutex_enter(&airq_mutex);

        /*
         * Convert the vecirq arg to an irq using vector_to_irq table
         * if the arg is a vector.  Pass thru if already an irq.
         */
        if ((intr_params_p->avgi_req_flags & PSMGI_INTRBY_FLAGS) ==
            PSMGI_INTRBY_VEC)
                irqno = apic_vector_to_irq[vecirq];
        else
                irqno = vecirq;

        irq_p = apic_irq_table[irqno];

        if ((irq_p == NULL) ||
            ((irq_p->airq_mps_intr_index != RESERVE_INDEX) &&
            ((irq_p->airq_temp_cpu == IRQ_UNBOUND) ||
            (irq_p->airq_temp_cpu == IRQ_UNINIT)))) {
                mutex_exit(&airq_mutex);
                return (PSM_FAILURE);
        }

        if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {

                /* Get the (temp) cpu from apic_irq table, indexed by irq. */
                intr_params_p->avgi_cpu_id = irq_p->airq_temp_cpu;

                /* Return user bound info for intrd. */
                if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
                        intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
                        intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
                }
        }

        if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR)
                intr_params_p->avgi_vector = irq_p->airq_vector;

        if (intr_params_p->avgi_req_flags &
            (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS))
                /* Get number of devices from apic_irq table shared field. */
                intr_params_p->avgi_num_devs = irq_p->airq_share;

        if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

                intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

                /* Some devices have NULL dip.  Don't count these. */
                if (intr_params_p->avgi_num_devs > 0) {
                        for (i = 0, av_dev = autovect[irqno].avh_link;
                            av_dev; av_dev = av_dev->av_link)
                                if (av_dev->av_vector && av_dev->av_dip)
                                        i++;
                        intr_params_p->avgi_num_devs =
                            MIN(intr_params_p->avgi_num_devs, i);
                }

                /* There are no viable dips to return. */
                if (intr_params_p->avgi_num_devs == 0)
                        intr_params_p->avgi_dip_list = NULL;

                else {  /* Return list of dips */

                        /* Allocate space in array for that number of devs. */
                        intr_params_p->avgi_dip_list = kmem_zalloc(
                            intr_params_p->avgi_num_devs *
                            sizeof (dev_info_t *),
                            KM_SLEEP);

                        /*
                         * Loop through the device list of the autovec table
                         * filling in the dip array.
                         *
                         * Note that the autovect table may have some special
                         * entries which contain NULL dips.  These will be
                         * ignored.
                         */
                        for (i = 0, av_dev = autovect[irqno].avh_link;
                            av_dev; av_dev = av_dev->av_link)
                                if (av_dev->av_vector && av_dev->av_dip)
                                        intr_params_p->avgi_dip_list[i++] =
                                            av_dev->av_dip;
                }
        }

        mutex_exit(&airq_mutex);

        return (PSM_SUCCESS);
}

/*
 * This function provides the external interface to the nexus for all
 * functionality related to the new DDI interrupt framework.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *           requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *           passed back if the return value is PSM_SUCCESS
 *
 * Output:
 * The return value is either PSM_SUCCESS or PSM_FAILURE.
 */
int
apic_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
        int             cap;
        int             count_vec;
        int             old_priority;
        int             new_priority;
        int             new_cpu;
        apic_irq_t      *irqp;
        struct intrspec *ispec, intr_spec;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_intr_ops: dip: %p hdlp: %p "
            "intr_op: %x\n", (void *)dip, (void *)hdlp, intr_op));

        ispec = &intr_spec;
        ispec->intrspec_pri = hdlp->ih_pri;
        ispec->intrspec_vec = hdlp->ih_inum;
        ispec->intrspec_func = hdlp->ih_cb_func;

        switch (intr_op) {
        case PSM_INTR_OP_CHECK_MSI:
                /*
                 * Check whether MSI/X is supported at the APIC level; if
                 * it is not, mask off the MSI/X bits in hdlp->ih_type
                 * before returning.  If MSI/X is supported, leave
                 * ih_type unchanged and return.
                 *
                 * hdlp->ih_type passed in from the nexus has all the
                 * interrupt types supported by the device.
                 */
                if (apic_support_msi == 0) {
                        /*
                         * If apic_support_msi is not set, call
                         * apic_check_msi_support() first to check whether
                         * MSI is supported.
                         */
                        if (apic_check_msi_support() == PSM_SUCCESS)
                                apic_support_msi = 1;
                        else
                                apic_support_msi = -1;
                }
                if (apic_support_msi == 1) {
                        if (apic_msix_enable)
                                *result = hdlp->ih_type;
                        else
                                *result = hdlp->ih_type & ~DDI_INTR_TYPE_MSIX;
                } else
                        *result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
                            DDI_INTR_TYPE_MSIX);
                break;
        case PSM_INTR_OP_ALLOC_VECTORS:
                if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
                        *result = apic_alloc_msi_vectors(dip, hdlp->ih_inum,
                            hdlp->ih_scratch1, hdlp->ih_pri,
                            (int)(uintptr_t)hdlp->ih_scratch2);
                else
                        *result = apic_alloc_msix_vectors(dip, hdlp->ih_inum,
                            hdlp->ih_scratch1, hdlp->ih_pri,
                            (int)(uintptr_t)hdlp->ih_scratch2);
                break;
        case PSM_INTR_OP_FREE_VECTORS:
                apic_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
                    hdlp->ih_pri, hdlp->ih_type);
                break;
        case PSM_INTR_OP_NAVAIL_VECTORS:
                *result = apic_navail_vector(dip, hdlp->ih_pri);
                break;
        case PSM_INTR_OP_XLATE_VECTOR:
                ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
                *result = apic_introp_xlate(dip, ispec, hdlp->ih_type);
                if (*result == -1)
                        return (PSM_FAILURE);
                break;
        case PSM_INTR_OP_GET_PENDING:
                if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
                        return (PSM_FAILURE);
                *result = apic_get_pending(irqp, hdlp->ih_type);
                break;
        case PSM_INTR_OP_CLEAR_MASK:
                if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
                        return (PSM_FAILURE);
                irqp = apic_find_irq(dip, ispec, hdlp->ih_type);
                if (irqp == NULL)
                        return (PSM_FAILURE);
                apic_clear_mask(irqp);
                break;
        case PSM_INTR_OP_SET_MASK:
                if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
                        return (PSM_FAILURE);
                if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
                        return (PSM_FAILURE);
                apic_set_mask(irqp);
                break;
        case PSM_INTR_OP_GET_CAP:
                cap = DDI_INTR_FLAG_PENDING;
                if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
                        cap |= DDI_INTR_FLAG_MASKABLE;
                *result = cap;
                break;
        case PSM_INTR_OP_GET_SHARED:
                if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
                        return (PSM_FAILURE);
                ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
                if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
                        return (PSM_FAILURE);
                *result = (irqp->airq_share > 1) ? 1 : 0;
                break;
        case PSM_INTR_OP_SET_PRI:
                old_priority = hdlp->ih_pri; /* save old value */
                new_priority = *(int *)result;  /* try the new value */

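                /*
                 * Fixed interrupts do not need a new vector for a
                 * priority change, so there is nothing more to do here.
                 */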
                if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
                        return (PSM_SUCCESS);
                }

                /* Now allocate the vectors */
                if (hdlp->ih_type == DDI_INTR_TYPE_MSI) {
                        /* SET_PRI does not support the case of multiple MSI */
                        if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) > 1)
                                return (PSM_FAILURE);

                        count_vec = apic_alloc_msi_vectors(dip, hdlp->ih_inum,
                            1, new_priority,
                            DDI_INTR_ALLOC_STRICT);
                } else {
                        count_vec = apic_alloc_msix_vectors(dip, hdlp->ih_inum,
                            1, new_priority,
                            DDI_INTR_ALLOC_STRICT);
                }

                /* Did we get new vectors? */
                if (!count_vec)
                        return (PSM_FAILURE);

                /* Finally, free the previously allocated vectors */
                apic_free_vectors(dip, hdlp->ih_inum, count_vec,
                    old_priority, hdlp->ih_type);
                break;
        case PSM_INTR_OP_SET_CPU:
        case PSM_INTR_OP_GRP_SET_CPU:
                /*
                 * The interrupt handle given here has been allocated
                 * specifically for this command, and ih_private carries
                 * a CPU value.
                 */
                new_cpu = (int)(intptr_t)hdlp->ih_private;
                if (!apic_cpu_in_range(new_cpu)) {
                        DDI_INTR_IMPLDBG((CE_CONT,
                            "[grp_]set_cpu: cpu out of range: %d\n", new_cpu));
                        *result = EINVAL;
                        return (PSM_FAILURE);
                }
                if (hdlp->ih_vector > APIC_MAX_VECTOR) {
                        DDI_INTR_IMPLDBG((CE_CONT,
                            "[grp_]set_cpu: vector out of range: %d\n",
                            hdlp->ih_vector));
                        *result = EINVAL;
                        return (PSM_FAILURE);
                }
                if ((hdlp->ih_flags & PSMGI_INTRBY_FLAGS) == PSMGI_INTRBY_VEC)
                        hdlp->ih_vector = apic_vector_to_irq[hdlp->ih_vector];
                if (intr_op == PSM_INTR_OP_SET_CPU) {
                        if (apic_set_cpu(hdlp->ih_vector, new_cpu, result) !=
                            PSM_SUCCESS)
                                return (PSM_FAILURE);
                } else {
                        if (apic_grp_set_cpu(hdlp->ih_vector, new_cpu,
                            result) != PSM_SUCCESS)
                                return (PSM_FAILURE);
                }
                break;
        case PSM_INTR_OP_GET_INTR:
                /*
                 * The interrupt handle given here has been allocated
                 * specifically for this command, and ih_private carries
                 * a pointer to an apic_get_intr_t.
                 */
                if (apic_get_vector_intr_info(
                    hdlp->ih_vector, hdlp->ih_private) != PSM_SUCCESS)
                        return (PSM_FAILURE);
                break;
        case PSM_INTR_OP_APIC_TYPE:
                ((apic_get_type_t *)(hdlp->ih_private))->avgi_type =
                    apic_get_apic_type();
                ((apic_get_type_t *)(hdlp->ih_private))->avgi_num_intr =
                    APIC_MAX_VECTOR;
                ((apic_get_type_t *)(hdlp->ih_private))->avgi_num_cpu =
                    boot_ncpus;
                hdlp->ih_ver = apic_get_apic_version();
                break;
        case PSM_INTR_OP_SET_CAP:
        default:
                return (PSM_FAILURE);
        }
        return (PSM_SUCCESS);
}