2154 /* if we don't need a copy buffer, we don't need to sync */
2155 if (sinfo->si_copybuf_req == 0) {
2156 hp->dmai_rflags |= DMP_NOSYNC;
2157 }
2158
2159 /*
2160 * if we don't need the copybuf and we don't need to do a partial, we
2161 * hit the fast path. All the high performance devices should be trying
2162 * to hit this path. To hit this path, a device should be able to reach
2163 * all of memory, shouldn't try to bind more than it can transfer, and
2164 * the buffer shouldn't require more cookies than the driver/device can
2165 * handle [sgllen].
2166 *
2167 * Note that negative values of dma_attr_sgllen are supposed
2168 * to mean unlimited, but we just cast them to mean a
2169 * "ridiculous large limit". This saves some extra checks on
2170 * hot paths.
2171 */
2172 if ((sinfo->si_copybuf_req == 0) &&
2173 (sinfo->si_sgl_size <= (unsigned)attr->dma_attr_sgllen) &&
2174 (dmao->dmao_size < dma->dp_maxxfer)) {
2175 fast:
2176 /*
2177 * If the driver supports FMA, insert the handle in the FMA DMA
2178 * handle cache.
2179 */
2180 if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
2181 hp->dmai_error.err_cf = rootnex_dma_check;
2182
2183 /*
2184 * copy out the first cookie and ccountp, set the cookie
2185 * pointer to the second cookie. The first cookie is passed
2186 * back on the stack. Additional cookies are accessed via
2187 * ddi_dma_nextcookie()
2188 */
2189 *cookiep = dma->dp_cookies[0];
2190 *ccountp = sinfo->si_sgl_size;
2191 hp->dmai_cookie++;
2192 hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2193 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2194 ROOTNEX_DPROBE4(rootnex__bind__fast, dev_info_t *, rdip,
|
2154 /* if we don't need a copy buffer, we don't need to sync */
2155 if (sinfo->si_copybuf_req == 0) {
2156 hp->dmai_rflags |= DMP_NOSYNC;
2157 }
2158
2159 /*
2160 * if we don't need the copybuf and we don't need to do a partial, we
2161 * hit the fast path. All the high performance devices should be trying
2162 * to hit this path. To hit this path, a device should be able to reach
2163 * all of memory, shouldn't try to bind more than it can transfer, and
2164 * the buffer shouldn't require more cookies than the driver/device can
2165 * handle [sgllen].
2166 *
2167 * Note that negative values of dma_attr_sgllen are supposed
2168 * to mean unlimited, but we just cast them to mean a
2169 * "ridiculous large limit". This saves some extra checks on
2170 * hot paths.
2171 */
2172 if ((sinfo->si_copybuf_req == 0) &&
2173 (sinfo->si_sgl_size <= (unsigned)attr->dma_attr_sgllen) &&
2174 (dmao->dmao_size <= dma->dp_maxxfer)) {
2175 fast:
2176 /*
2177 * If the driver supports FMA, insert the handle in the FMA DMA
2178 * handle cache.
2179 */
2180 if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
2181 hp->dmai_error.err_cf = rootnex_dma_check;
2182
2183 /*
2184 * copy out the first cookie and ccountp, set the cookie
2185 * pointer to the second cookie. The first cookie is passed
2186 * back on the stack. Additional cookies are accessed via
2187 * ddi_dma_nextcookie()
2188 */
2189 *cookiep = dma->dp_cookies[0];
2190 *ccountp = sinfo->si_sgl_size;
2191 hp->dmai_cookie++;
2192 hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2193 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2194 ROOTNEX_DPROBE4(rootnex__bind__fast, dev_info_t *, rdip,
|