8628 nvme: use a semaphore to guard submission queue
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Jason King <jason.king@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
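The change adds a counting semaphore (nq_sema) to nvme_qpair_t so that command submission can block while no submission queue entry is free, which is presumably why the n_admin_queue_full and n_async_resubmit_failed error counters are removed further down. Below is a minimal sketch of the idea using the standard semaphore(9F) and mutex(9F) interfaces; the helper names, the include path, and the body comments are illustrative assumptions, not the exact code of this change.

	/*
	 * Illustrative sketch only: how nq_sema might guard submission
	 * queue slots.  Initialized to one less than the queue depth,
	 * the semaphore makes a submitter wait when the queue is full
	 * instead of failing or re-queueing the command.
	 */
	#include <sys/ksynch.h>
	#include "nvme_var.h"	/* nvme_qpair_t, nvme_cmd_t (assumed path) */

	static void
	nvme_qpair_sema_init(nvme_qpair_t *qp)
	{
		sema_init(&qp->nq_sema, qp->nq_nentry - 1, NULL,
		    SEMA_DRIVER, NULL);
	}

	static void
	nvme_submit_cmd_sketch(nvme_qpair_t *qp, nvme_cmd_t *cmd)
	{
		sema_p(&qp->nq_sema);	/* reserve a free SQ slot; may block */
		mutex_enter(&qp->nq_mutex);
		/* ... place cmd->nc_sqe at nq_sqtail and ring nq_sqtdbl ... */
		mutex_exit(&qp->nq_mutex);
	}

	static void
	nvme_complete_cmd_sketch(nvme_qpair_t *qp)
	{
		/* ... retrieve the completion under nq_mutex ... */
		sema_v(&qp->nq_sema);	/* release the slot for any waiter */
	}

Initializing to nq_nentry - 1 rather than nq_nentry reflects the usual ring-queue convention of keeping one slot empty so a full queue can be distinguished from an empty one.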
--- old/usr/src/uts/common/io/nvme/nvme_var.h
+++ new/usr/src/uts/common/io/nvme/nvme_var.h
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
14 14 * Copyright 2016 The MathWorks, Inc. All rights reserved.
15 15 */
16 16
17 17 #ifndef _NVME_VAR_H
18 18 #define _NVME_VAR_H
19 19
20 20 #include <sys/ddi.h>
21 21 #include <sys/sunddi.h>
22 22 #include <sys/blkdev.h>
23 23 #include <sys/taskq_impl.h>
24 24
25 25 /*
26 26 * NVMe driver state
27 27 */
28 28
29 29 #ifdef __cplusplus
30 30 extern "C" {
31 31 #endif
32 32
33 33 #define NVME_FMA_INIT 0x1
34 34 #define NVME_REGS_MAPPED 0x2
35 35 #define NVME_ADMIN_QUEUE 0x4
36 36 #define NVME_CTRL_LIMITS 0x8
37 37 #define NVME_INTERRUPTS 0x10
38 38
39 39 #define NVME_MIN_ADMIN_QUEUE_LEN 16
40 40 #define NVME_MIN_IO_QUEUE_LEN 16
41 41 #define NVME_DEFAULT_ADMIN_QUEUE_LEN 256
42 42 #define NVME_DEFAULT_IO_QUEUE_LEN 1024
43 43 #define NVME_DEFAULT_ASYNC_EVENT_LIMIT 10
44 44 #define NVME_MIN_ASYNC_EVENT_LIMIT 1
45 45 #define NVME_DEFAULT_MIN_BLOCK_SIZE 512
46 46
47 47
48 48 typedef struct nvme nvme_t;
49 49 typedef struct nvme_namespace nvme_namespace_t;
50 50 typedef struct nvme_minor_state nvme_minor_state_t;
51 51 typedef struct nvme_dma nvme_dma_t;
52 52 typedef struct nvme_cmd nvme_cmd_t;
53 53 typedef struct nvme_qpair nvme_qpair_t;
54 54 typedef struct nvme_task_arg nvme_task_arg_t;
55 55
56 56 struct nvme_minor_state {
57 57 kmutex_t nm_mutex;
58 58 boolean_t nm_oexcl;
59 59 uint_t nm_ocnt;
60 60 };
61 61
62 62 struct nvme_dma {
63 63 ddi_dma_handle_t nd_dmah;
64 64 ddi_acc_handle_t nd_acch;
65 65 ddi_dma_cookie_t nd_cookie;
66 66 uint_t nd_ncookie;
67 67 caddr_t nd_memp;
68 68 size_t nd_len;
69 69 boolean_t nd_cached;
70 70 };
71 71
72 72 struct nvme_cmd {
73 73 nvme_sqe_t nc_sqe;
74 74 nvme_cqe_t nc_cqe;
75 75
76 76 void (*nc_callback)(void *);
77 77 bd_xfer_t *nc_xfer;
78 78 boolean_t nc_completed;
79 79 boolean_t nc_dontpanic;
80 80 uint16_t nc_sqid;
81 81
82 82 nvme_dma_t *nc_dma;
83 83
84 84 kmutex_t nc_mutex;
85 85 kcondvar_t nc_cv;
86 86
87 87 taskq_ent_t nc_tqent;
88 88 nvme_t *nc_nvme;
89 89 };
90 90
91 91 struct nvme_qpair {
92 92 size_t nq_nentry;
93 93
94 94 nvme_dma_t *nq_sqdma;
95 95 nvme_sqe_t *nq_sq;
96 96 uint_t nq_sqhead;
97 97 uint_t nq_sqtail;
98 98 uintptr_t nq_sqtdbl;
99 99
100 100 nvme_dma_t *nq_cqdma;
101 101 nvme_cqe_t *nq_cq;
102 102 uint_t nq_cqhead;
103 103 uint_t nq_cqtail;
104 104 uintptr_t nq_cqhdbl;
105 105
106 106 nvme_cmd_t **nq_cmd;
107 107 uint16_t nq_next_cmd;
108 108 uint_t nq_active_cmds;
109 109 int nq_phase;
110 110
111 111 kmutex_t nq_mutex;
112 + ksema_t nq_sema;
112 113 };
113 114
114 115 struct nvme {
115 116 dev_info_t *n_dip;
116 117 int n_progress;
117 118
118 119 caddr_t n_regs;
119 120 ddi_acc_handle_t n_regh;
120 121
121 122 kmem_cache_t *n_cmd_cache;
122 123 kmem_cache_t *n_prp_cache;
123 124
124 125 size_t n_inth_sz;
125 126 ddi_intr_handle_t *n_inth;
126 127 int n_intr_cnt;
127 128 uint_t n_intr_pri;
128 129 int n_intr_cap;
129 130 int n_intr_type;
130 131 int n_intr_types;
131 132
132 133 char *n_product;
133 134 char *n_vendor;
134 135
135 136 nvme_version_t n_version;
136 137 boolean_t n_dead;
137 138 boolean_t n_strict_version;
138 139 boolean_t n_ignore_unknown_vendor_status;
139 140 uint32_t n_admin_queue_len;
140 141 uint32_t n_io_queue_len;
141 142 uint16_t n_async_event_limit;
142 143 uint_t n_min_block_size;
143 144 uint16_t n_abort_command_limit;
144 145 uint64_t n_max_data_transfer_size;
145 146 boolean_t n_write_cache_present;
146 147 boolean_t n_write_cache_enabled;
147 148 int n_error_log_len;
148 149 boolean_t n_lba_range_supported;
149 150 boolean_t n_auto_pst_supported;
150 151
151 152 int n_nssr_supported;
152 153 int n_doorbell_stride;
153 154 int n_timeout;
154 155 int n_arbitration_mechanisms;
155 156 int n_cont_queues_reqd;
156 157 int n_max_queue_entries;
157 158 int n_pageshift;
158 159 int n_pagesize;
159 160
160 161 int n_namespace_count;
161 162 int n_ioq_count;
162 163
163 164 nvme_identify_ctrl_t *n_idctl;
164 165
165 166 nvme_qpair_t *n_adminq;
166 167 nvme_qpair_t **n_ioq;
167 168
168 169 nvme_namespace_t *n_ns;
169 170
170 171 ddi_dma_attr_t n_queue_dma_attr;
171 172 ddi_dma_attr_t n_prp_dma_attr;
172 173 ddi_dma_attr_t n_sgl_dma_attr;
173 174 ddi_device_acc_attr_t n_reg_acc_attr;
174 175 ddi_iblock_cookie_t n_fm_ibc;
175 176 int n_fm_cap;
176 177
177 178 ksema_t n_abort_sema;
178 179
179 180 ddi_taskq_t *n_cmd_taskq;
180 181
181 182 /* state for devctl minor node */
182 183 nvme_minor_state_t n_minor;
183 184
184 185 /* errors detected by driver */
185 186 uint32_t n_dma_bind_err;
186 187 uint32_t n_abort_failed;
187 188 uint32_t n_cmd_timeout;
188 189 uint32_t n_cmd_aborted;
189 - uint32_t n_async_resubmit_failed;
190 190 uint32_t n_wrong_logpage;
191 191 uint32_t n_unknown_logpage;
192 192 uint32_t n_too_many_cookies;
193 - uint32_t n_admin_queue_full;
194 193
195 194 /* errors detected by hardware */
196 195 uint32_t n_data_xfr_err;
197 196 uint32_t n_internal_err;
198 197 uint32_t n_abort_rq_err;
199 198 uint32_t n_abort_sq_del;
200 199 uint32_t n_nvm_cap_exc;
201 200 uint32_t n_nvm_ns_notrdy;
202 201 uint32_t n_inv_cq_err;
203 202 uint32_t n_inv_qid_err;
204 203 uint32_t n_max_qsz_exc;
205 204 uint32_t n_inv_int_vect;
206 205 uint32_t n_inv_log_page;
207 206 uint32_t n_inv_format;
208 207 uint32_t n_inv_q_del;
209 208 uint32_t n_cnfl_attr;
210 209 uint32_t n_inv_prot;
211 210 uint32_t n_readonly;
212 211
213 212 /* errors reported by asynchronous events */
214 213 uint32_t n_diagfail_event;
215 214 uint32_t n_persistent_event;
216 215 uint32_t n_transient_event;
217 216 uint32_t n_fw_load_event;
218 217 uint32_t n_reliability_event;
219 218 uint32_t n_temperature_event;
220 219 uint32_t n_spare_event;
221 220 uint32_t n_vendor_event;
222 221 uint32_t n_unknown_event;
223 222
224 223 };
225 224
226 225 struct nvme_namespace {
227 226 nvme_t *ns_nvme;
228 227 uint8_t ns_eui64[8];
229 228 char ns_name[17];
230 229
231 230 bd_handle_t ns_bd_hdl;
232 231
233 232 uint32_t ns_id;
234 233 size_t ns_block_count;
235 234 size_t ns_block_size;
236 235 size_t ns_best_block_size;
237 236
238 237 boolean_t ns_ignore;
239 238
240 239 nvme_identify_nsid_t *ns_idns;
241 240
242 241 /* state for attachment point minor node */
243 242 nvme_minor_state_t ns_minor;
244 243
245 244 /*
246 245 * If a namespace has no EUI64, we create a devid in
247 246 * nvme_prepare_devid().
248 247 */
249 248 char *ns_devid;
250 249 };
251 250
252 251 struct nvme_task_arg {
253 252 nvme_t *nt_nvme;
254 253 nvme_cmd_t *nt_cmd;
255 254 };
256 255
257 256
258 257 #ifdef __cplusplus
259 258 }
260 259 #endif
261 260
262 261 #endif /* _NVME_VAR_H */