/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2000 to 2010, LSI Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms of all code within
 * this file that is exclusively owned by LSI, with or without
 * modification, is permitted provided that, in addition to the CDDL 1.0
 * License requirements, the following conditions are met:
 *
 * Neither the name of the author nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * mptsas_raid - This file contains all the RAID related functions for the
 * MPT interface.
 */

#if defined(lint) || defined(DEBUG)
#define	MPTSAS_DEBUG
#endif

#define	MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX	2

/*
 * standard header files
 */
#include <sys/note.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include <sys/raidioctl.h>

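/*
 * The MPI2 structure definitions below describe data that is exchanged
 * directly with the IOC firmware, so they are included with byte packing
 * to match the layout the hardware expects.
 */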
#pragma pack(1)

#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>

#pragma pack()

/*
 * private header files.
 */
#include <sys/scsi/adapters/mpt_sas/mptsas_var.h>

static int mptsas_get_raid_wwid(mptsas_t *mpt, mptsas_raidvol_t *raidvol);

extern int mptsas_check_dma_handle(ddi_dma_handle_t handle);
extern int mptsas_check_acc_handle(ddi_acc_handle_t handle);
extern mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t,
    uint64_t, uint32_t, mptsas_phymask_t, uint8_t, mptsas_t *);

static int
mptsas_raidconf_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidConfigurationPage0_t raidconfig_page0;
	pMpi2RaidConfig0ConfigElement_t element;
	uint32_t *confignum;
	int rval = DDI_SUCCESS, i;
	uint8_t numelements, vol, disk;
	uint16_t elementtype, voldevhandle;
	uint16_t etype_vol, etype_pd, etype_hs;
	uint16_t etype_oce;
	mptsas_slots_t *slots = mpt->m_active;
	m_raidconfig_t *raidconfig;
	uint64_t raidwwn;
	uint32_t native;
	mptsas_target_t *ptgt;
	uint32_t configindex;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) {
		return (DDI_FAILURE);
	}

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_get_raid_conf_page0 "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	confignum = va_arg(ap, uint32_t *);
	configindex = va_arg(ap, uint32_t);
	raidconfig_page0 = (pMpi2RaidConfigurationPage0_t)page_memp;
	/*
	 * Get all RAID configurations.
	 */
	etype_vol = MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT;
	etype_pd = MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT;
	etype_hs = MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT;
	etype_oce = MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT;
	/*
	 * Set up page address for next time through.
	 */
	*confignum = ddi_get8(accessp,
	    &raidconfig_page0->ConfigNum);

	/*
	 * Point to the right config in the structure.
	 * Increment the number of valid RAID configs.
	 */
	raidconfig = &slots->m_raidconfig[configindex];
	slots->m_num_raid_configs++;

	/*
	 * Set the native flag if this is not a foreign
	 * configuration.
	 */
	native = ddi_get32(accessp, &raidconfig_page0->Flags);
	if (native & MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG) {
		native = FALSE;
	} else {
		native = TRUE;
	}
	raidconfig->m_native = (uint8_t)native;

	/*
	 * Get volume information for the volumes in the
	 * config.
	 */
	numelements = ddi_get8(accessp, &raidconfig_page0->NumElements);
	vol = 0;
	disk = 0;
	element = (pMpi2RaidConfig0ConfigElement_t)
	    &raidconfig_page0->ConfigElement;

	for (i = 0; ((i < numelements) && native); i++, element++) {
		/*
		 * Get the element type. Could be Volume,
		 * PhysDisk, Hot Spare, or Online Capacity
		 * Expansion PhysDisk.
		 */
		elementtype = ddi_get16(accessp, &element->ElementFlags);
		elementtype &= MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;

		/*
		 * For volumes, get the RAID settings and the
		 * WWID.
		 */
		if (elementtype == etype_vol) {
			voldevhandle = ddi_get16(accessp,
			    &element->VolDevHandle);
			raidconfig->m_raidvol[vol].m_israid = 1;
			raidconfig->m_raidvol[vol].
			    m_raidhandle = voldevhandle;
			/*
			 * Get the settings for the raid
			 * volume. This includes the
			 * DevHandles for the disks making up
			 * the raid volume.
			 */
			if (mptsas_get_raid_settings(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			/*
			 * Get the WWID of the RAID volume for
			 * SAS HBA
			 */
			if (mptsas_get_raid_wwid(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			raidwwn = raidconfig->m_raidvol[vol].
			    m_raidwwid;

			/*
			 * RAID uses phymask of 0.
			 */
			ptgt = mptsas_tgt_alloc(&slots->m_tgttbl,
			    voldevhandle, raidwwn, 0, 0, 0, mpt);

			raidconfig->m_raidvol[vol].m_raidtgt =
			    ptgt;

			/*
			 * Increment volume index within this
			 * raid config.
			 */
			vol++;
		} else if ((elementtype == etype_pd) ||
		    (elementtype == etype_hs) ||
		    (elementtype == etype_oce)) {
			/*
			 * For all other element types, put
			 * their DevHandles in the phys disk
			 * list of the config. These are all
			 * some variation of a Phys Disk and
			 * this list is used to keep these
			 * disks from going online.
			 */
			raidconfig->m_physdisk_devhdl[disk] = ddi_get16(accessp,
			    &element->PhysDiskDevHandle);

			/*
			 * Increment disk index within this
			 * raid config.
			 */
			disk++;
		}
	}

	return (rval);
}

int
mptsas_get_raid_info(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	uint32_t confignum, pageaddress;
	uint8_t configindex;
	mptsas_slots_t *slots = mpt->m_active;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Clear all RAID info before starting.
	 */
	bzero(slots->m_raidconfig, sizeof (slots->m_raidconfig));
	slots->m_num_raid_configs = 0;

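	/*
	 * Scan the RAID configuration pages using the GET_NEXT_CONFIGNUM
	 * form, starting from an initial config number of 0xff.  The page 0
	 * callback stores the config number of each page it reads so the
	 * next request fetches the following configuration; the loop ends
	 * when the IOC reports that no further configuration page exists.
	 */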
	configindex = 0;
	confignum = 0xff;
	pageaddress = MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM | confignum;
	while (rval == DDI_SUCCESS) {
		/*
		 * Get the header and config page. reply contains the reply
		 * frame, which holds status info for the request.
		 */
		rval = mptsas_access_config_page(mpt,
		    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
		    MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG, 0, pageaddress,
		    mptsas_raidconf_page_0_cb, &confignum, configindex);
		configindex++;
		pageaddress = MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM |
		    confignum;
	}

	return (rval);
}

static int
mptsas_raidvol_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage0_t raidpage;
	int rval = DDI_SUCCESS, i;
	mptsas_raidvol_t *raidvol;
	uint8_t numdisks, volstate, voltype, physdisknum;
	uint32_t volsetting;
	uint32_t statusflags, resync_flag;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}

	raidvol = va_arg(ap, mptsas_raidvol_t *);

	raidpage = (pMpi2RaidVolPage0_t)page_memp;
	volstate = ddi_get8(accessp, &raidpage->VolumeState);
	volsetting = ddi_get32(accessp,
	    (uint32_t *)(void *)&raidpage->VolumeSettings);
	statusflags = ddi_get32(accessp, &raidpage->VolumeStatusFlags);
	voltype = ddi_get8(accessp, &raidpage->VolumeType);

	raidvol->m_state = volstate;
	raidvol->m_statusflags = statusflags;
	/*
	 * Volume size is not used right now. Set to 0.
	 */
	raidvol->m_raidsize = 0;
	raidvol->m_settings = volsetting;
	raidvol->m_raidlevel = voltype;

	if (statusflags & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is quiesced\n",
		    raidvol->m_raidhandle);
	}

	if (statusflags &
	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is resyncing\n",
		    raidvol->m_raidhandle);
	}

	resync_flag = MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	switch (volstate) {
	case MPI2_RAID_VOL_STATE_OPTIMAL:
		mptsas_log(mpt, CE_NOTE, "?Volume %d is "
		    "optimal\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
		if ((statusflags & resync_flag) == 0) {
			mptsas_log(mpt, CE_WARN, "Volume %d "
			    "is degraded\n",
			    raidvol->m_raidhandle);
		}
		break;
	case MPI2_RAID_VOL_STATE_FAILED:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "failed\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_MISSING:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "missing\n", raidvol->m_raidhandle);
		break;
	default:
		break;
	}
	numdisks = raidpage->NumPhysDisks;
	raidvol->m_ndisks = numdisks;
	for (i = 0; i < numdisks; i++) {
		physdisknum = raidpage->PhysDisk[i].PhysDiskNum;
		raidvol->m_disknum[i] = physdisknum;
		if (mptsas_get_physdisk_settings(mpt, raidvol,
		    physdisknum))
			break;
	}
	return (rval);
}

int
mptsas_get_raid_settings(mptsas_t *mpt, mptsas_raidvol_t *raidvol)
{
	int rval = DDI_SUCCESS;
	uint32_t page_address;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page. reply contains the reply frame,
	 * which holds status info for the request.
	 */
	page_address = (MPI2_RAID_VOLUME_PGAD_FORM_MASK &
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE) | raidvol->m_raidhandle;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_VOLUME, 0, page_address,
	    mptsas_raidvol_page_0_cb, raidvol);

	return (rval);
}

static int
mptsas_raidvol_page_1_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage1_t raidpage;
	int rval = DDI_SUCCESS, i;
	uint8_t *sas_addr = NULL;
	uint8_t tmp_sas_wwn[SAS_WWN_BYTE_SIZE];
	uint64_t *sas_wwn;

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page_1_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	sas_wwn = va_arg(ap, uint64_t *);

	raidpage = (pMpi2RaidVolPage1_t)page_memp;
	sas_addr = (uint8_t *)(&raidpage->WWID);
	for (i = 0; i < SAS_WWN_BYTE_SIZE; i++) {
		tmp_sas_wwn[i] = ddi_get8(accessp, sas_addr + i);
	}
	bcopy(tmp_sas_wwn, sas_wwn, SAS_WWN_BYTE_SIZE);
	*sas_wwn = LE_64(*sas_wwn);
	return (rval);
}

static int
mptsas_get_raid_wwid(mptsas_t *mpt, mptsas_raidvol_t *raidvol)
{
	int rval = DDI_SUCCESS;
	uint32_t page_address;
	uint64_t sas_wwn;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page. reply contains the reply frame,
	 * which holds status info for the request.
	 */
	page_address = (MPI2_RAID_VOLUME_PGAD_FORM_MASK &
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE) | raidvol->m_raidhandle;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_VOLUME, 1, page_address,
	    mptsas_raidvol_page_1_cb, &sas_wwn);

	/*
	 * Get the required information from the page.
	 */
	if (rval == DDI_SUCCESS) {

		/*
		 * replace top nibble of WWID of RAID to '3' for OBP
		 */
		sas_wwn = MPTSAS_RAID_WWID(sas_wwn);
		raidvol->m_raidwwid = sas_wwn;
	}

done:
	return (rval);
}

static int
mptsas_raidphydsk_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidPhysDiskPage0_t diskpage;
	int rval = DDI_SUCCESS;
	uint16_t *devhdl;
	uint8_t *state;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidphydsk_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	devhdl = va_arg(ap, uint16_t *);
	state = va_arg(ap, uint8_t *);
	diskpage = (pMpi2RaidPhysDiskPage0_t)page_memp;
	*devhdl = ddi_get16(accessp, &diskpage->DevHandle);
	*state = ddi_get8(accessp, &diskpage->PhysDiskState);
	return (rval);
}

int
mptsas_get_physdisk_settings(mptsas_t *mpt, mptsas_raidvol_t *raidvol,
    uint8_t physdisknum)
{
	int rval = DDI_SUCCESS, i;
	uint8_t state;
	uint16_t devhdl;
	uint32_t page_address;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page. reply contains the reply frame,
	 * which holds status info for the request.
	 */
	page_address = (MPI2_PHYSDISK_PGAD_FORM_MASK &
	    MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM) | physdisknum;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK, 0, page_address,
	    mptsas_raidphydsk_page_0_cb, &devhdl, &state);

	/*
	 * Get the required information from the page.
	 */
	if (rval == DDI_SUCCESS) {
		for (i = 0; i < MPTSAS_MAX_DISKS_IN_VOL; i++) {
			/* find the correct position in the arrays */
			if (raidvol->m_disknum[i] == physdisknum)
				break;
		}
		raidvol->m_devhdl[i] = devhdl;

		switch (state) {
		case MPI2_RAID_PD_STATE_OFFLINE:
			raidvol->m_diskstatus[i] =
			    RAID_DISKSTATUS_FAILED;
			break;

		case MPI2_RAID_PD_STATE_HOT_SPARE:
		case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
		case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
			break;

		case MPI2_RAID_PD_STATE_DEGRADED:
		case MPI2_RAID_PD_STATE_OPTIMAL:
		case MPI2_RAID_PD_STATE_REBUILDING:
		case MPI2_RAID_PD_STATE_ONLINE:
		default:
			raidvol->m_diskstatus[i] =
			    RAID_DISKSTATUS_GOOD;
			break;
		}
	}

	return (rval);
}

/*
 * RAID Action for System Shutdown. This request uses the dedicated TM slot to
 * avoid a call to mptsas_save_cmd. Since Solaris requires that the mutex is
 * not held during the mptsas_quiesce function, this RAID action must not use
 * the normal code path of requests and replies.
 */
void
mptsas_raid_action_system_shutdown(mptsas_t *mpt)
{
	pMpi2RaidActionRequest_t action;
	uint8_t ir_active = FALSE, reply_type;
	uint8_t function, found_reply = FALSE;
	uint16_t SMID, action_type;
	mptsas_slots_t *slots = mpt->m_active;
	int config, vol;
	mptsas_cmd_t *cmd;
	uint32_t request_desc_low, reply_addr;
	int cnt;
	pMpi2ReplyDescriptorsUnion_t reply_desc_union;
	pMPI2DefaultReply_t reply;
	pMpi2AddressReplyDescriptor_t address_reply;

	/*
	 * Before doing the system shutdown RAID Action, make sure that the IOC
	 * supports IR and make sure there is a valid volume for the request.
	 */
	if (mpt->m_ir_capable) {
		for (config = 0; (config < slots->m_num_raid_configs) &&
		    (!ir_active); config++) {
			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
				if (slots->m_raidconfig[config].m_raidvol[vol].
				    m_israid) {
					ir_active = TRUE;
					break;
				}
			}
		}
	}
	if (!ir_active) {
		return;
	}

	/*
	 * If TM slot is already being used (highly unlikely), show message and
	 * don't issue the RAID action.
	 */
	if (slots->m_slot[MPTSAS_TM_SLOT(mpt)] != NULL) {
		mptsas_log(mpt, CE_WARN, "RAID Action slot in use. Cancelling"
		    " System Shutdown RAID Action.\n");
		return;
	}

	/*
	 * Create the cmd and put it in the dedicated TM slot.
	 */
	cmd = &(mpt->m_event_task_mgmt.m_event_cmd);
	bzero((caddr_t)cmd, sizeof (*cmd));
	cmd->cmd_pkt = NULL;
	cmd->cmd_slot = MPTSAS_TM_SLOT(mpt);
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = cmd;

	/*
	 * Form message for raid action.
	 */
	action = (pMpi2RaidActionRequest_t)(mpt->m_req_frame +
	    (mpt->m_req_frame_size * cmd->cmd_slot));
	bzero(action, mpt->m_req_frame_size);
	action->Function = MPI2_FUNCTION_RAID_ACTION;
	action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/*
	 * Send RAID Action.
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
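	/*
	 * The low 32 bits of the request descriptor carry the SMID (here the
	 * dedicated TM slot number) in the upper 16 bits and the default
	 * request descriptor type in the flags field.
	 */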
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);

	/*
	 * Even though reply does not matter because the system is shutting
	 * down, wait no more than 5 seconds here to get the reply just because
	 * we don't want to leave it hanging if it's coming. Poll because
	 * interrupts are disabled when this function is called.
	 */
	for (cnt = 0; cnt < 5000; cnt++) {
		/*
		 * Check for a reply.
		 */
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * There is a reply. If it's not an address reply, ignore it.
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_type != MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			goto clear_and_continue;
		}

		/*
		 * SMID must be the TM slot since that's what we're using for
		 * this RAID action. If not, ignore this reply.
		 */
		address_reply =
		    (pMpi2AddressReplyDescriptor_t)reply_desc_union;
		SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
		    &address_reply->SMID);
		if (SMID != MPTSAS_TM_SLOT(mpt)) {
			goto clear_and_continue;
		}

		/*
		 * If reply frame is not in the proper range ignore it.
		 */
		reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
		    &address_reply->ReplyFrameAddress);
		if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
		    (reply_addr >= (mpt->m_reply_frame_dma_addr +
		    (mpt->m_reply_frame_size * mpt->m_free_queue_depth))) ||
		    ((reply_addr - mpt->m_reply_frame_dma_addr) %
		    mpt->m_reply_frame_size != 0)) {
			goto clear_and_continue;
		}

		/*
		 * If not a RAID action reply ignore it.
		 */
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame +
		    (reply_addr - mpt->m_reply_frame_dma_addr));
		function = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &reply->Function);
		if (function != MPI2_FUNCTION_RAID_ACTION) {
			goto clear_and_continue;
		}

		/*
		 * Finally, make sure this is the System Shutdown RAID action.
		 * If not, ignore reply.
		 */
		action_type = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->FunctionDependent1);
		if (action_type !=
		    MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED) {
			goto clear_and_continue;
		}
		found_reply = TRUE;

clear_and_continue:
		/*
		 * Clear the reply descriptor for re-use and increment index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		/*
		 * Update the global reply index and keep looking for the
		 * reply if not found yet.
		 */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyPostHostIndex,
		    mpt->m_post_index);
		if (!found_reply) {
			continue;
		}

		break;
	}

	/*
	 * clear the used slot as the last step.
	 */
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = NULL;
}

int
mptsas_delete_volume(mptsas_t *mpt, uint16_t volid)
{
	int config, i = 0, vol = (-1);
	mptsas_slots_t *slots = mpt->m_active;

	for (config = 0; (config < slots->m_num_raid_configs) && (vol != i);
	    config++) {
		for (i = 0; i < MPTSAS_MAX_RAIDVOLS; i++) {
			if (slots->m_raidconfig[config].m_raidvol[i].
			    m_raidhandle == volid) {
				vol = i;
				break;
			}
		}
	}

	if (vol < 0) {
		mptsas_log(mpt, CE_WARN, "raid doesn't exist at specified "
		    "target.");
		return (-1);
	}

	slots->m_raidconfig[config].m_raidvol[vol].m_israid = 0;
	slots->m_raidconfig[config].m_raidvol[vol].m_ndisks = 0;
	for (i = 0; i < MPTSAS_MAX_DISKS_IN_VOL; i++) {
		slots->m_raidconfig[config].m_raidvol[vol].m_disknum[i] = 0;
		slots->m_raidconfig[config].m_raidvol[vol].m_devhdl[i] = 0;
	}

	return (0);
}