/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/disklabel32.h>
#include <sys/disklabel64.h>
#include <sys/diskslice.h>
#include <sys/diskmbr.h>
#include <sys/disk.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/devfs.h>
#include <sys/thread.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/stat.h>
#include <sys/uuid.h>

#include <vfs/hammer2/hammer2_disk.h>
#include <sys/dmsg.h>

#include <sys/buf2.h>
#include <sys/mplock2.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>

/*
 * Per-transaction open state: counts of read and write opens performed
 * against the raw device on behalf of the remote end.
 */
struct dios_open {
        int openrd;
        int openwr;
};

/*
 * Per-transaction I/O state: number of BIOs still in flight and an EOF
 * flag set once the remote end has sent its DELETE (closed its half).
 */
struct dios_io {
        int count;
        int eof;
};

static MALLOC_DEFINE(M_DMSG_DISK, "dmsg_disk", "disk dmsg");

static int disk_iocom_reconnect(struct disk *dp, struct file *fp);
static int disk_rcvdmsg(kdmsg_msg_t *msg);

static void disk_blk_open(struct disk *dp, kdmsg_msg_t *msg);
static void disk_blk_read(struct disk *dp, kdmsg_msg_t *msg);
static void disk_blk_write(struct disk *dp, kdmsg_msg_t *msg);
static void disk_blk_flush(struct disk *dp, kdmsg_msg_t *msg);
static void disk_blk_freeblks(struct disk *dp, kdmsg_msg_t *msg);
static void diskiodone(struct bio *bio);

void
disk_iocom_init(struct disk *dp)
{
        kdmsg_iocom_init(&dp->d_iocom, dp,
                         KDMSG_IOCOMF_AUTOCONN |
                         KDMSG_IOCOMF_AUTOSPAN |
                         KDMSG_IOCOMF_AUTOCIRC,
                         M_DMSG_DISK, disk_rcvdmsg);
}

void
disk_iocom_update(struct disk *dp)
{
}

void
disk_iocom_uninit(struct disk *dp)
{
        kdmsg_iocom_uninit(&dp->d_iocom);
}
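
/*
 * Handle disk-layer ioctls for the iocom interface.  DIOCRECLUSTER hands
 * us a descriptor from userland over which the disk's kdmsg link is
 * (re)established.
 */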
int
disk_iocom_ioctl(struct disk *dp, int cmd, void *data)
{
        struct file *fp;
        struct disk_ioc_recluster *recl;
        int error;

        switch(cmd) {
        case DIOCRECLUSTER:
                recl = data;
                fp = holdfp(curproc->p_fd, recl->fd, -1);
                if (fp) {
                        error = disk_iocom_reconnect(dp, fp);
                } else {
                        error = EINVAL;
                }
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return error;
}

static
int
disk_iocom_reconnect(struct disk *dp, struct file *fp)
{
        char devname[64];

        ksnprintf(devname, sizeof(devname), "%s%d",
                  dev_dname(dp->d_rawdev), dkunit(dp->d_rawdev));

        kdmsg_iocom_reconnect(&dp->d_iocom, fp, devname);

        dp->d_iocom.auto_lnk_conn.pfs_type = DMSG_PFSTYPE_SERVER;
        dp->d_iocom.auto_lnk_conn.proto_version = DMSG_SPAN_PROTO_1;
        dp->d_iocom.auto_lnk_conn.peer_type = DMSG_PEER_BLOCK;
        dp->d_iocom.auto_lnk_conn.peer_mask = 1LLU << DMSG_PEER_BLOCK;
        dp->d_iocom.auto_lnk_conn.pfs_mask = (uint64_t)-1;
        ksnprintf(dp->d_iocom.auto_lnk_conn.cl_label,
                  sizeof(dp->d_iocom.auto_lnk_conn.cl_label),
                  "%s/%s", hostname, devname);
        if (dp->d_info.d_serialno) {
                ksnprintf(dp->d_iocom.auto_lnk_conn.fs_label,
                          sizeof(dp->d_iocom.auto_lnk_conn.fs_label),
                          "%s", dp->d_info.d_serialno);
        }

        dp->d_iocom.auto_lnk_span.pfs_type = DMSG_PFSTYPE_SERVER;
        dp->d_iocom.auto_lnk_span.proto_version = DMSG_SPAN_PROTO_1;
        dp->d_iocom.auto_lnk_span.peer_type = DMSG_PEER_BLOCK;
        dp->d_iocom.auto_lnk_span.media.block.bytes =
                dp->d_info.d_media_size;
        dp->d_iocom.auto_lnk_span.media.block.blksize =
                dp->d_info.d_media_blksize;
        ksnprintf(dp->d_iocom.auto_lnk_span.cl_label,
                  sizeof(dp->d_iocom.auto_lnk_span.cl_label),
                  "%s/%s", hostname, devname);
        if (dp->d_info.d_serialno) {
                ksnprintf(dp->d_iocom.auto_lnk_span.fs_label,
                          sizeof(dp->d_iocom.auto_lnk_span.fs_label),
                          "%s", dp->d_info.d_serialno);
        }

        kdmsg_iocom_autoinitiate(&dp->d_iocom, NULL);

        return (0);
}

int
disk_rcvdmsg(kdmsg_msg_t *msg)
{
        struct disk *dp = msg->iocom->handle;

        /*
         * Handle debug messages (these might not be in transactions)
         */
        switch(msg->any.head.cmd & DMSGF_CMDSWMASK) {
        case DMSG_DBG_SHELL:
                /*
                 * Execute shell command (not supported atm)
                 */
                kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
                return(0);
        case DMSG_DBG_SHELL | DMSGF_REPLY:
                if (msg->aux_data) {
                        msg->aux_data[msg->aux_size - 1] = 0;
                        kprintf("diskiocom: DEBUGMSG: %s\n", msg->aux_data);
                }
                return(0);
        }

        /*
         * All remaining messages must be in a transaction.
         *
         * NOTE! We are switching on the first message's command.  The
         *       actual message command within the transaction may be
         *       different (if streaming within a transaction).
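         *
         *       Each BLK command below keeps its per-transaction tracking
         *       structure hung off msg->state->any.any.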
         */
        if (msg->state == NULL) {
                kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
                return(0);
        }

        switch(msg->state->rxcmd & DMSGF_CMDSWMASK) {
        case DMSG_BLK_OPEN:
        case DMSG_BLK_CLOSE:
                disk_blk_open(dp, msg);
                break;
        case DMSG_BLK_READ:
                disk_blk_read(dp, msg);
                break;
        case DMSG_BLK_WRITE:
                disk_blk_write(dp, msg);
                break;
        case DMSG_BLK_FLUSH:
                disk_blk_flush(dp, msg);
                break;
        case DMSG_BLK_FREEBLKS:
                disk_blk_freeblks(dp, msg);
                break;
        default:
                if ((msg->any.head.cmd & DMSGF_REPLY) == 0) {
                        if (msg->any.head.cmd & DMSGF_DELETE)
                                kdmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
                        else
                                kdmsg_msg_result(msg, DMSG_ERR_NOSUPP);
                }
                break;
        }
        return (0);
}

static
void
disk_blk_open(struct disk *dp, kdmsg_msg_t *msg)
{
        struct dios_open *openst;
        int error = DMSG_ERR_NOSUPP;
        int fflags;

        openst = msg->state->any.any;
        if ((msg->any.head.cmd & DMSGF_CMDSWMASK) == DMSG_BLK_OPEN) {
                if (openst == NULL) {
                        openst = kmalloc(sizeof(*openst), M_DEVBUF,
                                         M_WAITOK | M_ZERO);
                        msg->state->any.any = openst;
                }
                fflags = 0;
                if (msg->any.blk_open.modes & DMSG_BLKOPEN_RD)
                        fflags = FREAD;
                if (msg->any.blk_open.modes & DMSG_BLKOPEN_WR)
                        fflags |= FWRITE;
                error = dev_dopen(dp->d_rawdev, fflags, S_IFCHR,
                                  proc0.p_ucred, NULL);
                if (error) {
                        error = DMSG_ERR_IO;
                } else {
                        if (msg->any.blk_open.modes & DMSG_BLKOPEN_RD)
                                ++openst->openrd;
                        if (msg->any.blk_open.modes & DMSG_BLKOPEN_WR)
                                ++openst->openwr;
                }
        }
        if ((msg->any.head.cmd & DMSGF_CMDSWMASK) == DMSG_BLK_CLOSE &&
            openst) {
                fflags = 0;
                if ((msg->any.blk_open.modes & DMSG_BLKOPEN_RD) &&
                    openst->openrd) {
                        fflags = FREAD;
                }
                if ((msg->any.blk_open.modes & DMSG_BLKOPEN_WR) &&
                    openst->openwr) {
                        fflags |= FWRITE;
                }
                error = dev_dclose(dp->d_rawdev, fflags, S_IFCHR, NULL);
                if (error) {
                        error = DMSG_ERR_IO;
                } else {
                        if (msg->any.blk_open.modes & DMSG_BLKOPEN_RD)
                                --openst->openrd;
                        if (msg->any.blk_open.modes & DMSG_BLKOPEN_WR)
                                --openst->openwr;
                }
        }
        if (msg->any.head.cmd & DMSGF_DELETE) {
                if (openst) {
                        while (openst->openrd && openst->openwr) {
                                --openst->openrd;
                                --openst->openwr;
                                dev_dclose(dp->d_rawdev, FREAD|FWRITE,
                                           S_IFCHR, NULL);
                        }
                        while (openst->openrd) {
                                --openst->openrd;
                                dev_dclose(dp->d_rawdev, FREAD, S_IFCHR, NULL);
                        }
                        while (openst->openwr) {
                                --openst->openwr;
                                dev_dclose(dp->d_rawdev, FWRITE, S_IFCHR, NULL);
                        }
                        kfree(openst, M_DEVBUF);
                        msg->state->any.any = NULL;
                }
                kdmsg_msg_reply(msg, error);
        } else {
                kdmsg_msg_result(msg, error);
        }
}

static
void
disk_blk_read(struct disk *dp, kdmsg_msg_t *msg)
{
        struct dios_io *iost;
        struct buf *bp;
        struct bio *bio;
        int error = DMSG_ERR_NOSUPP;
        int reterr = 1;

        /*
         * Only DMSG_BLK_READ commands imply read ops.
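         *
         *       The read is issued as an asynchronous BIO against the raw
         *       device; diskiodone() generates the reply (and carries the
         *       data back) when the I/O completes.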
         */
        iost = msg->state->any.any;
        if ((msg->any.head.cmd & DMSGF_CMDSWMASK) == DMSG_BLK_READ) {
                if (msg->any.blk_read.bytes < DEV_BSIZE ||
                    msg->any.blk_read.bytes > MAXPHYS) {
                        error = DMSG_ERR_PARAM;
                        goto done;
                }
                if (iost == NULL) {
                        iost = kmalloc(sizeof(*iost), M_DEVBUF,
                                       M_WAITOK | M_ZERO);
                        msg->state->any.any = iost;
                }
                reterr = 0;
                bp = geteblk(msg->any.blk_read.bytes);
                bio = &bp->b_bio1;
                bp->b_cmd = BUF_CMD_READ;
                bp->b_bcount = msg->any.blk_read.bytes;
                bp->b_resid = bp->b_bcount;
                bio->bio_offset = msg->any.blk_read.offset;
                bio->bio_caller_info1.ptr = msg->state;
                bio->bio_done = diskiodone;
                /* kdmsg_state_hold(msg->state); */

                atomic_add_int(&iost->count, 1);
                if (msg->any.head.cmd & DMSGF_DELETE)
                        iost->eof = 1;
                BUF_KERNPROC(bp);
                dev_dstrategy(dp->d_rawdev, bio);
        }
done:
        if (reterr) {
                if (msg->any.head.cmd & DMSGF_DELETE) {
                        if (iost && iost->count == 0) {
                                kfree(iost, M_DEVBUF);
                                msg->state->any.any = NULL;
                        }
                        kdmsg_msg_reply(msg, error);
                } else {
                        kdmsg_msg_result(msg, error);
                }
        }
}

static
void
disk_blk_write(struct disk *dp, kdmsg_msg_t *msg)
{
        struct dios_io *iost;
        struct buf *bp;
        struct bio *bio;
        int error = DMSG_ERR_NOSUPP;
        int reterr = 1;

        /*
         * Only DMSG_BLK_WRITE commands imply write ops.
         */
        iost = msg->state->any.any;
        if ((msg->any.head.cmd & DMSGF_CMDSWMASK) == DMSG_BLK_WRITE) {
                if (msg->any.blk_write.bytes < DEV_BSIZE ||
                    msg->any.blk_write.bytes > MAXPHYS) {
                        error = DMSG_ERR_PARAM;
                        goto done;
                }
                if (iost == NULL) {
                        iost = kmalloc(sizeof(*iost), M_DEVBUF,
                                       M_WAITOK | M_ZERO);
                        msg->state->any.any = iost;
                }

                /*
                 * Issue WRITE.  Short data implies zeros.  Try to optimize
                 * the buffer cache buffer for the case where we can just
                 * use the message's data pointer.
                 */
                reterr = 0;
                if (msg->aux_size >= msg->any.blk_write.bytes)
                        bp = getpbuf(NULL);
                else
                        bp = geteblk(msg->any.blk_write.bytes);
                bio = &bp->b_bio1;
                bp->b_cmd = BUF_CMD_WRITE;
                bp->b_bcount = msg->any.blk_write.bytes;
                bp->b_resid = bp->b_bcount;
                if (msg->aux_size >= msg->any.blk_write.bytes) {
                        bp->b_data = msg->aux_data;
                } else {
                        bcopy(msg->aux_data, bp->b_data, msg->aux_size);
                        bzero(bp->b_data + msg->aux_size,
                              msg->any.blk_write.bytes - msg->aux_size);
                }
                bio->bio_offset = msg->any.blk_write.offset;
                bio->bio_caller_info1.ptr = msg->state;
                bio->bio_done = diskiodone;
                /* kdmsg_state_hold(msg->state); */

                atomic_add_int(&iost->count, 1);
                if (msg->any.head.cmd & DMSGF_DELETE)
                        iost->eof = 1;
                BUF_KERNPROC(bp);
                dev_dstrategy(dp->d_rawdev, bio);
        }
done:
        if (reterr) {
                if (msg->any.head.cmd & DMSGF_DELETE) {
                        if (iost && iost->count == 0) {
                                kfree(iost, M_DEVBUF);
                                msg->state->any.any = NULL;
                        }
                        kdmsg_msg_reply(msg, error);
                } else {
                        kdmsg_msg_result(msg, error);
                }
        }
}

static
void
disk_blk_flush(struct disk *dp, kdmsg_msg_t *msg)
{
        struct dios_io *iost;
        struct buf *bp;
        struct bio *bio;
        int error = DMSG_ERR_NOSUPP;
        int reterr = 1;

        /*
         * Only DMSG_BLK_FLUSH commands imply flush ops.
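         *
         *       The flush is issued as a BUF_CMD_FLUSH BIO covering the
         *       offset/bytes range supplied by the message.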
         */
        iost = msg->state->any.any;
        if ((msg->any.head.cmd & DMSGF_CMDSWMASK) == DMSG_BLK_FLUSH) {
                if (iost == NULL) {
                        iost = kmalloc(sizeof(*iost), M_DEVBUF,
                                       M_WAITOK | M_ZERO);
                        msg->state->any.any = iost;
                }
                reterr = 0;
                bp = getpbuf(NULL);
                bio = &bp->b_bio1;
                bp->b_cmd = BUF_CMD_FLUSH;
                bp->b_bcount = msg->any.blk_flush.bytes;
                bp->b_resid = 0;
                bio->bio_offset = msg->any.blk_flush.offset;
                bio->bio_caller_info1.ptr = msg->state;
                bio->bio_done = diskiodone;
                /* kdmsg_state_hold(msg->state); */

                atomic_add_int(&iost->count, 1);
                if (msg->any.head.cmd & DMSGF_DELETE)
                        iost->eof = 1;
                BUF_KERNPROC(bp);
                dev_dstrategy(dp->d_rawdev, bio);
        }
        if (reterr) {
                if (msg->any.head.cmd & DMSGF_DELETE) {
                        if (iost && iost->count == 0) {
                                kfree(iost, M_DEVBUF);
                                msg->state->any.any = NULL;
                        }
                        kdmsg_msg_reply(msg, error);
                } else {
                        kdmsg_msg_result(msg, error);
                }
        }
}

static
void
disk_blk_freeblks(struct disk *dp, kdmsg_msg_t *msg)
{
        struct dios_io *iost;
        struct buf *bp;
        struct bio *bio;
        int error = DMSG_ERR_NOSUPP;
        int reterr = 1;

        /*
         * Only DMSG_BLK_FREEBLKS commands imply freeblks ops.
         */
        iost = msg->state->any.any;
        if ((msg->any.head.cmd & DMSGF_CMDSWMASK) == DMSG_BLK_FREEBLKS) {
                if (iost == NULL) {
                        iost = kmalloc(sizeof(*iost), M_DEVBUF,
                                       M_WAITOK | M_ZERO);
                        msg->state->any.any = iost;
                }
                reterr = 0;
                bp = getpbuf(NULL);
                bio = &bp->b_bio1;
                bp->b_cmd = BUF_CMD_FREEBLKS;
                bp->b_bcount = msg->any.blk_freeblks.bytes;
                bp->b_resid = 0;
                bio->bio_offset = msg->any.blk_freeblks.offset;
                bio->bio_caller_info1.ptr = msg->state;
                bio->bio_done = diskiodone;
                /* kdmsg_state_hold(msg->state); */

                atomic_add_int(&iost->count, 1);
                if (msg->any.head.cmd & DMSGF_DELETE)
                        iost->eof = 1;
                BUF_KERNPROC(bp);
                dev_dstrategy(dp->d_rawdev, bio);
        }
        if (reterr) {
                if (msg->any.head.cmd & DMSGF_DELETE) {
                        if (iost && iost->count == 0) {
                                kfree(iost, M_DEVBUF);
                                msg->state->any.any = NULL;
                        }
                        kdmsg_msg_reply(msg, error);
                } else {
                        kdmsg_msg_result(msg, error);
                }
        }
}

static
void
diskiodone(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        kdmsg_state_t *state = bio->bio_caller_info1.ptr;
        kdmsg_msg_t *rmsg;
        struct dios_io *iost = state->any.any;
        int error;
        int resid = 0;
        int bytes;
        uint32_t cmd;
        void *data;

        cmd = DMSG_LNK_ERROR;
        data = NULL;
        bytes = 0;

        switch(bp->b_cmd) {
        case BUF_CMD_READ:
                cmd = DMSG_BLK_ERROR;
                data = bp->b_data;
                bytes = bp->b_bcount;
                /* fall through */
        case BUF_CMD_WRITE:
                if (bp->b_flags & B_ERROR) {
                        error = bp->b_error;
                } else {
                        error = 0;
                        resid = bp->b_resid;
                }
                break;
        case BUF_CMD_FLUSH:
        case BUF_CMD_FREEBLKS:
                if (bp->b_flags & B_ERROR)
                        error = bp->b_error;
                else
                        error = 0;
                break;
        default:
                panic("diskiodone: Unknown bio cmd = %d\n",
                      bio->bio_buf->b_cmd);
                error = 0;      /* avoid compiler warning */
                break;          /* NOT REACHED */
        }

        /*
         * Convert error to DMSG_ERR_* code.
         */
        if (error)
                error = DMSG_ERR_IO;

        /*
         * Convert LNK_ERROR to BLK_ERROR if non-zero resid.  READS will
         * have already converted cmd to BLK_ERROR and set up data to return.
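         *
         * The reply carries CREATE on the first reply of a transaction and
         * DELETE on the last reply once the remote side has sent its EOF.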
         */
        if (resid && cmd == DMSG_LNK_ERROR)
                cmd = DMSG_BLK_ERROR;
        /* XXX txcmd is delayed so this won't work for streaming */
        if ((state->txcmd & DMSGF_CREATE) == 0) /* assume serialized */
                cmd |= DMSGF_CREATE;
        if (iost->eof) {
                if (atomic_fetchadd_int(&iost->count, -1) == 1)
                        cmd |= DMSGF_DELETE;
        } else {
                atomic_add_int(&iost->count, -1);
        }
        cmd |= DMSGF_REPLY;

        /*
         * Allocate a basic or extended reply.  Be careful not to populate
         * extended header fields unless we allocated an extended reply.
         */
        rmsg = kdmsg_msg_alloc_state(state, cmd, NULL, 0);
        if (data) {
                rmsg->aux_data = kmalloc(bytes, state->iocom->mmsg, M_INTWAIT);
                rmsg->aux_size = bytes;
                rmsg->flags |= KDMSG_FLAG_AUXALLOC;
                bcopy(data, rmsg->aux_data, bytes);
        }
        rmsg->any.blk_error.head.error = error;
        if ((cmd & DMSGF_BASECMDMASK) == DMSG_BLK_ERROR)
                rmsg->any.blk_error.resid = resid;
        bio->bio_caller_info1.ptr = NULL;
        /* kdmsg_state_drop(state); */
        kdmsg_msg_write(rmsg);
        if (bp->b_flags & B_PAGING) {
                relpbuf(bio->bio_buf, NULL);
        } else {
                bp->b_flags |= B_INVAL | B_AGE;
                brelse(bp);
        }
}