1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * This module provides range lock functionality for CIFS/SMB clients.
 * Lock range service functions process SMB lock and unlock
 * requests for a file by applying lock rules and mark the file
 * range as locked if the lock is successful; otherwise they
 * return the appropriate error code.
31 */
32
33 #include <smbsrv/smb_kproto.h>
34 #include <smbsrv/smb_fsops.h>
35 #include <sys/nbmlock.h>
36 #include <sys/param.h>
37
38 extern caller_context_t smb_ct;
39
40 static void smb_lock_posix_unlock(smb_node_t *, smb_lock_t *, cred_t *);
41 static boolean_t smb_is_range_unlocked(uint64_t, uint64_t, uint32_t,
42 smb_llist_t *, uint64_t *);
43 static int smb_lock_range_overlap(smb_lock_t *, uint64_t, uint64_t);
44 static uint32_t smb_lock_range_lckrules(smb_request_t *, smb_ofile_t *,
45 smb_node_t *, smb_lock_t *, smb_lock_t **);
46 static clock_t smb_lock_wait(smb_request_t *, smb_lock_t *, smb_lock_t *);
47 static uint32_t smb_lock_range_ulckrules(smb_request_t *, smb_node_t *,
48 uint64_t, uint64_t, smb_lock_t **nodelock);
49 static smb_lock_t *smb_lock_create(smb_request_t *, uint64_t, uint64_t,
50 uint32_t, uint32_t);
51 static void smb_lock_destroy(smb_lock_t *);
52 static void smb_lock_free(smb_lock_t *);
53
54 /*
55 * Return the number of range locks on the specified ofile.
56 */
57 uint32_t
smb_lock_get_lock_count(smb_node_t * node,smb_ofile_t * of)58 smb_lock_get_lock_count(smb_node_t *node, smb_ofile_t *of)
59 {
60 smb_lock_t *lock;
61 smb_llist_t *llist;
62 uint32_t count = 0;
63
64 SMB_NODE_VALID(node);
65 SMB_OFILE_VALID(of);
66
67 llist = &node->n_lock_list;
68
69 smb_llist_enter(llist, RW_READER);
70 for (lock = smb_llist_head(llist);
71 lock != NULL;
72 lock = smb_llist_next(llist, lock)) {
73 if (lock->l_file == of)
74 ++count;
75 }
76 smb_llist_exit(llist);
77
78 return (count);
79 }
80
81 /*
82 * smb_unlock_range
83 *
84 * locates lock range performed for corresponding to unlock request.
85 *
86 * NT_STATUS_SUCCESS - Lock range performed successfully.
87 * !NT_STATUS_SUCCESS - Error in unlock range operation.
88 */
89 uint32_t
smb_unlock_range(smb_request_t * sr,smb_node_t * node,uint64_t start,uint64_t length)90 smb_unlock_range(
91 smb_request_t *sr,
92 smb_node_t *node,
93 uint64_t start,
94 uint64_t length)
95 {
96 smb_lock_t *lock = NULL;
97 uint32_t status;
98
99 /* Apply unlocking rules */
100 smb_llist_enter(&node->n_lock_list, RW_WRITER);
101 status = smb_lock_range_ulckrules(sr, node, start, length, &lock);
102 if (status != NT_STATUS_SUCCESS) {
103 /*
104 * If lock range is not matching in the list
105 * return error.
106 */
107 ASSERT(lock == NULL);
108 smb_llist_exit(&node->n_lock_list);
109 return (status);
110 }
111
112 smb_llist_remove(&node->n_lock_list, lock);
113 smb_lock_posix_unlock(node, lock, sr->user_cr);
114 smb_llist_exit(&node->n_lock_list);
115 smb_lock_destroy(lock);
116
117 return (status);
118 }
119
/*
 * smb_lock_range
 *
 * Checks for integrity of file lock operation for the given range of file data.
 * This is performed by applying lock rules with all the elements of the node
 * lock list.
 *
 * Break shared (levelII) oplocks. If there is an exclusive oplock, it is
 * owned by this ofile and therefore should not be broken.
 *
 * The function returns with new lock added if lock request is non-conflicting
 * with existing range lock for the file. Otherwise smb request is filed
 * without returning.
 *
 * NT_STATUS_SUCCESS - Lock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in lock range operation.
 */
uint32_t
smb_lock_range(
    smb_request_t	*sr,
    uint64_t		start,
    uint64_t		length,
    uint32_t		timeout,
    uint32_t		locktype)
{
	smb_ofile_t	*file = sr->fid_ofile;
	smb_node_t	*node = file->f_node;
	smb_lock_t	*lock;
	smb_lock_t	*clock = NULL;
	uint32_t	result = NT_STATUS_SUCCESS;
	/* Remember whether the caller asked for a timed wait (affects the
	 * status mapping below even after timeout is cleared on expiry). */
	boolean_t	lock_has_timeout = (timeout != 0);

	lock = smb_lock_create(sr, start, length, locktype, timeout);

	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	for (;;) {
		clock_t	rc;

		/* Apply locking rules */
		result = smb_lock_range_lckrules(sr, file, node, lock, &clock);

		if ((result == NT_STATUS_CANCELLED) ||
		    (result == NT_STATUS_SUCCESS) ||
		    (result == NT_STATUS_RANGE_NOT_LOCKED)) {
			/* Terminal result: no conflicting lock to wait on. */
			ASSERT(clock == NULL);
			break;
		} else if (timeout == 0) {
			/* Conflict and no (remaining) timeout: give up. */
			break;
		}

		ASSERT(result == NT_STATUS_LOCK_NOT_GRANTED);
		ASSERT(clock);
		/*
		 * Call smb_lock_wait holding write lock for
		 * node lock list. smb_lock_wait will release
		 * this lock if it blocks.
		 */
		ASSERT(node == clock->l_file->f_node);

		/*
		 * rc == 0 means the request was cancelled while waiting;
		 * rc == -1 means the wait timed out, in which case clear
		 * the timeout so the rules are applied one final time.
		 */
		rc = smb_lock_wait(sr, lock, clock);
		if (rc == 0) {
			result = NT_STATUS_CANCELLED;
			break;
		}
		if (rc == -1)
			timeout = 0;

		clock = NULL;
	}

	/* The wait (if any) is over; clear the debug-only back pointer. */
	lock->l_blocked_by = NULL;

	if (result != NT_STATUS_SUCCESS) {
		/*
		 * Under certain conditions NT_STATUS_FILE_LOCK_CONFLICT
		 * should be returned instead of NT_STATUS_LOCK_NOT_GRANTED.
		 */
		if (result == NT_STATUS_LOCK_NOT_GRANTED) {
			/*
			 * Locks with timeouts always return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			if (lock_has_timeout)
				result = NT_STATUS_FILE_LOCK_CONFLICT;

			/*
			 * Locks starting higher than 0xef000000 that do not
			 * have the MSB set always return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			if ((lock->l_start >= 0xef000000) &&
			    !(lock->l_start & (1ULL << 63))) {
				result = NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/*
			 * If the last lock attempt to fail on this file handle
			 * started at the same offset as this one then return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			mutex_enter(&file->f_mutex);
			if ((file->f_flags & SMB_OFLAGS_LLF_POS_VALID) &&
			    (lock->l_start == file->f_llf_pos)) {
				result = NT_STATUS_FILE_LOCK_CONFLICT;
			}
			mutex_exit(&file->f_mutex);
		}

		/* Update last lock failed offset */
		mutex_enter(&file->f_mutex);
		file->f_llf_pos = lock->l_start;
		file->f_flags |= SMB_OFLAGS_LLF_POS_VALID;
		mutex_exit(&file->f_mutex);

		/* The lock was never inserted into the list; free it here. */
		smb_lock_free(lock);
	} else {
		/*
		 * don't insert into the CIFS lock list unless the
		 * posix lock worked
		 */
		if (smb_fsop_frlock(node, lock, B_FALSE, sr->user_cr))
			result = NT_STATUS_FILE_LOCK_CONFLICT;
		else
			smb_llist_insert_tail(&node->n_lock_list, lock);
	}
	smb_llist_exit(&node->n_lock_list);

	if (result == NT_STATUS_SUCCESS)
		smb_oplock_break_levelII(node);

	return (result);
}
252
253
254 /*
255 * smb_lock_range_access
256 *
257 * scans node lock list
258 * to check if there is any overlapping lock. Overlapping
259 * lock is allowed only under same session and client pid.
260 *
261 * Return values
262 * NT_STATUS_SUCCESS lock access granted.
263 * NT_STATUS_FILE_LOCK_CONFLICT access denied due to lock conflict.
264 */
265 int
smb_lock_range_access(smb_request_t * sr,smb_node_t * node,uint64_t start,uint64_t length,boolean_t will_write)266 smb_lock_range_access(
267 smb_request_t *sr,
268 smb_node_t *node,
269 uint64_t start,
270 uint64_t length,
271 boolean_t will_write)
272 {
273 smb_lock_t *lock;
274 smb_llist_t *llist;
275 int status = NT_STATUS_SUCCESS;
276
277 llist = &node->n_lock_list;
278 smb_llist_enter(llist, RW_READER);
279 /* Search for any applicable lock */
280 for (lock = smb_llist_head(llist);
281 lock != NULL;
282 lock = smb_llist_next(llist, lock)) {
283
284 if (!smb_lock_range_overlap(lock, start, length))
285 /* Lock does not overlap */
286 continue;
287
288 if (lock->l_type == SMB_LOCK_TYPE_READONLY && !will_write)
289 continue;
290
291 if (lock->l_type == SMB_LOCK_TYPE_READWRITE &&
292 lock->l_session_kid == sr->session->s_kid &&
293 lock->l_pid == sr->smb_pid)
294 continue;
295
296 status = NT_STATUS_FILE_LOCK_CONFLICT;
297 break;
298 }
299 smb_llist_exit(llist);
300 return (status);
301 }
302
303 void
smb_node_destroy_lock_by_ofile(smb_node_t * node,smb_ofile_t * file)304 smb_node_destroy_lock_by_ofile(smb_node_t *node, smb_ofile_t *file)
305 {
306 smb_lock_t *lock;
307 smb_lock_t *nxtl;
308 list_t destroy_list;
309
310 SMB_NODE_VALID(node);
311 ASSERT(node->n_refcnt);
312
313 /*
314 * Move locks matching the specified file from the node->n_lock_list
315 * to a temporary list (holding the lock the entire time) then
316 * destroy all the matching locks. We can't call smb_lock_destroy
317 * while we are holding the lock for node->n_lock_list because we will
318 * deadlock and we can't drop the lock because the list contents might
319 * change (for example nxtl might get removed on another thread).
320 */
321 list_create(&destroy_list, sizeof (smb_lock_t),
322 offsetof(smb_lock_t, l_lnd));
323
324 smb_llist_enter(&node->n_lock_list, RW_WRITER);
325 lock = smb_llist_head(&node->n_lock_list);
326 while (lock) {
327 nxtl = smb_llist_next(&node->n_lock_list, lock);
328 if (lock->l_file == file) {
329 smb_llist_remove(&node->n_lock_list, lock);
330 smb_lock_posix_unlock(node, lock, file->f_user->u_cred);
331 list_insert_tail(&destroy_list, lock);
332 }
333 lock = nxtl;
334 }
335 smb_llist_exit(&node->n_lock_list);
336
337 lock = list_head(&destroy_list);
338 while (lock) {
339 nxtl = list_next(&destroy_list, lock);
340 list_remove(&destroy_list, lock);
341 smb_lock_destroy(lock);
342 lock = nxtl;
343 }
344
345 list_destroy(&destroy_list);
346 }
347
348 void
smb_lock_range_error(smb_request_t * sr,uint32_t status32)349 smb_lock_range_error(smb_request_t *sr, uint32_t status32)
350 {
351 uint16_t errcode;
352
353 if (status32 == NT_STATUS_CANCELLED)
354 errcode = ERROR_OPERATION_ABORTED;
355 else
356 errcode = ERRlock;
357
358 smbsr_error(sr, status32, ERRDOS, errcode);
359 }
360
361 /*
362 * smb_range_check()
363 *
364 * Perform range checking. First check for internal CIFS range conflicts
365 * and then check for external conflicts, for example, with NFS or local
366 * access.
367 *
368 * If nbmand is enabled, this function must be called from within an nbmand
369 * critical region
370 */
371
372 DWORD
smb_range_check(smb_request_t * sr,smb_node_t * node,uint64_t start,uint64_t length,boolean_t will_write)373 smb_range_check(smb_request_t *sr, smb_node_t *node, uint64_t start,
374 uint64_t length, boolean_t will_write)
375 {
376 smb_error_t smberr;
377 int svmand;
378 int nbl_op;
379 int rc;
380
381 SMB_NODE_VALID(node);
382
383 ASSERT(smb_node_in_crit(node));
384
385 if (smb_node_is_dir(node))
386 return (NT_STATUS_SUCCESS);
387
388 rc = smb_lock_range_access(sr, node, start, length, will_write);
389 if (rc)
390 return (NT_STATUS_FILE_LOCK_CONFLICT);
391
392 if ((rc = nbl_svmand(node->vp, kcred, &svmand)) != 0) {
393 smbsr_map_errno(rc, &smberr);
394 return (smberr.status);
395 }
396
397 nbl_op = (will_write) ? NBL_WRITE : NBL_READ;
398
399 if (nbl_lock_conflict(node->vp, nbl_op, start, length, svmand, &smb_ct))
400 return (NT_STATUS_FILE_LOCK_CONFLICT);
401
402 return (NT_STATUS_SUCCESS);
403 }
404
/*
 * smb_lock_posix_unlock
 *
 * Checks if the current unlock request is covered by other locks and
 * repeatedly calls smb_is_range_unlocked on a sliding basis to unlock
 * all parts of the range that are not covered by other locks.
 */
static void
smb_lock_posix_unlock(smb_node_t *node, smb_lock_t *lock, cred_t *cr)
{
	uint64_t	new_mark;
	uint64_t	unlock_start;
	uint64_t	unlock_end;
	smb_lock_t	new_unlock;
	smb_llist_t	*llist;
	boolean_t	can_unlock;

	new_mark = 0;
	unlock_start = lock->l_start;
	unlock_end = unlock_start + lock->l_length;
	llist = &node->n_lock_list;

	for (;;) {
		/*
		 * See the comment above smb_is_range_unlocked() for the
		 * meaning of the (can_unlock, new_mark) combinations.
		 */
		can_unlock = smb_is_range_unlocked(unlock_start, unlock_end,
		    lock->l_file->f_uniqid, llist, &new_mark);
		if (can_unlock) {
			if (new_mark) {
				/*
				 * Only [unlock_start, new_mark) is free;
				 * unlock that piece and slide the window
				 * forward.
				 */
				new_unlock = *lock;
				new_unlock.l_start = unlock_start;
				new_unlock.l_length = new_mark - unlock_start;
				(void) smb_fsop_frlock(node, &new_unlock,
				    B_TRUE, cr);
				unlock_start = new_mark;
			} else {
				/*
				 * The remainder of the range is free;
				 * unlock it and we are done.
				 */
				new_unlock = *lock;
				new_unlock.l_start = unlock_start;
				new_unlock.l_length = unlock_end - unlock_start;
				(void) smb_fsop_frlock(node, &new_unlock,
				    B_TRUE, cr);
				break;
			}
		} else if (new_mark) {
			/* Front of the range still locked; skip past it. */
			unlock_start = new_mark;
		} else {
			/* The whole remaining range is locked; give up. */
			break;
		}
	}
}
454
455 /*
456 * smb_lock_range_overlap
457 *
458 * Checks if lock range(start, length) overlaps range in lock structure.
459 *
460 * Zero-length byte range locks actually affect no single byte of the stream,
461 * meaning they can still be accessed even with such locks in place. However,
462 * they do conflict with other ranges in the following manner:
463 * conflict will only exist if the positive-length range contains the
464 * zero-length range's offset but doesn't start at it
465 *
466 * return values:
467 * 0 - Lock range doesn't overlap
468 * 1 - Lock range overlaps.
469 */
470
471 #define RANGE_NO_OVERLAP 0
472 #define RANGE_OVERLAP 1
473
474 static int
smb_lock_range_overlap(struct smb_lock * lock,uint64_t start,uint64_t length)475 smb_lock_range_overlap(struct smb_lock *lock, uint64_t start, uint64_t length)
476 {
477 if (length == 0) {
478 if ((lock->l_start < start) &&
479 ((lock->l_start + lock->l_length) > start))
480 return (RANGE_OVERLAP);
481
482 return (RANGE_NO_OVERLAP);
483 }
484
485 /* The following test is intended to catch roll over locks. */
486 if ((start == lock->l_start) && (length == lock->l_length))
487 return (RANGE_OVERLAP);
488
489 if (start < lock->l_start) {
490 if (start + length > lock->l_start)
491 return (RANGE_OVERLAP);
492 } else if (start < lock->l_start + lock->l_length)
493 return (RANGE_OVERLAP);
494
495 return (RANGE_NO_OVERLAP);
496 }
497
/*
 * smb_lock_range_lckrules
 *
 * Lock range rules:
 *	1. Overlapping read locks are allowed if the
 *	   current locks in the region are only read locks
 *	   irrespective of pid of smb client issuing lock request.
 *
 *	2. Read lock in the overlapped region of write lock
 *	   are allowed if the previous lock is performed by the
 *	   same pid and connection.
 *
 * return status:
 *	NT_STATUS_SUCCESS - Input lock range adapts to lock rules.
 *	NT_STATUS_LOCK_NOT_GRANTED - Input lock conflicts lock rules;
 *	    *clockp is set to the conflicting lock.
 *	NT_STATUS_RANGE_NOT_LOCKED - The ofile has already been closed.
 */
static uint32_t
smb_lock_range_lckrules(
    smb_request_t	*sr,
    smb_ofile_t		*file,
    smb_node_t		*node,
    smb_lock_t		*dlock,
    smb_lock_t		**clockp)
{
	smb_lock_t	*lock;
	uint32_t	status = NT_STATUS_SUCCESS;

	/* Check if file is closed */
	if (!smb_ofile_is_open(file)) {
		return (NT_STATUS_RANGE_NOT_LOCKED);
	}

	/* Caller must hold lock for node->n_lock_list */
	for (lock = smb_llist_head(&node->n_lock_list);
	    lock != NULL;
	    lock = smb_llist_next(&node->n_lock_list, lock)) {

		if (!smb_lock_range_overlap(lock, dlock->l_start,
		    dlock->l_length))
			continue;

		/*
		 * Check to see if lock in the overlapping record
		 * is only read lock. Current finding is read
		 * locks can overlapped irrespective of pids.
		 */
		if ((lock->l_type == SMB_LOCK_TYPE_READONLY) &&
		    (dlock->l_type == SMB_LOCK_TYPE_READONLY)) {
			continue;
		}

		/*
		 * When the read lock overlaps write lock, check if
		 * allowed: the existing write lock must belong to the
		 * same ofile, session, pid and uid as the requester.
		 */
		if ((dlock->l_type == SMB_LOCK_TYPE_READONLY) &&
		    !(lock->l_type == SMB_LOCK_TYPE_READONLY)) {
			if (lock->l_file == sr->fid_ofile &&
			    lock->l_session_kid == sr->session->s_kid &&
			    lock->l_pid == sr->smb_pid &&
			    lock->l_uid == sr->smb_uid) {
				continue;
			}
		}

		/* Conflict in overlapping lock element */
		*clockp = lock;
		status = NT_STATUS_LOCK_NOT_GRANTED;
		break;
	}

	return (status);
}
572
573 /*
574 * smb_lock_wait
575 *
576 * Wait operation for smb overlapping lock to be released. Caller must hold
577 * write lock for node->n_lock_list so that the set of active locks can't
578 * change unexpectedly. The lock for node->n_lock_list will be released
579 * within this function during the sleep after the lock dependency has
580 * been recorded.
581 *
582 * return value
583 *
584 * 0 The request was canceled.
585 * -1 The timeout was reached.
586 * >0 Condition met.
587 */
588 static clock_t
smb_lock_wait(smb_request_t * sr,smb_lock_t * b_lock,smb_lock_t * c_lock)589 smb_lock_wait(smb_request_t *sr, smb_lock_t *b_lock, smb_lock_t *c_lock)
590 {
591 clock_t rc;
592
593 ASSERT(sr->sr_awaiting == NULL);
594
595 mutex_enter(&sr->sr_mutex);
596
597 switch (sr->sr_state) {
598 case SMB_REQ_STATE_ACTIVE:
599 /*
600 * Wait up till the timeout time keeping track of actual
601 * time waited for possible retry failure.
602 */
603 sr->sr_state = SMB_REQ_STATE_WAITING_LOCK;
604 sr->sr_awaiting = c_lock;
605 mutex_exit(&sr->sr_mutex);
606
607 mutex_enter(&c_lock->l_mutex);
608 /*
609 * The conflict list (l_conflict_list) for a lock contains
610 * all the locks that are blocked by and in conflict with
611 * that lock. Add the new lock to the conflict list for the
612 * active lock.
613 *
614 * l_conflict_list is currently a fancy way of representing
615 * the references/dependencies on a lock. It could be
616 * replaced with a reference count but this approach
617 * has the advantage that MDB can display the lock
618 * dependencies at any point in time. In the future
619 * we should be able to leverage the list to implement
620 * an asynchronous locking model.
621 *
622 * l_blocked_by is the reverse of the conflict list. It
623 * points to the lock that the new lock conflicts with.
624 * As currently implemented this value is purely for
625 * debug purposes -- there are windows of time when
626 * l_blocked_by may be non-NULL even though there is no
627 * conflict list
628 */
629 b_lock->l_blocked_by = c_lock;
630 smb_slist_insert_tail(&c_lock->l_conflict_list, b_lock);
631 smb_llist_exit(&c_lock->l_file->f_node->n_lock_list);
632
633 if (SMB_LOCK_INDEFINITE_WAIT(b_lock)) {
634 cv_wait(&c_lock->l_cv, &c_lock->l_mutex);
635 } else {
636 rc = cv_timedwait(&c_lock->l_cv,
637 &c_lock->l_mutex, b_lock->l_end_time);
638 }
639
640 mutex_exit(&c_lock->l_mutex);
641
642 smb_llist_enter(&c_lock->l_file->f_node->n_lock_list,
643 RW_WRITER);
644 smb_slist_remove(&c_lock->l_conflict_list, b_lock);
645
646 mutex_enter(&sr->sr_mutex);
647 sr->sr_awaiting = NULL;
648 if (sr->sr_state == SMB_REQ_STATE_CANCELED) {
649 rc = 0;
650 } else {
651 sr->sr_state = SMB_REQ_STATE_ACTIVE;
652 }
653 break;
654
655 default:
656 ASSERT(sr->sr_state == SMB_REQ_STATE_CANCELED);
657 rc = 0;
658 break;
659 }
660 mutex_exit(&sr->sr_mutex);
661
662 return (rc);
663 }
664
665 /*
666 * smb_lock_range_ulckrules
667 *
668 * 1. Unlock should be performed at exactly matching ends.
669 * This has been changed because overlapping ends is
670 * allowed and there is no other precise way of locating
671 * lock entity in node lock list.
672 *
673 * 2. Unlock is failed if there is no corresponding lock exists.
674 *
675 * Return values
676 *
677 * NT_STATUS_SUCCESS Unlock request matches lock record
678 * pointed by 'nodelock' lock structure.
679 *
680 * NT_STATUS_RANGE_NOT_LOCKED Unlock request doen't match any
681 * of lock record in node lock request or
682 * error in unlock range processing.
683 */
684 static uint32_t
smb_lock_range_ulckrules(smb_request_t * sr,smb_node_t * node,uint64_t start,uint64_t length,smb_lock_t ** nodelock)685 smb_lock_range_ulckrules(
686 smb_request_t *sr,
687 smb_node_t *node,
688 uint64_t start,
689 uint64_t length,
690 smb_lock_t **nodelock)
691 {
692 smb_lock_t *lock;
693 uint32_t status = NT_STATUS_RANGE_NOT_LOCKED;
694
695 /* Caller must hold lock for node->n_lock_list */
696 for (lock = smb_llist_head(&node->n_lock_list);
697 lock != NULL;
698 lock = smb_llist_next(&node->n_lock_list, lock)) {
699
700 if ((start == lock->l_start) &&
701 (length == lock->l_length) &&
702 lock->l_file == sr->fid_ofile &&
703 lock->l_session_kid == sr->session->s_kid &&
704 lock->l_pid == sr->smb_pid &&
705 lock->l_uid == sr->smb_uid) {
706 *nodelock = lock;
707 status = NT_STATUS_SUCCESS;
708 break;
709 }
710 }
711
712 return (status);
713 }
714
/*
 * Allocate and initialize a new smb_lock_t describing a byte-range lock
 * request for the current request's ofile. The returned lock has not yet
 * been inserted into any node lock list.
 */
static smb_lock_t *
smb_lock_create(
    smb_request_t	*sr,
    uint64_t		start,
    uint64_t		length,
    uint32_t		locktype,
    uint32_t		timeout)
{
	smb_lock_t	*lock;

	ASSERT(locktype == SMB_LOCK_TYPE_READWRITE ||
	    locktype == SMB_LOCK_TYPE_READONLY);

	lock = kmem_zalloc(sizeof (smb_lock_t), KM_SLEEP);
	lock->l_magic = SMB_LOCK_MAGIC;
	lock->l_sr = sr; /* Invalid after lock is active */
	/* Requester identity used for ownership checks in the lock rules. */
	lock->l_session_kid = sr->session->s_kid;
	lock->l_session = sr->session;
	lock->l_file = sr->fid_ofile;
	lock->l_uid = sr->smb_uid;
	lock->l_pid = sr->smb_pid;
	lock->l_type = locktype;
	lock->l_start = start;
	lock->l_length = length;
	/*
	 * Calculate the absolute end time so that we can use it
	 * in cv_timedwait. A timeout of UINT_MAX means wait forever.
	 */
	lock->l_end_time = ddi_get_lbolt() + MSEC_TO_TICK(timeout);
	if (timeout == UINT_MAX)
		lock->l_flags |= SMB_LOCK_FLAG_INDEFINITE;

	mutex_init(&lock->l_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lock->l_cv, NULL, CV_DEFAULT, NULL);
	smb_slist_constructor(&lock->l_conflict_list, sizeof (smb_lock_t),
	    offsetof(smb_lock_t, l_conflict_lnd));

	return (lock);
}
754
/*
 * Tear down the synchronization objects embedded in a lock and free it.
 * The lock must no longer be linked on any node lock list.
 */
static void
smb_lock_free(smb_lock_t *lock)
{
	/* Destroy in reverse order of initialization in smb_lock_create. */
	smb_slist_destructor(&lock->l_conflict_list);
	cv_destroy(&lock->l_cv);
	mutex_destroy(&lock->l_mutex);

	kmem_free(lock, sizeof (smb_lock_t));
}
764
/*
 * smb_lock_destroy
 *
 * Wake any waiters blocked on this lock, wait for them to drop their
 * references, then free the lock.
 *
 * Caller must hold node->n_lock_list
 */
static void
smb_lock_destroy(smb_lock_t *lock)
{
	/*
	 * Caller must hold node->n_lock_list lock.
	 */
	mutex_enter(&lock->l_mutex);
	cv_broadcast(&lock->l_cv);
	mutex_exit(&lock->l_mutex);

	/*
	 * The cv_broadcast above should wake up any locks that previous
	 * had conflicts with this lock. Wait for the locking threads
	 * to remove their references to this lock.
	 */
	smb_slist_wait_for_empty(&lock->l_conflict_list);

	smb_lock_free(lock);
}
789
/*
 * smb_is_range_unlocked
 *
 * Checks if the current unlock byte range request overlaps another lock
 * This function is used to determine where POSIX unlocks should be
 * applied.
 *
 * The return code and the value of new_mark must be interpreted as
 * follows:
 *
 * B_TRUE and (new_mark == 0):
 *   This is the last or only lock left to be unlocked
 *
 * B_TRUE and (new_mark > 0):
 *   The range from start to new_mark can be unlocked
 *
 * B_FALSE and (new_mark == 0):
 *   The unlock can't be performed and we are done
 *
 * B_FALSE and (new_mark > 0),
 *   The range from start to new_mark can't be unlocked
 *   Start should be reset to new_mark for the next pass
 */

static boolean_t
smb_is_range_unlocked(uint64_t start, uint64_t end, uint32_t uniqid,
    smb_llist_t *llist_head, uint64_t *new_mark)
{
	struct smb_lock	*lk = NULL;
	/* Lowest start offset of any lock that begins inside the range. */
	uint64_t	low_water_mark = MAXOFFSET_T;
	uint64_t	lk_start;
	uint64_t	lk_end;

	*new_mark = 0;
	lk = smb_llist_head(llist_head);
	while (lk) {
		/* Zero-length locks cover no bytes; they can't block us. */
		if (lk->l_length == 0) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		/*
		 * Only locks held through the same ofile (matching uniqid)
		 * are considered; locks from other ofiles are presumably
		 * tracked by their own POSIX lock owner -- confirm against
		 * smb_fsop_frlock if this is changed.
		 */
		if (lk->l_file->f_uniqid != uniqid) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		/* lk_end is the inclusive last byte of this lock. */
		lk_end = lk->l_start + lk->l_length - 1;
		lk_start = lk->l_start;

		/*
		 * there is no overlap for the first 2 cases
		 * check next node
		 */
		if (lk_end < start) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}
		if (lk_start > end) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		/* this range is completely locked */
		if ((lk_start <= start) && (lk_end >= end)) {
			return (B_FALSE);
		}

		/* the first part of this range is locked */
		if ((start >= lk_start) && (start <= lk_end)) {
			/* Tell the caller where the locked prefix ends. */
			if (end > lk_end)
				*new_mark = lk_end + 1;
			return (B_FALSE);
		}

		/* this piece is unlocked */
		if ((lk_start >= start) && (lk_start <= end)) {
			/* Track the earliest lock that begins in the range. */
			if (low_water_mark > lk_start)
				low_water_mark = lk_start;
		}

		lk = smb_llist_next(llist_head, lk);
	}

	if (low_water_mark != MAXOFFSET_T) {
		/* Free up to the first lock that begins inside the range. */
		*new_mark = low_water_mark;
		return (B_TRUE);
	}
	/* the range is completely unlocked */
	return (B_TRUE);
}
880