112198SEiji.Ota@Sun.COM /*
212198SEiji.Ota@Sun.COM * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
312198SEiji.Ota@Sun.COM */
412198SEiji.Ota@Sun.COM
512198SEiji.Ota@Sun.COM /*
612763SGiri.Adari@Sun.COM * This file contains code imported from the OFED rds source file recv.c
712763SGiri.Adari@Sun.COM * Oracle elects to have and use the contents of rds_recv.c under and governed
812763SGiri.Adari@Sun.COM * by the OpenIB.org BSD license (see below for full license text). However,
912763SGiri.Adari@Sun.COM * the following notice accompanied the original version of this file:
1012763SGiri.Adari@Sun.COM */
1112763SGiri.Adari@Sun.COM
1212763SGiri.Adari@Sun.COM /*
1312198SEiji.Ota@Sun.COM * Copyright (c) 2006 Oracle. All rights reserved.
1412198SEiji.Ota@Sun.COM *
1512198SEiji.Ota@Sun.COM * This software is available to you under a choice of one of two
1612198SEiji.Ota@Sun.COM * licenses. You may choose to be licensed under the terms of the GNU
1712198SEiji.Ota@Sun.COM * General Public License (GPL) Version 2, available from the file
1812198SEiji.Ota@Sun.COM * COPYING in the main directory of this source tree, or the
1912198SEiji.Ota@Sun.COM * OpenIB.org BSD license below:
2012198SEiji.Ota@Sun.COM *
2112198SEiji.Ota@Sun.COM * Redistribution and use in source and binary forms, with or
2212198SEiji.Ota@Sun.COM * without modification, are permitted provided that the following
2312198SEiji.Ota@Sun.COM * conditions are met:
2412198SEiji.Ota@Sun.COM *
2512198SEiji.Ota@Sun.COM * - Redistributions of source code must retain the above
2612198SEiji.Ota@Sun.COM * copyright notice, this list of conditions and the following
2712198SEiji.Ota@Sun.COM * disclaimer.
2812198SEiji.Ota@Sun.COM *
2912198SEiji.Ota@Sun.COM * - Redistributions in binary form must reproduce the above
3012198SEiji.Ota@Sun.COM * copyright notice, this list of conditions and the following
3112198SEiji.Ota@Sun.COM * disclaimer in the documentation and/or other materials
3212198SEiji.Ota@Sun.COM * provided with the distribution.
3312198SEiji.Ota@Sun.COM *
3412198SEiji.Ota@Sun.COM * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
3512198SEiji.Ota@Sun.COM * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
3612198SEiji.Ota@Sun.COM * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
3712198SEiji.Ota@Sun.COM * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
3812198SEiji.Ota@Sun.COM * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
3912198SEiji.Ota@Sun.COM * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
4012198SEiji.Ota@Sun.COM * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
4112198SEiji.Ota@Sun.COM * SOFTWARE.
4212198SEiji.Ota@Sun.COM *
4312198SEiji.Ota@Sun.COM */
4412198SEiji.Ota@Sun.COM #include <sys/rds.h>
4512198SEiji.Ota@Sun.COM
4612198SEiji.Ota@Sun.COM #include <sys/ib/clients/rdsv3/rdsv3.h>
4712198SEiji.Ota@Sun.COM #include <sys/ib/clients/rdsv3/rdma.h>
4812198SEiji.Ota@Sun.COM #include <sys/ib/clients/rdsv3/rdsv3_debug.h>
4912198SEiji.Ota@Sun.COM
5012198SEiji.Ota@Sun.COM void
rdsv3_inc_init(struct rdsv3_incoming * inc,struct rdsv3_connection * conn,uint32_be_t saddr)5112198SEiji.Ota@Sun.COM rdsv3_inc_init(struct rdsv3_incoming *inc, struct rdsv3_connection *conn,
5212198SEiji.Ota@Sun.COM uint32_be_t saddr)
5312198SEiji.Ota@Sun.COM {
5412198SEiji.Ota@Sun.COM RDSV3_DPRINTF5("rdsv3_inc_init", "Enter(inc: %p, conn: %p)", inc, conn);
5512198SEiji.Ota@Sun.COM inc->i_refcount = 1;
5612198SEiji.Ota@Sun.COM list_link_init(&inc->i_item);
5712198SEiji.Ota@Sun.COM inc->i_conn = conn;
5812198SEiji.Ota@Sun.COM inc->i_saddr = saddr;
5912198SEiji.Ota@Sun.COM inc->i_rdma_cookie = 0;
6012198SEiji.Ota@Sun.COM }
6112198SEiji.Ota@Sun.COM
6212198SEiji.Ota@Sun.COM void
rdsv3_inc_addref(struct rdsv3_incoming * inc)6312198SEiji.Ota@Sun.COM rdsv3_inc_addref(struct rdsv3_incoming *inc)
6412198SEiji.Ota@Sun.COM {
6512198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_inc_addref",
6612198SEiji.Ota@Sun.COM "addref inc %p ref %d", inc, atomic_get(&inc->i_refcount));
6712198SEiji.Ota@Sun.COM atomic_add_32(&inc->i_refcount, 1);
6812198SEiji.Ota@Sun.COM }
6912198SEiji.Ota@Sun.COM
7012198SEiji.Ota@Sun.COM void
rdsv3_inc_put(struct rdsv3_incoming * inc)7112198SEiji.Ota@Sun.COM rdsv3_inc_put(struct rdsv3_incoming *inc)
7212198SEiji.Ota@Sun.COM {
7312198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_inc_put", "put inc %p ref %d",
7412198SEiji.Ota@Sun.COM inc, atomic_get(&inc->i_refcount));
7512198SEiji.Ota@Sun.COM if (atomic_dec_and_test(&inc->i_refcount)) {
7612198SEiji.Ota@Sun.COM ASSERT(!list_link_active(&inc->i_item));
7712198SEiji.Ota@Sun.COM
7812198SEiji.Ota@Sun.COM inc->i_conn->c_trans->inc_free(inc);
7912198SEiji.Ota@Sun.COM }
8012198SEiji.Ota@Sun.COM }
8112198SEiji.Ota@Sun.COM
8212198SEiji.Ota@Sun.COM /*ARGSUSED*/
8312198SEiji.Ota@Sun.COM static void
rdsv3_recv_rcvbuf_delta(struct rdsv3_sock * rs,struct rsock * sk,struct rdsv3_cong_map * map,int delta,uint16_be_t port)8412198SEiji.Ota@Sun.COM rdsv3_recv_rcvbuf_delta(struct rdsv3_sock *rs, struct rsock *sk,
8512198SEiji.Ota@Sun.COM struct rdsv3_cong_map *map,
8612198SEiji.Ota@Sun.COM int delta, uint16_be_t port)
8712198SEiji.Ota@Sun.COM {
8812198SEiji.Ota@Sun.COM int now_congested;
8912198SEiji.Ota@Sun.COM
9012198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_recv_rcvbuf_delta",
9112198SEiji.Ota@Sun.COM "Enter(rs: %p, map: %p, delta: %d, port: %d)",
9212198SEiji.Ota@Sun.COM rs, map, delta, port);
9312198SEiji.Ota@Sun.COM
9412198SEiji.Ota@Sun.COM if (delta == 0)
9512198SEiji.Ota@Sun.COM return;
9612198SEiji.Ota@Sun.COM
9712198SEiji.Ota@Sun.COM rs->rs_rcv_bytes += delta;
9812198SEiji.Ota@Sun.COM now_congested = rs->rs_rcv_bytes > rdsv3_sk_rcvbuf(rs);
9912198SEiji.Ota@Sun.COM
10012198SEiji.Ota@Sun.COM RDSV3_DPRINTF5("rdsv3_recv_rcvbuf_delta",
10112198SEiji.Ota@Sun.COM "rs %p (%u.%u.%u.%u:%u) recv bytes %d buf %d "
10212198SEiji.Ota@Sun.COM "now_cong %d delta %d",
10312198SEiji.Ota@Sun.COM rs, NIPQUAD(rs->rs_bound_addr),
10412198SEiji.Ota@Sun.COM (int)ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
10512198SEiji.Ota@Sun.COM rdsv3_sk_rcvbuf(rs), now_congested, delta);
10612198SEiji.Ota@Sun.COM
10712198SEiji.Ota@Sun.COM /* wasn't -> am congested */
10812198SEiji.Ota@Sun.COM if (!rs->rs_congested && now_congested) {
10912198SEiji.Ota@Sun.COM rs->rs_congested = 1;
11012198SEiji.Ota@Sun.COM rdsv3_cong_set_bit(map, port);
11112198SEiji.Ota@Sun.COM rdsv3_cong_queue_updates(map);
11212198SEiji.Ota@Sun.COM }
11312198SEiji.Ota@Sun.COM /* was -> aren't congested */
11412198SEiji.Ota@Sun.COM /*
11512198SEiji.Ota@Sun.COM * Require more free space before reporting uncongested to prevent
11612198SEiji.Ota@Sun.COM * bouncing cong/uncong state too often
11712198SEiji.Ota@Sun.COM */
11812198SEiji.Ota@Sun.COM else if (rs->rs_congested &&
11912198SEiji.Ota@Sun.COM (rs->rs_rcv_bytes < (rdsv3_sk_rcvbuf(rs)/2))) {
12012198SEiji.Ota@Sun.COM rs->rs_congested = 0;
12112198SEiji.Ota@Sun.COM rdsv3_cong_clear_bit(map, port);
12212198SEiji.Ota@Sun.COM rdsv3_cong_queue_updates(map);
12312198SEiji.Ota@Sun.COM }
12412198SEiji.Ota@Sun.COM
12512198SEiji.Ota@Sun.COM /* do nothing if no change in cong state */
12612198SEiji.Ota@Sun.COM
12712198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_recv_rcvbuf_delta", "Return(rs: %p)", rs);
12812198SEiji.Ota@Sun.COM }
12912198SEiji.Ota@Sun.COM
13012198SEiji.Ota@Sun.COM /*
13112198SEiji.Ota@Sun.COM * Process all extension headers that come with this message.
13212198SEiji.Ota@Sun.COM */
13312198SEiji.Ota@Sun.COM static void
rdsv3_recv_incoming_exthdrs(struct rdsv3_incoming * inc,struct rdsv3_sock * rs)13412198SEiji.Ota@Sun.COM rdsv3_recv_incoming_exthdrs(struct rdsv3_incoming *inc, struct rdsv3_sock *rs)
13512198SEiji.Ota@Sun.COM {
13612198SEiji.Ota@Sun.COM struct rdsv3_header *hdr = &inc->i_hdr;
13712198SEiji.Ota@Sun.COM unsigned int pos = 0, type, len;
13812198SEiji.Ota@Sun.COM union {
13912198SEiji.Ota@Sun.COM struct rdsv3_ext_header_version version;
14012198SEiji.Ota@Sun.COM struct rdsv3_ext_header_rdma rdma;
14112198SEiji.Ota@Sun.COM struct rdsv3_ext_header_rdma_dest rdma_dest;
14212198SEiji.Ota@Sun.COM } buffer;
14312198SEiji.Ota@Sun.COM
14412198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_recv_incoming_exthdrs", "Enter");
14512198SEiji.Ota@Sun.COM while (1) {
14612198SEiji.Ota@Sun.COM len = sizeof (buffer);
14712198SEiji.Ota@Sun.COM type = rdsv3_message_next_extension(hdr, &pos, &buffer, &len);
14812198SEiji.Ota@Sun.COM if (type == RDSV3_EXTHDR_NONE)
14912198SEiji.Ota@Sun.COM break;
15012198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("recv_incoming_exthdrs", "type %d", type);
15112198SEiji.Ota@Sun.COM /* Process extension header here */
15212198SEiji.Ota@Sun.COM switch (type) {
15312198SEiji.Ota@Sun.COM case RDSV3_EXTHDR_RDMA:
15412198SEiji.Ota@Sun.COM rdsv3_rdma_unuse(rs, ntohl(buffer.rdma.h_rdma_rkey),
15512198SEiji.Ota@Sun.COM 0);
15612198SEiji.Ota@Sun.COM break;
15712198SEiji.Ota@Sun.COM
15812198SEiji.Ota@Sun.COM case RDSV3_EXTHDR_RDMA_DEST:
15912198SEiji.Ota@Sun.COM /*
16012198SEiji.Ota@Sun.COM * We ignore the size for now. We could stash it
16112198SEiji.Ota@Sun.COM * somewhere and use it for error checking.
16212198SEiji.Ota@Sun.COM */
16312198SEiji.Ota@Sun.COM inc->i_rdma_cookie = rdsv3_rdma_make_cookie(
16412198SEiji.Ota@Sun.COM ntohl(buffer.rdma_dest.h_rdma_rkey),
16512198SEiji.Ota@Sun.COM ntohl(buffer.rdma_dest.h_rdma_offset));
16612198SEiji.Ota@Sun.COM
16712198SEiji.Ota@Sun.COM break;
16812198SEiji.Ota@Sun.COM }
16912198SEiji.Ota@Sun.COM }
17012198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_recv_incoming_exthdrs", "Return");
17112198SEiji.Ota@Sun.COM }
17212198SEiji.Ota@Sun.COM
17312198SEiji.Ota@Sun.COM /*
17412198SEiji.Ota@Sun.COM * The transport must make sure that this is serialized against other
17512198SEiji.Ota@Sun.COM * rx and conn reset on this specific conn.
17612198SEiji.Ota@Sun.COM *
17712198SEiji.Ota@Sun.COM * We currently assert that only one fragmented message will be sent
17812198SEiji.Ota@Sun.COM * down a connection at a time. This lets us reassemble in the conn
17912198SEiji.Ota@Sun.COM * instead of per-flow which means that we don't have to go digging through
18012198SEiji.Ota@Sun.COM * flows to tear down partial reassembly progress on conn failure and
18112198SEiji.Ota@Sun.COM * we save flow lookup and locking for each frag arrival. It does mean
18212198SEiji.Ota@Sun.COM * that small messages will wait behind large ones. Fragmenting at all
18312198SEiji.Ota@Sun.COM * is only to reduce the memory consumption of pre-posted buffers.
18412198SEiji.Ota@Sun.COM *
18512198SEiji.Ota@Sun.COM * The caller passes in saddr and daddr instead of us getting it from the
18612198SEiji.Ota@Sun.COM * conn. This lets loopback, who only has one conn for both directions,
18712198SEiji.Ota@Sun.COM * tell us which roles the addrs in the conn are playing for this message.
18812198SEiji.Ota@Sun.COM */
/*
 * Deliver one fully reassembled incoming message to the socket bound to
 * its destination port, or handle/drop it (stale retransmit, ping probe,
 * no bound socket, dead socket).  See the block comment above for the
 * serialization and reassembly assumptions.  saddr/daddr/gfp are unused
 * in this port (ARGSUSED); addressing is taken from the header/conn.
 */
/* ARGSUSED */
void
rdsv3_recv_incoming(struct rdsv3_connection *conn, uint32_be_t saddr,
    uint32_be_t daddr, struct rdsv3_incoming *inc, int gfp)
{
	struct rdsv3_sock *rs = NULL;
	struct rsock *sk;

	/* claim the inc for this conn and timestamp its arrival */
	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;

	RDSV3_DPRINTF5("rdsv3_recv_incoming",
	    "conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
	    "flags 0x%x rx_jiffies %lu", conn,
	    (unsigned long long)conn->c_next_rx_seq,
	    inc,
	    (unsigned long long)ntohll(inc->i_hdr.h_sequence),
	    ntohl(inc->i_hdr.h_len),
	    ntohs(inc->i_hdr.h_sport),
	    ntohs(inc->i_hdr.h_dport),
	    inc->i_hdr.h_flags,
	    inc->i_rx_jiffies);

	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	/*
	 * Only a message explicitly flagged RETRANSMITTED is dropped for
	 * an old sequence; an unflagged lower seq is accepted and resets
	 * c_next_rx_seq below.
	 */
	if (ntohll(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDSV3_FLAG_RETRANSMITTED)) {
		rdsv3_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	conn->c_next_rx_seq = ntohll(inc->i_hdr.h_sequence) + 1;

	/* dport 0 is the ping port: answer with a pong and stop here */
	if (rdsv3_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		rdsv3_stats_inc(s_recv_ping);
		(void) rdsv3_send_pong(conn, inc->i_hdr.h_sport);
		goto out;
	}

	/* takes a hold on rs; released via rdsv3_sock_put() at 'out' */
	rs = rdsv3_find_bound(conn, inc->i_hdr.h_dport);
	if (!rs) {
		rdsv3_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rdsv3_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rdsv3_release() which marks the socket dead. */
	sk = rdsv3_rs_to_sk(rs);

	/* serialize with rdsv3_release -> sock_orphan */
	rw_enter(&rs->rs_recv_lock, RW_WRITER);
	if (!rdsv3_sk_sock_flag(sk, SOCK_DEAD)) {
		int error, bytes;
		RDSV3_DPRINTF5("rdsv3_recv_incoming",
		    "adding inc %p to rs %p's recv queue", inc, rs);
		rdsv3_stats_inc(s_recv_queued);
		rdsv3_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
		    ntohl(inc->i_hdr.h_len),
		    inc->i_hdr.h_dport);
		/* the recv queue holds its own reference on the inc */
		rdsv3_inc_addref(inc);
		list_insert_tail(&rs->rs_recv_queue, inc);
		/* snapshot byte count before dropping the lock */
		bytes = rs->rs_rcv_bytes;
		rw_exit(&rs->rs_recv_lock);

		__rdsv3_wake_sk_sleep(sk);

		/* wake up anyone waiting in poll */
		sk->sk_upcalls->su_recv(sk->sk_upper_handle, NULL,
		    bytes, 0, &error, NULL);
		if (error != 0) {
			RDSV3_DPRINTF2("rdsv3_recv_incoming",
			    "su_recv returned: %d", error);
		}
	} else {
		rdsv3_stats_inc(s_recv_drop_dead_sock);
		rw_exit(&rs->rs_recv_lock);
	}

out:
	if (rs)
		rdsv3_sock_put(rs);
}
29012198SEiji.Ota@Sun.COM
29112198SEiji.Ota@Sun.COM /*
29212198SEiji.Ota@Sun.COM * be very careful here. This is being called as the condition in
29312198SEiji.Ota@Sun.COM * wait_event_*() needs to cope with being called many times.
29412198SEiji.Ota@Sun.COM */
29512198SEiji.Ota@Sun.COM static int
rdsv3_next_incoming(struct rdsv3_sock * rs,struct rdsv3_incoming ** inc)29612198SEiji.Ota@Sun.COM rdsv3_next_incoming(struct rdsv3_sock *rs, struct rdsv3_incoming **inc)
29712198SEiji.Ota@Sun.COM {
29812676SEiji.Ota@Sun.COM if (!*inc) {
29912198SEiji.Ota@Sun.COM rw_enter(&rs->rs_recv_lock, RW_READER);
30012198SEiji.Ota@Sun.COM if (!list_is_empty(&rs->rs_recv_queue)) {
30112198SEiji.Ota@Sun.COM *inc = list_head(&rs->rs_recv_queue);
30212198SEiji.Ota@Sun.COM rdsv3_inc_addref(*inc);
30312198SEiji.Ota@Sun.COM }
30412198SEiji.Ota@Sun.COM rw_exit(&rs->rs_recv_lock);
30512198SEiji.Ota@Sun.COM }
30612198SEiji.Ota@Sun.COM
30712198SEiji.Ota@Sun.COM return (*inc != NULL);
30812198SEiji.Ota@Sun.COM }
30912198SEiji.Ota@Sun.COM
31012198SEiji.Ota@Sun.COM static int
rdsv3_still_queued(struct rdsv3_sock * rs,struct rdsv3_incoming * inc,int drop)31112198SEiji.Ota@Sun.COM rdsv3_still_queued(struct rdsv3_sock *rs, struct rdsv3_incoming *inc,
31212198SEiji.Ota@Sun.COM int drop)
31312198SEiji.Ota@Sun.COM {
31412198SEiji.Ota@Sun.COM struct rsock *sk = rdsv3_rs_to_sk(rs);
31512198SEiji.Ota@Sun.COM int ret = 0;
31612198SEiji.Ota@Sun.COM
31712198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_still_queued", "Enter rs: %p inc: %p drop: %d",
31812198SEiji.Ota@Sun.COM rs, inc, drop);
31912198SEiji.Ota@Sun.COM
32012198SEiji.Ota@Sun.COM rw_enter(&rs->rs_recv_lock, RW_WRITER);
32112198SEiji.Ota@Sun.COM if (list_link_active(&inc->i_item)) {
32212198SEiji.Ota@Sun.COM ret = 1;
32312198SEiji.Ota@Sun.COM if (drop) {
32412198SEiji.Ota@Sun.COM /* XXX make sure this i_conn is reliable */
32512198SEiji.Ota@Sun.COM rdsv3_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
32612198SEiji.Ota@Sun.COM -ntohl(inc->i_hdr.h_len),
32712198SEiji.Ota@Sun.COM inc->i_hdr.h_dport);
32812198SEiji.Ota@Sun.COM list_remove_node(&inc->i_item);
32912198SEiji.Ota@Sun.COM rdsv3_inc_put(inc);
33012198SEiji.Ota@Sun.COM }
33112198SEiji.Ota@Sun.COM }
33212198SEiji.Ota@Sun.COM rw_exit(&rs->rs_recv_lock);
33312198SEiji.Ota@Sun.COM
33412198SEiji.Ota@Sun.COM RDSV3_DPRINTF5("rdsv3_still_queued",
33512198SEiji.Ota@Sun.COM "inc %p rs %p still %d dropped %d", inc, rs, ret, drop);
33612198SEiji.Ota@Sun.COM return (ret);
33712198SEiji.Ota@Sun.COM }
33812198SEiji.Ota@Sun.COM
33912198SEiji.Ota@Sun.COM /*
34012198SEiji.Ota@Sun.COM * Pull errors off the error queue.
34112198SEiji.Ota@Sun.COM * If msghdr is NULL, we will just purge the error queue.
34212198SEiji.Ota@Sun.COM */
34312198SEiji.Ota@Sun.COM int
rdsv3_notify_queue_get(struct rdsv3_sock * rs,struct msghdr * msghdr)34412198SEiji.Ota@Sun.COM rdsv3_notify_queue_get(struct rdsv3_sock *rs, struct msghdr *msghdr)
34512198SEiji.Ota@Sun.COM {
34612198SEiji.Ota@Sun.COM struct rdsv3_notifier *notifier;
34712863SEiji.Ota@Sun.COM struct rds_rdma_notify cmsg;
34812198SEiji.Ota@Sun.COM unsigned int count = 0, max_messages = ~0U;
34912198SEiji.Ota@Sun.COM list_t copy;
35012198SEiji.Ota@Sun.COM int err = 0;
35112198SEiji.Ota@Sun.COM
35212198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_notify_queue_get", "Enter(rs: %p)", rs);
35312198SEiji.Ota@Sun.COM
35412198SEiji.Ota@Sun.COM list_create(©, sizeof (struct rdsv3_notifier),
35512198SEiji.Ota@Sun.COM offsetof(struct rdsv3_notifier, n_list));
35612198SEiji.Ota@Sun.COM
35712198SEiji.Ota@Sun.COM
35812198SEiji.Ota@Sun.COM /*
35912198SEiji.Ota@Sun.COM * put_cmsg copies to user space and thus may sleep. We can't do this
36012198SEiji.Ota@Sun.COM * with rs_lock held, so first grab as many notifications as we can
36112198SEiji.Ota@Sun.COM * stuff
36212198SEiji.Ota@Sun.COM * in the user provided cmsg buffer. We don't try to copy more, to avoid
36312198SEiji.Ota@Sun.COM * losing notifications - except when the buffer is so small that
36412198SEiji.Ota@Sun.COM * it wouldn't
36512198SEiji.Ota@Sun.COM * even hold a single notification. Then we give him as much of this
36612198SEiji.Ota@Sun.COM * single
36712198SEiji.Ota@Sun.COM * msg as we can squeeze in, and set MSG_CTRUNC.
36812198SEiji.Ota@Sun.COM */
36912198SEiji.Ota@Sun.COM if (msghdr) {
37012198SEiji.Ota@Sun.COM max_messages =
37112198SEiji.Ota@Sun.COM msghdr->msg_controllen / CMSG_SPACE(sizeof (cmsg));
37212198SEiji.Ota@Sun.COM if (!max_messages)
37312198SEiji.Ota@Sun.COM max_messages = 1;
37412198SEiji.Ota@Sun.COM }
37512198SEiji.Ota@Sun.COM
37612198SEiji.Ota@Sun.COM mutex_enter(&rs->rs_lock);
37712198SEiji.Ota@Sun.COM while (!list_is_empty(&rs->rs_notify_queue) && count < max_messages) {
37812198SEiji.Ota@Sun.COM notifier = list_remove_head(&rs->rs_notify_queue);
37912198SEiji.Ota@Sun.COM list_insert_tail(©, notifier);
38012198SEiji.Ota@Sun.COM count++;
38112198SEiji.Ota@Sun.COM }
38212198SEiji.Ota@Sun.COM mutex_exit(&rs->rs_lock);
38312198SEiji.Ota@Sun.COM
38412198SEiji.Ota@Sun.COM if (!count)
38512198SEiji.Ota@Sun.COM return (0);
38612198SEiji.Ota@Sun.COM
38712198SEiji.Ota@Sun.COM while (!list_is_empty(©)) {
38812198SEiji.Ota@Sun.COM notifier = list_remove_head(©);
38912198SEiji.Ota@Sun.COM
39012198SEiji.Ota@Sun.COM if (msghdr) {
39112198SEiji.Ota@Sun.COM cmsg.user_token = notifier->n_user_token;
39212198SEiji.Ota@Sun.COM cmsg.status = notifier->n_status;
39312198SEiji.Ota@Sun.COM
39412198SEiji.Ota@Sun.COM err = rdsv3_put_cmsg(msghdr, SOL_RDS,
39512863SEiji.Ota@Sun.COM RDS_CMSG_RDMA_STATUS, sizeof (cmsg), &cmsg);
39612198SEiji.Ota@Sun.COM if (err)
39712198SEiji.Ota@Sun.COM break;
39812198SEiji.Ota@Sun.COM }
39912198SEiji.Ota@Sun.COM
40012198SEiji.Ota@Sun.COM kmem_free(notifier, sizeof (struct rdsv3_notifier));
40112198SEiji.Ota@Sun.COM }
40212198SEiji.Ota@Sun.COM
40312198SEiji.Ota@Sun.COM /*
40412198SEiji.Ota@Sun.COM * If we bailed out because of an error in put_cmsg,
40512198SEiji.Ota@Sun.COM * we may be left with one or more notifications that we
40612198SEiji.Ota@Sun.COM * didn't process. Return them to the head of the list.
40712198SEiji.Ota@Sun.COM */
40812198SEiji.Ota@Sun.COM if (!list_is_empty(©)) {
40912198SEiji.Ota@Sun.COM mutex_enter(&rs->rs_lock);
41012198SEiji.Ota@Sun.COM list_splice(©, &rs->rs_notify_queue);
41112198SEiji.Ota@Sun.COM mutex_exit(&rs->rs_lock);
41212198SEiji.Ota@Sun.COM }
41312198SEiji.Ota@Sun.COM
41412198SEiji.Ota@Sun.COM RDSV3_DPRINTF4("rdsv3_notify_queue_get", "Return(rs: %p)", rs);
41512198SEiji.Ota@Sun.COM
41612198SEiji.Ota@Sun.COM return (err);
41712198SEiji.Ota@Sun.COM }
41812198SEiji.Ota@Sun.COM
41912198SEiji.Ota@Sun.COM /*
42012198SEiji.Ota@Sun.COM * Queue a congestion notification
42112198SEiji.Ota@Sun.COM */
42212198SEiji.Ota@Sun.COM static int
rdsv3_notify_cong(struct rdsv3_sock * rs,struct msghdr * msghdr)42312198SEiji.Ota@Sun.COM rdsv3_notify_cong(struct rdsv3_sock *rs, struct msghdr *msghdr)
42412198SEiji.Ota@Sun.COM {
42512198SEiji.Ota@Sun.COM uint64_t notify = rs->rs_cong_notify;
42612198SEiji.Ota@Sun.COM int err;
42712198SEiji.Ota@Sun.COM
42812863SEiji.Ota@Sun.COM err = rdsv3_put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
42912198SEiji.Ota@Sun.COM sizeof (notify), ¬ify);
43012198SEiji.Ota@Sun.COM if (err)
43112198SEiji.Ota@Sun.COM return (err);
43212198SEiji.Ota@Sun.COM
43312198SEiji.Ota@Sun.COM mutex_enter(&rs->rs_lock);
43412198SEiji.Ota@Sun.COM rs->rs_cong_notify &= ~notify;
43512198SEiji.Ota@Sun.COM mutex_exit(&rs->rs_lock);
43612198SEiji.Ota@Sun.COM
43712198SEiji.Ota@Sun.COM return (0);
43812198SEiji.Ota@Sun.COM }
43912198SEiji.Ota@Sun.COM
44012198SEiji.Ota@Sun.COM /*
44112198SEiji.Ota@Sun.COM * Receive any control messages.
44212198SEiji.Ota@Sun.COM */
44312198SEiji.Ota@Sun.COM static int
rdsv3_cmsg_recv(struct rdsv3_incoming * inc,struct msghdr * msg)44412198SEiji.Ota@Sun.COM rdsv3_cmsg_recv(struct rdsv3_incoming *inc, struct msghdr *msg)
44512198SEiji.Ota@Sun.COM {
44612896SEiji.Ota@Sun.COM int ret = 0;
44712896SEiji.Ota@Sun.COM if (inc->i_rdma_cookie) {
44812896SEiji.Ota@Sun.COM ret = rdsv3_put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
44912896SEiji.Ota@Sun.COM sizeof (inc->i_rdma_cookie), &inc->i_rdma_cookie);
45012896SEiji.Ota@Sun.COM }
45112896SEiji.Ota@Sun.COM return (ret);
45212198SEiji.Ota@Sun.COM }
45312198SEiji.Ota@Sun.COM
/*
 * recvmsg entry point for an RDS socket.  Delivery priority per loop
 * iteration: (1) pending RDMA notifications, (2) pending congestion
 * updates, (3) the next queued incoming message.  Blocks on
 * sk_sleep (interruptibly) unless the socket/flags request non-blocking.
 * Returns bytes copied, or a negative errno (-EAGAIN, -EINTR, -EFAULT, ...).
 * On success msg_name/msg_namelen may be filled with a kmem-allocated
 * sockaddr_in naming the sender (ownership passes to the caller).
 */
int
rdsv3_recvmsg(struct rdsv3_sock *rs, uio_t *uio,
    struct nmsghdr *msg, size_t size, int msg_flags)
{
	struct rsock *sk = rdsv3_rs_to_sk(rs);
	long timeo;
	int ret = 0;
	struct sockaddr_in *sin = NULL;
	struct rdsv3_incoming *inc = NULL;
	boolean_t nonblock = B_FALSE;

	RDSV3_DPRINTF4("rdsv3_recvmsg",
	    "Enter(rs: %p size: %d msg_flags: 0x%x)", rs, size, msg_flags);

	/* non-blocking if the fd or this call says so */
	if ((uio->uio_fmode & (FNDELAY | FNONBLOCK)) ||
	    (msg_flags & MSG_DONTWAIT))
		nonblock = B_TRUE;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
	/*
	 * NOTE(review): timeo is computed but never used below - the wait
	 * loop is untimed.  Presumably leftover from the Linux port; confirm
	 * whether a receive timeout should be honored here.
	 */
	timeo = rdsv3_rcvtimeo(sk, nonblock);

	/* RDS has no out-of-band data; ret stays 0 */
	if (msg_flags & MSG_OOB)
		goto out;

	/* mark the first cmsg position */
	if (msg) {
		msg->msg_control = NULL;
	}

	while (1) {
		/*
		 * If there are pending notifications, do those -
		 * and nothing else
		 */
		if (!list_is_empty(&rs->rs_notify_queue)) {
			ret = rdsv3_notify_queue_get(rs, msg);

			/* name the sender if the caller asked for an address */
			if (msg && msg->msg_namelen) {
				sin = kmem_zalloc(sizeof (struct sockaddr_in),
				    KM_SLEEP);
				sin->sin_family = AF_INET_OFFLOAD;
				if (inc) {
					sin->sin_port = inc->i_hdr.h_sport;
					sin->sin_addr.s_addr = inc->i_saddr;
				}
				msg->msg_namelen = sizeof (struct sockaddr_in);
				msg->msg_name = sin;
			}
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rdsv3_notify_cong(rs, msg);
			goto out;
		}

		if (!rdsv3_next_incoming(rs, &inc)) {
			if (nonblock) {
				ret = -EAGAIN;
				break;
			}

			RDSV3_DPRINTF3("rdsv3_recvmsg",
			    "Before wait (rs: %p)", rs);

#if 0
			ret = rdsv3_wait_sig(sk->sk_sleep,
			    !(list_is_empty(&rs->rs_notify_queue) &&
			    !rs->rs_cong_notify &&
			    !rdsv3_next_incoming(rs, &inc)));
			if (ret == 0) {
				/* signal/timeout pending */
				RDSV3_DPRINTF2("rdsv3_recvmsg",
				    "woke due to signal");
				ret = -ERESTART;
			}
#else
			/*
			 * Sleep until a notification, a congestion update,
			 * or a message arrives.  cv_wait_sig() returns 0 on
			 * signal; map that to -EINTR and bail out of the
			 * inner wait.
			 */
			mutex_enter(&sk->sk_sleep->waitq_mutex);
			sk->sk_sleep->waitq_waiters++;
			while ((list_is_empty(&rs->rs_notify_queue) &&
			    !rs->rs_cong_notify &&
			    !rdsv3_next_incoming(rs, &inc))) {
				ret = cv_wait_sig(&sk->sk_sleep->waitq_cv,
				    &sk->sk_sleep->waitq_mutex);
				if (ret == 0) {
					/* signal/timeout pending */
					RDSV3_DPRINTF2("rdsv3_recvmsg",
					    "woke due to signal");
					ret = -EINTR;
					break;
				}
			}
			sk->sk_sleep->waitq_waiters--;
			mutex_exit(&sk->sk_sleep->waitq_mutex);
#endif

			RDSV3_DPRINTF5("rdsv3_recvmsg",
			    "recvmsg woke rs: %p inc %p ret %d",
			    rs, inc, -ret);

			if (ret < 0)
				break;

			/*
			 * if the wakeup was due to rs_notify_queue or
			 * rs_cong_notify then we need to handle those first.
			 */
			continue;
		}

		RDSV3_DPRINTF5("rdsv3_recvmsg",
		    "copying inc %p from %u.%u.%u.%u:%u to user", inc,
		    NIPQUAD(inc->i_conn->c_faddr),
		    ntohs(inc->i_hdr.h_sport));

		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, uio, size);
		if (ret < 0)
			break;

		/*
		 * if the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		/* (for MSG_PEEK the message is deliberately left queued) */
		if (!rdsv3_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rdsv3_inc_put(inc);
			inc = NULL;
			rdsv3_stats_inc(s_recv_deliver_raced);
			continue;
		}

		/* message larger than the buffer: report truncation */
		if (ret < ntohl(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = ntohl(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rdsv3_cmsg_recv(inc, msg)) {
			ret = -EFAULT;
			goto out;
		}

		rdsv3_stats_inc(s_recv_delivered);

		/* fill in the sender's address if the caller asked for it */
		if (msg->msg_namelen) {
			sin = kmem_alloc(sizeof (struct sockaddr_in), KM_SLEEP);
			sin->sin_family = AF_INET_OFFLOAD;
			sin->sin_port = inc->i_hdr.h_sport;
			sin->sin_addr.s_addr = inc->i_saddr;
			(void) memset(sin->sin_zero, 0,
			    sizeof (sin->sin_zero));
			msg->msg_namelen = sizeof (struct sockaddr_in);
			msg->msg_name = sin;
		}
		break;
	}

	/* drop the reference taken by rdsv3_next_incoming() */
	if (inc)
		rdsv3_inc_put(inc);

out:
	/* no control messages were produced: tell the caller */
	if (msg && msg->msg_control == NULL)
		msg->msg_controllen = 0;

	RDSV3_DPRINTF4("rdsv3_recvmsg", "Return(rs: %p, ret: %d)", rs, ret);

	return (ret);
}
62212198SEiji.Ota@Sun.COM
/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void
rdsv3_clear_recv_queue(struct rdsv3_sock *rs)
{
	struct rsock *sk = rdsv3_rs_to_sk(rs);
	struct rdsv3_incoming *inc, *tmp;

	RDSV3_DPRINTF4("rdsv3_clear_recv_queue", "Enter(rs: %p)", rs);

	rw_enter(&rs->rs_recv_lock, RW_WRITER);
	/* safe iteration: each inc is unlinked (and possibly freed) in-loop */
	RDSV3_FOR_EACH_LIST_NODE_SAFE(inc, tmp, &rs->rs_recv_queue, i_item) {
		/* give back the receive-buffer accounting for this message */
		rdsv3_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
		    -ntohl(inc->i_hdr.h_len),
		    inc->i_hdr.h_dport);
		list_remove_node(&inc->i_item);
		/* drop the queue's reference; may free the inc */
		rdsv3_inc_put(inc);
	}
	rw_exit(&rs->rs_recv_lock);

	RDSV3_DPRINTF4("rdsv3_clear_recv_queue", "Return(rs: %p)", rs);
}
64812198SEiji.Ota@Sun.COM
64912198SEiji.Ota@Sun.COM /*
65012198SEiji.Ota@Sun.COM * inc->i_saddr isn't used here because it is only set in the receive
65112198SEiji.Ota@Sun.COM * path.
65212198SEiji.Ota@Sun.COM */
65312198SEiji.Ota@Sun.COM void
rdsv3_inc_info_copy(struct rdsv3_incoming * inc,struct rdsv3_info_iterator * iter,uint32_be_t saddr,uint32_be_t daddr,int flip)65412198SEiji.Ota@Sun.COM rdsv3_inc_info_copy(struct rdsv3_incoming *inc,
65512198SEiji.Ota@Sun.COM struct rdsv3_info_iterator *iter,
65612198SEiji.Ota@Sun.COM uint32_be_t saddr, uint32_be_t daddr, int flip)
65712198SEiji.Ota@Sun.COM {
65812863SEiji.Ota@Sun.COM struct rds_info_message minfo;
65912198SEiji.Ota@Sun.COM
66012198SEiji.Ota@Sun.COM minfo.seq = ntohll(inc->i_hdr.h_sequence);
66112198SEiji.Ota@Sun.COM minfo.len = ntohl(inc->i_hdr.h_len);
66212198SEiji.Ota@Sun.COM
66312198SEiji.Ota@Sun.COM if (flip) {
66412198SEiji.Ota@Sun.COM minfo.laddr = daddr;
66512198SEiji.Ota@Sun.COM minfo.faddr = saddr;
66612198SEiji.Ota@Sun.COM minfo.lport = inc->i_hdr.h_dport;
66712198SEiji.Ota@Sun.COM minfo.fport = inc->i_hdr.h_sport;
66812198SEiji.Ota@Sun.COM } else {
66912198SEiji.Ota@Sun.COM minfo.laddr = saddr;
67012198SEiji.Ota@Sun.COM minfo.faddr = daddr;
67112198SEiji.Ota@Sun.COM minfo.lport = inc->i_hdr.h_sport;
67212198SEiji.Ota@Sun.COM minfo.fport = inc->i_hdr.h_dport;
67312198SEiji.Ota@Sun.COM }
67412198SEiji.Ota@Sun.COM
67512198SEiji.Ota@Sun.COM rdsv3_info_copy(iter, &minfo, sizeof (minfo));
67612198SEiji.Ota@Sun.COM }
677