2 * Intel MIC Platform Software Stack (MPSS)
4 * Copyright(c) 2014 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
18 #include <linux/scif.h>
19 #include "scif_main.h"
/*
 * Printable names for enum scif_epd_state, indexed by state value.
 * Used by the dev_dbg() calls below (see scif_close/scif_bind/scif_listen).
 * NOTE(review): the array entries are elided from this view.
 */
22 static const char * const scif_ep_states[] = {
/*
 * Progress of a non-blocking (async) connect on an endpoint.  scif_close()
 * checks for ASYNC_CONN_INPROGRESS and flushes scif_info.conn_work before
 * tearing the endpoint down.
 */
34 enum conn_async_state {
35 ASYNC_CONN_IDLE = 1, /* ep setup for async connect */
36 ASYNC_CONN_INPROGRESS, /* async connect in progress */
37 ASYNC_CONN_FLUSH_WORK /* async work flush in progress */
/*
 * scif_open() - Create a new SCIF endpoint.
 *
 * Zero-allocates the endpoint and its queue-pair bookkeeping, initializes
 * the endpoint spinlock and the send/recv mutexes, and starts the endpoint
 * in the SCIFEP_UNBOUND state (no local port assigned yet).
 * NOTE(review): the kzalloc()-failure / cleanup paths and the return
 * statement are elided from this chunk — presumably NULL is returned on
 * allocation failure; confirm against the full file.
 */
40 scif_epd_t scif_open(void)
42 struct scif_endpt *ep;
45 ep = kzalloc(sizeof(*ep), GFP_KERNEL);
/* Queue-pair info used for node-to-node messaging on this endpoint */
49 ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL);
53 spin_lock_init(&ep->lock);
54 mutex_init(&ep->sendlock);
55 mutex_init(&ep->recvlock);
/* A freshly opened endpoint is unbound until scif_bind() is called */
57 ep->state = SCIFEP_UNBOUND;
58 dev_dbg(scif_info.mdev.this_device,
59 "SCIFAPI open: ep %p success\n", ep);
67 EXPORT_SYMBOL_GPL(scif_open);
70 * scif_disconnect_ep - Disconnects the endpoint if found
71 * @epd: The end point returned from scif_open()
73 static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep)
76 struct scif_endpt *fep = NULL;
77 struct scif_endpt *tmpep;
78 struct list_head *pos, *tmpq;
82 * Wake up any threads blocked in send()/recv() before closing
83 * out the connection. Grabbing and releasing the send/recv lock
84 * will ensure that any blocked senders/receivers have exited for
85 * Ring 0 endpoints. It is a Ring 0 bug to call send/recv after
86 * close. Ring 3 endpoints are not affected since close will not
87 * be called while there are IOCTLs executing.
89 wake_up_interruptible(&ep->sendwq);
90 wake_up_interruptible(&ep->recvwq);
/* Empty lock/unlock pairs: barrier for any still-running send/recv */
91 mutex_lock(&ep->sendlock);
92 mutex_unlock(&ep->sendlock);
93 mutex_lock(&ep->recvlock);
94 mutex_unlock(&ep->recvlock);
96 /* Remove from the connected list */
97 mutex_lock(&scif_info.connlock);
98 list_for_each_safe(pos, tmpq, &scif_info.connected) {
99 tmpep = list_entry(pos, struct scif_endpt, list);
/* NOTE(review): the match/unlink logic of this loop is elided here */
103 spin_lock(&ep->lock);
110 * The other side has completed the disconnect before
111 * the end point can be removed from the list. Therefore
112 * the ep lock is not locked, traverse the disconnected
113 * list to find the endpoint and release the conn lock.
115 list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
116 tmpep = list_entry(pos, struct scif_endpt, list);
122 mutex_unlock(&scif_info.connlock);
/* Notify the remote endpoint that we are disconnecting */
126 init_completion(&ep->discon);
127 msg.uop = SCIF_DISCNCT;
130 msg.payload[0] = (u64)ep;
131 msg.payload[1] = ep->remote_ep;
133 err = scif_nodeqp_send(ep->remote_dev, &msg);
134 spin_unlock(&ep->lock);
135 mutex_unlock(&scif_info.connlock);
138 /* Wait for the remote node to respond with SCIF_DISCNT_ACK */
139 wait_for_completion_timeout(&ep->discon,
140 SCIF_NODE_ALIVE_TIMEOUT);
/*
 * scif_close() - Release an endpoint; what is torn down depends on the
 * endpoint's state at the time of the call (the switch header itself is
 * elided from this view).  Listening endpoints additionally reap pending
 * accepts and reject queued connection requests before teardown.
 */
144 int scif_close(scif_epd_t epd)
146 struct scif_endpt *ep = (struct scif_endpt *)epd;
147 struct scif_endpt *tmpep;
148 struct list_head *pos, *tmpq;
149 enum scif_epd_state oldstate;
152 dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n",
153 ep, scif_ep_states[ep->state]);
/* An async connect may still be queued; flush conn_work before closing */
155 spin_lock(&ep->lock);
156 flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS);
157 spin_unlock(&ep->lock);
160 flush_work(&scif_info.conn_work);
/* Mark the endpoint closing under the lock, then act on its old state */
162 spin_lock(&ep->lock);
163 oldstate = ep->state;
165 ep->state = SCIFEP_CLOSING;
169 case SCIFEP_DISCONNECTED:
170 spin_unlock(&ep->lock);
171 /* Remove from the disconnected list */
172 mutex_lock(&scif_info.connlock);
173 list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
174 tmpep = list_entry(pos, struct scif_endpt, list);
180 mutex_unlock(&scif_info.connlock);
184 case SCIFEP_CONNECTING:
185 spin_unlock(&ep->lock);
188 case SCIFEP_CONNECTED:
/* Connected endpoints must perform the two-sided disconnect handshake */
191 spin_unlock(&ep->lock);
192 scif_disconnect_ep(ep);
195 case SCIFEP_LISTENING:
196 case SCIFEP_CLLISTEN:
198 struct scif_conreq *conreq;
200 struct scif_endpt *aep;
202 spin_unlock(&ep->lock);
203 spin_lock(&scif_info.eplock);
205 /* remove from listen list */
206 list_for_each_safe(pos, tmpq, &scif_info.listen) {
207 tmpep = list_entry(pos, struct scif_endpt, list);
211 /* Remove any dangling accepts */
212 while (ep->acceptcnt) {
213 aep = list_first_entry(&ep->li_accept,
214 struct scif_endpt, liacceptlist);
215 list_del(&aep->liacceptlist);
216 scif_put_port(aep->port.port);
/* Drop the accepted ep from the un-accepted list if it is there */
217 list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
218 tmpep = list_entry(pos, struct scif_endpt,
/* Also unlink the accepted ep from the connected/disconnected lists */
225 spin_unlock(&scif_info.eplock);
226 mutex_lock(&scif_info.connlock);
227 list_for_each_safe(pos, tmpq, &scif_info.connected) {
228 tmpep = list_entry(pos,
229 struct scif_endpt, list);
235 list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
236 tmpep = list_entry(pos,
237 struct scif_endpt, list);
243 mutex_unlock(&scif_info.connlock);
244 scif_teardown_ep(aep);
245 spin_lock(&scif_info.eplock);
246 scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD);
/* NOTE(review): lock ordering here (ep->lock taken before dropping
 * eplock) is reproduced from the elided original — verify in full file */
250 spin_lock(&ep->lock);
251 spin_unlock(&scif_info.eplock);
253 /* Remove and reject any pending connection requests. */
254 while (ep->conreqcnt) {
255 conreq = list_first_entry(&ep->conlist,
256 struct scif_conreq, list);
257 list_del(&conreq->list);
259 msg.uop = SCIF_CNCT_REJ;
260 msg.dst.node = conreq->msg.src.node;
261 msg.dst.port = conreq->msg.src.port;
262 msg.payload[0] = conreq->msg.payload[0];
263 msg.payload[1] = conreq->msg.payload[1];
265 * No Error Handling on purpose for scif_nodeqp_send().
266 * If the remote node is lost we still want free the
267 * connection requests on the self node.
269 scif_nodeqp_send(&scif_dev[conreq->msg.src.node],
275 spin_unlock(&ep->lock);
276 /* If a kSCIF accept is waiting wake it up */
277 wake_up_interruptible(&ep->conwq);
/* Common teardown: return the local port and zombify the endpoint */
281 scif_put_port(ep->port.port);
282 scif_teardown_ep(ep);
283 scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);
286 EXPORT_SYMBOL_GPL(scif_close);
289 * scif_flush() - Wakes up any blocking accepts. The endpoint will no longer
290 * accept new connections.
291 * @epd: The end point returned from scif_open()
293 int __scif_flush(scif_epd_t epd)
295 struct scif_endpt *ep = (struct scif_endpt *)epd;
/* Only the LISTENING arm is visible here; other states are elided */
298 case SCIFEP_LISTENING:
/* CLLISTEN: still on the listen list but refusing new connections */
300 ep->state = SCIFEP_CLLISTEN;
302 /* If an accept is waiting wake it up */
303 wake_up_interruptible(&ep->conwq);
/*
 * scif_bind() - Bind an endpoint to a local port number.
 * @epd: endpoint returned from scif_open()
 * @pn:  requested port number (ports below SCIF_ADMIN_PORT_END need
 *       CAP_SYS_ADMIN; error branches are elided from this view)
 *
 * On success the endpoint moves to SCIFEP_BOUND with async-connect state
 * reset to ASYNC_CONN_IDLE.
 */
312 int scif_bind(scif_epd_t epd, u16 pn)
314 struct scif_endpt *ep = (struct scif_endpt *)epd;
318 dev_dbg(scif_info.mdev.this_device,
319 "SCIFAPI bind: ep %p %s requested port number %d\n",
320 ep, scif_ep_states[ep->state], pn);
323 * Similar to IETF RFC 1700, SCIF ports below
324 * SCIF_ADMIN_PORT_END can only be bound by system (or root)
325 * processes or by processes executed by privileged users.
327 if (pn < SCIF_ADMIN_PORT_END && !capable(CAP_SYS_ADMIN)) {
329 goto scif_bind_admin_exit;
/* Binding is only legal on an unbound endpoint */
333 spin_lock(&ep->lock);
334 if (ep->state == SCIFEP_BOUND) {
337 } else if (ep->state != SCIFEP_UNBOUND) {
/* Reserve the specific port the caller asked for */
343 tmp = scif_rsrv_port(pn);
/* Dynamic port allocation — presumably taken when pn == 0; the guarding
 * condition is elided from this chunk, confirm against the full file */
349 pn = scif_get_new_port();
356 ep->state = SCIFEP_BOUND;
357 ep->port.node = scif_info.nodeid;
359 ep->conn_async_state = ASYNC_CONN_IDLE;
361 dev_dbg(scif_info.mdev.this_device,
362 "SCIFAPI bind: bound to port number %d\n", pn);
364 spin_unlock(&ep->lock);
365 scif_bind_admin_exit:
368 EXPORT_SYMBOL_GPL(scif_bind);
/*
 * scif_listen() - Move a bound endpoint into the listening state.
 * @epd:     endpoint returned from scif_open()
 * @backlog: maximum pending-connection queue depth, stored on the ep
 *
 * Initializes the connection-request list, the accept waitqueue and the
 * kSCIF accept list, frees the now-unneeded queue pair, and places the
 * endpoint on the global listen list.  State checks whose bodies are
 * elided here reject listening on already-connected/connecting/listening
 * or disconnected endpoints.
 */
370 int scif_listen(scif_epd_t epd, int backlog)
372 struct scif_endpt *ep = (struct scif_endpt *)epd;
374 dev_dbg(scif_info.mdev.this_device,
375 "SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]);
376 spin_lock(&ep->lock);
/* Invalid starting states — error returns are elided from this view */
380 case SCIFEP_CLLISTEN:
382 case SCIFEP_DISCONNECTED:
383 spin_unlock(&ep->lock);
385 case SCIFEP_LISTENING:
386 case SCIFEP_CONNECTED:
387 case SCIFEP_CONNECTING:
389 spin_unlock(&ep->lock);
395 ep->state = SCIFEP_LISTENING;
396 ep->backlog = backlog;
/* Fresh bookkeeping for incoming connection requests and accepts */
400 INIT_LIST_HEAD(&ep->conlist);
401 init_waitqueue_head(&ep->conwq);
402 INIT_LIST_HEAD(&ep->li_accept);
403 spin_unlock(&ep->lock);
406 * Listen status is complete so delete the qp information not needed
407 * on a listen before placing on the list of listening ep's
409 scif_teardown_ep(ep);
410 ep->qp_info.qp = NULL;
412 spin_lock(&scif_info.eplock);
413 list_add_tail(&ep->list, &scif_info.listen);
414 spin_unlock(&scif_info.eplock);
417 EXPORT_SYMBOL_GPL(scif_listen);