1 /* arch/arm/mach-msm/smd_rpcrouter.c
3 * Copyright (C) 2007 Google, Inc.
4 * Copyright (c) 2007-2009 QUALCOMM Incorporated.
5 * Author: San Mehat <san@android.com>
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 /* TODO: handle cases where smd_write() will tempfail due to full fifo */
19 /* TODO: thread priority? schedule a work to bump it? */
20 /* TODO: maybe make server_list_lock a mutex */
21 /* TODO: pool fragments to avoid kmalloc/kfree churn */
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/errno.h>
27 #include <linux/cdev.h>
28 #include <linux/init.h>
29 #include <linux/device.h>
30 #include <linux/types.h>
31 #include <linux/delay.h>
33 #include <linux/err.h>
34 #include <linux/sched.h>
35 #include <linux/poll.h>
36 #include <linux/wakelock.h>
37 #include <asm/uaccess.h>
38 #include <asm/byteorder.h>
39 #include <linux/platform_device.h>
40 #include <linux/uaccess.h>
42 #include <asm/byteorder.h>
44 #include <mach/msm_smd.h>
45 #include "smd_rpcrouter.h"
/* Compile-time trace/debug switches; all disabled by default. */
47 #define TRACE_R2R_MSG 0
48 #define TRACE_R2R_RAW 0
49 #define TRACE_RPC_MSG 0
50 #define TRACE_NOTIFY_MSG 0
52 #define MSM_RPCROUTER_DEBUG 0
53 #define MSM_RPCROUTER_DEBUG_PKT 0
54 #define MSM_RPCROUTER_R2R_DEBUG 0
55 #define DUMP_ALL_RECEIVED_HEADERS 0
/* DIAG is always on: error reporting tagged "[RR] ERROR". */
57 #define DIAG(x...) printk("[RR] ERROR " x)
59 #if MSM_RPCROUTER_DEBUG
60 #define D(x...) printk(x)
/* NOTE(review): the matching #else/#endif lines for each debug macro pair
 * appear elided in this listing; each macro has a printk variant and a
 * no-op do{}while(0) variant selected by the switches above. */
62 #define D(x...) do {} while (0)
66 #define RR(x...) printk("[RR] "x)
68 #define RR(x...) do {} while (0)
72 #define IO(x...) printk("[RPC] "x)
74 #define IO(x...) do {} while (0)
78 #define NTFY(x...) printk(KERN_ERR "[NOTIFY] "x)
80 #define NTFY(x...) do {} while (0)
/* Router-wide state: endpoint/server registries, the single SMD channel,
 * waitqueues, and the spinlocks guarding each list. */
83 static LIST_HEAD(local_endpoints);
84 static LIST_HEAD(remote_endpoints);
86 static LIST_HEAD(server_list);
/* The one SMD channel carrying all router traffic ("SMD_RPCCALL"). */
88 static smd_channel_t *smd_channel;
/* Set after the HELLO handshake with the remote processor completes. */
89 static int initialized;
90 static wait_queue_head_t newserver_wait;
91 static wait_queue_head_t smd_wait;
93 static DEFINE_SPINLOCK(local_endpoints_lock);
94 static DEFINE_SPINLOCK(remote_endpoints_lock);
95 static DEFINE_SPINLOCK(server_list_lock);
/* Serializes all reads/writes on smd_channel. */
96 static DEFINE_SPINLOCK(smd_lock);
98 static struct workqueue_struct *rpcrouter_workqueue;
99 static struct wake_lock rpcrouter_wake_lock;
/* Bytes the reader is currently waiting for; see rr_read()/smdnotify(). */
100 static int rpcrouter_need_len;
/* Monotonic XID source for outbound RPC calls. */
102 static atomic_t next_xid = ATOMIC_INIT(1);
/* Message id used in the pacmark word to group fragments. */
103 static uint8_t next_pacmarkid;
105 static void do_read_data(struct work_struct *work);
106 static void do_create_pdevs(struct work_struct *work);
107 static void do_create_rpcrouter_pdev(struct work_struct *work);
109 static DECLARE_WORK(work_read_data, do_read_data);
110 static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
111 static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
/* Packet-assembly state machine states (reader side). */
113 #define RR_STATE_IDLE 0
114 #define RR_STATE_HEADER 1
115 #define RR_STATE_BODY 2
116 #define RR_STATE_ERROR 3
/* NOTE(review): the "struct rr_context {" opening line appears elided here. */
119 struct rr_packet *pkt;
121 uint32_t state; /* current assembly state */
122 uint32_t count; /* bytes needed in this state */
125 struct rr_context the_rr_context;
/* Platform device registered once the initial server sync finishes, so
 * dependent drivers can probe against "oncrpc_router". */
127 static struct platform_device rpcrouter_pdev = {
128 .name = "oncrpc_router",
/* Send a router-to-router control message (HELLO/NEW_SERVER/RESUME_TX/...)
 * over the SMD channel, prefixed with an rr_header addressed to the remote
 * router itself (dst_cid = RPCROUTER_ROUTER_ADDRESS).
 * Only HELLO may be sent before the handshake marks us initialized. */
133 static int rpcrouter_send_control_msg(union rr_control_msg *msg)
135 struct rr_header hdr;
139 if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) && !initialized) {
140 printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
141 "router not initialized\n");
145 hdr.version = RPCROUTER_VERSION;
147 hdr.src_pid = RPCROUTER_PID_LOCAL;
148 hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
150 hdr.size = sizeof(*msg);
152 hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;
154 /* TODO: what if channel is full? */
156 need = sizeof(hdr) + hdr.size;
157 spin_lock_irqsave(&smd_lock, flags);
/* Busy-wait for fifo space, dropping the lock each iteration so the
 * SMD driver can make progress.  NOTE(review): any sleep/relax call in
 * the loop body appears elided in this listing. */
158 while (smd_write_avail(smd_channel) < need) {
159 spin_unlock_irqrestore(&smd_lock, flags);
161 spin_lock_irqsave(&smd_lock, flags);
/* Header and payload written under one lock hold so they stay adjacent. */
163 smd_write(smd_channel, &hdr, sizeof(hdr));
164 smd_write(smd_channel, msg, hdr.size);
165 spin_unlock_irqrestore(&smd_lock, flags);
/* Allocate an rr_server, add it to server_list, and for remote servers
 * create the character device node userspace clients open.
 * Returns the server or ERR_PTR(-ENOMEM); on cdev failure the entry is
 * unlinked again.  NOTE(review): the parameter list beyond pid (cid, prog,
 * vers) and the field assignments appear elided in this listing. */
169 static struct rr_server *rpcrouter_create_server(uint32_t pid,
174 struct rr_server *server;
178 server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
180 return ERR_PTR(-ENOMEM);
182 memset(server, 0, sizeof(struct rr_server));
188 spin_lock_irqsave(&server_list_lock, flags);
189 list_add_tail(&server->list, &server_list);
190 spin_unlock_irqrestore(&server_list_lock, flags);
/* Remote servers get a cdev so userspace can bind to prog/vers. */
192 if (pid == RPCROUTER_PID_REMOTE) {
193 rc = msm_rpcrouter_create_server_cdev(server);
/* cdev creation failed: back the server out of the list. */
199 spin_lock_irqsave(&server_list_lock, flags);
200 list_del(&server->list);
201 spin_unlock_irqrestore(&server_list_lock, flags);
/* Unlink a server from server_list and tear down its device node. */
206 static void rpcrouter_destroy_server(struct rr_server *server)
210 spin_lock_irqsave(&server_list_lock, flags);
211 list_del(&server->list);
212 spin_unlock_irqrestore(&server_list_lock, flags);
213 device_destroy(msm_rpcrouter_class, server->device_number);
/* Find a registered server by exact program/version match.
 * Returns the entry (unlocked — caller must not hold it across sleeps)
 * or NULL if absent. */
217 static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
219 struct rr_server *server;
222 spin_lock_irqsave(&server_list_lock, flags);
223 list_for_each_entry(server, &server_list, list) {
224 if (server->prog == prog
225 && server->vers == ver) {
226 spin_unlock_irqrestore(&server_list_lock, flags);
230 spin_unlock_irqrestore(&server_list_lock, flags);
/* Find a server by its cdev device number (used when userspace opens a
 * per-server device node).  Returns NULL if no match. */
234 static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
236 struct rr_server *server;
239 spin_lock_irqsave(&server_list_lock, flags);
240 list_for_each_entry(server, &server_list, list) {
241 if (server->device_number == dev) {
242 spin_unlock_irqrestore(&server_list_lock, flags);
246 spin_unlock_irqrestore(&server_list_lock, flags);
/* Create a local RPC endpoint and add it to local_endpoints.
 * dev selects the flavor: a per-server device node binds the endpoint to
 * that server's prog/vers; msm_rpcrouter_devno or MKDEV(0,0) creates an
 * unbound ("master") endpoint. */
250 struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
252 struct msm_rpc_endpoint *ept;
255 ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
258 memset(ept, 0, sizeof(struct msm_rpc_endpoint));
260 /* mark no reply outstanding */
261 ept->reply_pid = 0xffffffff;
/* cid is the endpoint's kernel address.  NOTE(review): this cast
 * truncates on 64-bit; fine on this 32-bit ARM target only. */
263 ept->cid = (uint32_t) ept;
264 ept->pid = RPCROUTER_PID_LOCAL;
267 if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
268 struct rr_server *srv;
270 * This is a userspace client which opened
271 * a program/ver devicenode. Bind the client
272 * to that destination
274 srv = rpcrouter_lookup_server_by_dev(dev);
275 /* TODO: bug? really? */
278 ept->dst_pid = srv->pid;
279 ept->dst_cid = srv->cid;
280 ept->dst_prog = cpu_to_be32(srv->prog);
281 ept->dst_vers = cpu_to_be32(srv->vers);
283 D("Creating local ept %p @ %08x:%08x\n", ept, srv->prog, srv->vers);
285 /* mark not connected */
286 ept->dst_pid = 0xffffffff;
287 D("Creating a master local ept %p\n", ept);
290 init_waitqueue_head(&ept->wait_q);
291 INIT_LIST_HEAD(&ept->read_q);
292 spin_lock_init(&ept->read_q_lock);
293 wake_lock_init(&ept->read_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_read");
294 INIT_LIST_HEAD(&ept->incomplete);
296 spin_lock_irqsave(&local_endpoints_lock, flags);
297 list_add_tail(&ept->list, &local_endpoints);
298 spin_unlock_irqrestore(&local_endpoints_lock, flags);
/* Tear down a local endpoint: notify the remote side via REMOVE_CLIENT,
 * then release the wakelock and unlink from local_endpoints.
 * NOTE(review): list_del here is not visibly under local_endpoints_lock
 * in this listing — confirm against the full source. */
302 int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
305 union rr_control_msg msg;
307 msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
308 msg.cli.pid = ept->pid;
309 msg.cli.cid = ept->cid;
311 RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
312 rc = rpcrouter_send_control_msg(&msg);
316 wake_lock_destroy(&ept->read_q_wake_lock);
317 list_del(&ept->list);
/* Track a remote client (identified by cid) so outbound traffic to it can
 * be flow-controlled via the per-endpoint tx quota. */
322 static int rpcrouter_create_remote_endpoint(uint32_t cid)
324 struct rr_remote_endpoint *new_c;
327 new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
330 memset(new_c, 0, sizeof(struct rr_remote_endpoint));
333 new_c->pid = RPCROUTER_PID_REMOTE;
/* Writers block on quota_wait when tx_quota_cntr hits the RX quota. */
334 init_waitqueue_head(&new_c->quota_wait);
335 spin_lock_init(&new_c->quota_lock);
337 spin_lock_irqsave(&remote_endpoints_lock, flags);
338 list_add_tail(&new_c->list, &remote_endpoints);
339 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
/* Find the local endpoint owning the given cid (used to route inbound
 * packets).  Returns NULL if no endpoint matches. */
343 static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
345 struct msm_rpc_endpoint *ept;
348 spin_lock_irqsave(&local_endpoints_lock, flags);
349 list_for_each_entry(ept, &local_endpoints, list) {
350 if (ept->cid == cid) {
351 spin_unlock_irqrestore(&local_endpoints_lock, flags);
355 spin_unlock_irqrestore(&local_endpoints_lock, flags);
/* Find the tracked remote endpoint for a cid; NULL if unknown. */
359 static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t cid)
361 struct rr_remote_endpoint *ept;
364 spin_lock_irqsave(&remote_endpoints_lock, flags);
365 list_for_each_entry(ept, &remote_endpoints, list) {
366 if (ept->cid == cid) {
367 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
371 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
/* Dispatch one router-to-router control message received from the remote
 * processor.  Handles HELLO (handshake + replay of our server list),
 * RESUME_TX (reopen a throttled remote endpoint), NEW_SERVER,
 * REMOVE_SERVER and REMOVE_CLIENT.  Rejects messages whose length does
 * not match the control-message union. */
375 static int process_control_msg(union rr_control_msg *msg, int len)
377 union rr_control_msg ctl;
378 struct rr_server *server;
379 struct rr_remote_endpoint *r_ept;
383 if (len != sizeof(*msg)) {
384 printk(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
390 case RPCROUTER_CTRL_CMD_HELLO:
/* Answer the handshake with our own HELLO. */
394 memset(&ctl, 0, sizeof(ctl));
395 ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
396 rpcrouter_send_control_msg(&ctl);
400 /* Send list of servers one at a time */
401 ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
403 /* TODO: long time to hold a spinlock... */
404 spin_lock_irqsave(&server_list_lock, flags);
405 list_for_each_entry(server, &server_list, list) {
406 ctl.srv.pid = server->pid;
407 ctl.srv.cid = server->cid;
408 ctl.srv.prog = server->prog;
409 ctl.srv.vers = server->vers;
411 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
412 server->pid, server->cid,
413 server->prog, server->vers);
415 rpcrouter_send_control_msg(&ctl);
417 spin_unlock_irqrestore(&server_list_lock, flags);
/* Handshake done: expose the "oncrpc_router" platform device. */
419 queue_work(rpcrouter_workqueue, &work_create_rpcrouter_pdev);
422 case RPCROUTER_CTRL_CMD_RESUME_TX:
423 RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
425 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
428 "rpcrouter: Unable to resume client\n");
/* Reset the quota counter and wake any writer blocked on it. */
431 spin_lock_irqsave(&r_ept->quota_lock, flags);
432 r_ept->tx_quota_cntr = 0;
433 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
434 wake_up(&r_ept->quota_wait);
437 case RPCROUTER_CTRL_CMD_NEW_SERVER:
438 RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
439 msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
441 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
444 server = rpcrouter_create_server(
445 msg->srv.pid, msg->srv.cid,
446 msg->srv.prog, msg->srv.vers);
450 * XXX: Verify that its okay to add the
451 * client to our remote client list
452 * if we get a NEW_SERVER notification
454 if (!rpcrouter_lookup_remote_endpoint(msg->srv.cid)) {
455 rc = rpcrouter_create_remote_endpoint(
459 "rpcrouter:Client create"
462 schedule_work(&work_create_pdevs);
/* Unblock anyone waiting in newserver_wait for this service. */
463 wake_up(&newserver_wait);
465 if ((server->pid == msg->srv.pid) &&
466 (server->cid == msg->srv.cid)) {
467 printk(KERN_ERR "rpcrouter: Duplicate svr\n");
/* Existing server re-announced from a new address: rebind it. */
469 server->pid = msg->srv.pid;
470 server->cid = msg->srv.cid;
475 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
476 RR("o REMOVE_SERVER prog=%08x:%d\n",
477 msg->srv.prog, msg->srv.vers);
478 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
480 rpcrouter_destroy_server(server);
483 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
484 RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
/* Only the remote side may remove its own clients. */
485 if (msg->cli.pid != RPCROUTER_PID_REMOTE) {
487 "rpcrouter: Denying remote removal of "
491 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
493 spin_lock_irqsave(&remote_endpoints_lock, flags);
494 list_del(&r_ept->list);
495 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
499 /* Notify local clients of this event */
500 printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
505 RR("o UNKNOWN(%08x)\n", msg->cmd);
/* Work item: register the "oncrpc_router" platform device once the
 * HELLO handshake has completed. */
512 static void do_create_rpcrouter_pdev(struct work_struct *work)
514 platform_device_register(&rpcrouter_pdev);
/* Work item: register a platform device for each remote server that does
 * not yet have one (pdev_name[0] == 0).  Because registration must happen
 * outside the spinlock, it handles one server then reschedules itself
 * until no unregistered servers remain. */
517 static void do_create_pdevs(struct work_struct *work)
520 struct rr_server *server;
522 /* TODO: race if destroyed while being registered */
523 spin_lock_irqsave(&server_list_lock, flags);
524 list_for_each_entry(server, &server_list, list) {
525 if (server->pid == RPCROUTER_PID_REMOTE) {
526 if (server->pdev_name[0] == 0) {
527 spin_unlock_irqrestore(&server_list_lock,
529 msm_rpcrouter_create_server_pdev(server);
/* Re-queue to pick up the next unregistered server. */
530 schedule_work(&work_create_pdevs);
535 spin_unlock_irqrestore(&server_list_lock, flags);
/* SMD event callback: when enough data has arrived for the pending read,
 * grab the wakelock (so suspend doesn't race the read) and wake the
 * reader.  NOTE(review): the wake_up(&smd_wait) call appears elided in
 * this listing — confirm against the full source. */
538 static void rpcrouter_smdnotify(void *_dev, unsigned event)
540 if (event != SMD_EVENT_DATA)
543 if (smd_read_avail(smd_channel) >= rpcrouter_need_len)
544 wake_lock(&rpcrouter_wake_lock);
/* kmalloc that retries on failure instead of returning NULL — the reader
 * work item cannot make progress without the allocation. */
548 static void *rr_malloc(unsigned sz)
550 void *ptr = kmalloc(sz, GFP_KERNEL);
554 printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
556 ptr = kmalloc(sz, GFP_KERNEL);
562 /* TODO: deal with channel teardown / restore */
/* Blocking read of exactly len bytes from the SMD channel.  If the data
 * is not yet available, records the wanted length in rpcrouter_need_len,
 * drops the wakelock, and sleeps on smd_wait until the SMD notify callback
 * sees enough data.  NOTE(review): the retry loop around this sequence
 * appears elided in this listing. */
563 static int rr_read(void *data, int len)
567 // printk("rr_read() %d\n", len);
569 spin_lock_irqsave(&smd_lock, flags);
570 if (smd_read_avail(smd_channel) >= len) {
571 rc = smd_read(smd_channel, data, len);
572 spin_unlock_irqrestore(&smd_lock, flags);
578 rpcrouter_need_len = len;
579 wake_unlock(&rpcrouter_wake_lock);
580 spin_unlock_irqrestore(&smd_lock, flags);
582 // printk("rr_read: waiting (%d)\n", len);
583 wait_event(smd_wait, smd_read_avail(smd_channel) >= len);
/* Scratch buffer for control messages addressed to the router itself. */
588 static uint32_t r2r_buf[RPCROUTER_MSGSIZE_MAX];
/* Reader work item: pull one routed message off SMD, validate its header,
 * and either (a) hand router-addressed payloads to process_control_msg(),
 * or (b) strip the pacmark word, read the fragment, and append it to the
 * owning endpoint's in-progress packet (keyed by pacmark mid) or read
 * queue.  Requeues itself at the end so messages are handled one per
 * work invocation. */
590 static void do_read_data(struct work_struct *work)
592 struct rr_header hdr;
593 struct rr_packet *pkt;
594 struct rr_fragment *frag;
595 struct msm_rpc_endpoint *ept;
599 if (rr_read(&hdr, sizeof(hdr)))
603 RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
604 hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
605 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
608 if (hdr.version != RPCROUTER_VERSION) {
609 DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
612 if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
613 DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
/* Messages addressed to the router itself are control traffic. */
617 if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
618 if (rr_read(r2r_buf, hdr.size))
620 process_control_msg((void*) r2r_buf, hdr.size);
/* Data traffic: payload begins with a 32-bit pacmark word. */
624 if (hdr.size < sizeof(pm)) {
625 DIAG("runt packet (no pacmark)\n");
628 if (rr_read(&pm, sizeof(pm)))
631 hdr.size -= sizeof(pm);
/* Fragment struct and its data are one allocation. */
633 frag = rr_malloc(hdr.size + sizeof(*frag));
635 frag->length = hdr.size;
636 if (rr_read(frag->data, hdr.size))
639 ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
641 DIAG("no local ept for cid %08x\n", hdr.dst_cid);
646 /* See if there is already a partial packet that matches our mid
647 * and if so, append this fragment to that packet.
649 mid = PACMARK_MID(pm);
650 list_for_each_entry(pkt, &ept->incomplete, list) {
651 if (pkt->mid == mid) {
652 pkt->last->next = frag;
654 pkt->length += frag->length;
655 if (PACMARK_LAST(pm)) {
656 list_del(&pkt->list);
657 goto packet_complete;
662 /* This mid is new -- create a packet for it, and put it on
663 * the incomplete list if this fragment is not a last fragment,
664 * otherwise put it on the read queue.
666 pkt = rr_malloc(sizeof(struct rr_packet));
669 memcpy(&pkt->hdr, &hdr, sizeof(hdr));
671 pkt->length = frag->length;
672 if (!PACMARK_LAST(pm)) {
673 list_add_tail(&pkt->list, &ept->incomplete);
/* packet_complete path: queue for readers and hold a wakelock until
 * the read queue drains (released in __msm_rpc_read). */
678 spin_lock_irqsave(&ept->read_q_lock, flags);
679 wake_lock(&ept->read_q_wake_lock);
680 list_add_tail(&pkt->list, &ept->read_q);
681 wake_up(&ept->wait_q);
682 spin_unlock_irqrestore(&ept->read_q_lock, flags);
/* Sender asked for flow-control confirmation: tell it to resume. */
685 if (hdr.confirm_rx) {
686 union rr_control_msg msg;
688 msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
689 msg.cli.pid = hdr.dst_pid;
690 msg.cli.cid = hdr.dst_cid;
692 RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
693 rpcrouter_send_control_msg(&msg);
696 queue_work(rpcrouter_workqueue, &work_read_data);
/* Fatal-error exit path: reader stops permanently. */
701 printk(KERN_ERR "rpc_router has died\n");
702 wake_unlock(&rpcrouter_wake_lock);
/* Fill in a big-endian ONCRPC call header (fresh XID, RPC version 2)
 * for the given program/version/procedure. */
705 void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
706 uint32_t vers, uint32_t proc)
708 memset(hdr, 0, sizeof(struct rpc_request_hdr));
709 hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
710 hdr->rpc_vers = cpu_to_be32(2);
711 hdr->prog = cpu_to_be32(prog);
712 hdr->vers = cpu_to_be32(vers);
713 hdr->procedure = cpu_to_be32(proc);
/* Open an unbound local endpoint (not connected to any server yet).
 * Returns the endpoint or ERR_PTR(-ENOMEM). */
716 struct msm_rpc_endpoint *msm_rpc_open(void)
718 struct msm_rpc_endpoint *ept;
720 ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
722 return ERR_PTR(-ENOMEM);
/* Close an endpoint opened with msm_rpc_open()/msm_rpc_connect(). */
727 int msm_rpc_close(struct msm_rpc_endpoint *ept)
729 return msm_rpcrouter_destroy_local_endpoint(ept);
731 EXPORT_SYMBOL(msm_rpc_close);
/* Send one RPC message (call or reply) from a local endpoint.
 * Validates the packet (size, connectedness, and for calls that the
 * prog/vers matches the endpoint's binding), resolves the destination,
 * honors the per-remote-endpoint tx quota (sleeping until RESUME_TX if
 * exhausted), then writes header + pacmark + payload to SMD.
 * Outbound messages are always a single fragment (see TODO below). */
733 int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
735 struct rr_header hdr;
737 struct rpc_request_hdr *rq = buffer;
738 struct rr_remote_endpoint *r_ept;
743 /* TODO: fragmentation for large outbound packets */
744 if (count > (RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t)) || !count)
747 /* snoop the RPC packet and enforce permissions */
749 /* has to have at least the xid and type fields */
750 if (count < (sizeof(uint32_t) * 2)) {
751 printk(KERN_ERR "rr_write: rejecting runt packet\n");
/* Call-packet path: needs the full 6-word ONCRPC call header. */
757 if (count < (sizeof(uint32_t) * 6)) {
759 "rr_write: rejecting runt call packet\n");
762 if (ept->dst_pid == 0xffffffff) {
763 printk(KERN_ERR "rr_write: not connected\n");
/* Newer AMSS encodes compatibility in the version word; older AMSS
 * requires an exact prog/vers match. */
767 #if CONFIG_MSM_AMSS_VERSION >= 6350
768 if ((ept->dst_prog != rq->prog) ||
769 !msm_rpc_is_compatible_version(
770 be32_to_cpu(ept->dst_vers),
771 be32_to_cpu(rq->vers))) {
773 if (ept->dst_prog != rq->prog || ept->dst_vers != rq->vers) {
776 "rr_write: cannot write to %08x:%d "
777 "(bound to %08x:%d)\n",
778 be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
779 be32_to_cpu(ept->dst_prog),
780 be32_to_cpu(ept->dst_vers));
783 hdr.dst_pid = ept->dst_pid;
784 hdr.dst_cid = ept->dst_cid;
785 IO("CALL on ept %p to %08x:%08x @ %d:%08x (%d bytes) (xid %x proc %x)\n",
787 be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
788 ept->dst_pid, ept->dst_cid, count,
789 be32_to_cpu(rq->xid), be32_to_cpu(rq->procedure));
/* Reply-packet path: must match the one outstanding recorded reply. */
793 if (ept->reply_pid == 0xffffffff) {
795 "rr_write: rejecting unexpected reply\n");
798 if (ept->reply_xid != rq->xid) {
800 "rr_write: rejecting packet w/ bad xid\n");
804 hdr.dst_pid = ept->reply_pid;
805 hdr.dst_cid = ept->reply_cid;
807 /* consume this reply */
808 ept->reply_pid = 0xffffffff;
810 IO("REPLY on ept %p to xid=%d @ %d:%08x (%d bytes)\n",
812 be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
815 r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_cid);
819 "msm_rpc_write(): No route to ept "
820 "[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
821 return -EHOSTUNREACH;
824 /* Create routing header */
825 hdr.type = RPCROUTER_CTRL_CMD_DATA;
826 hdr.version = RPCROUTER_VERSION;
827 hdr.src_pid = ept->pid;
828 hdr.src_cid = ept->cid;
/* Payload is the caller's buffer plus the 32-bit pacmark word. */
830 hdr.size = count + sizeof(uint32_t);
/* Quota wait: sleep until the remote grants more tx credit, unless a
 * signal arrives (and the endpoint is interruptible). */
833 prepare_to_wait(&r_ept->quota_wait, &__wait,
835 spin_lock_irqsave(&r_ept->quota_lock, flags);
836 if (r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA)
838 if (signal_pending(current) &&
839 (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
841 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
844 finish_wait(&r_ept->quota_wait, &__wait);
846 if (signal_pending(current) &&
847 (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
848 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
851 r_ept->tx_quota_cntr++;
852 if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA)
855 /* bump pacmark while interrupts disabled to avoid race
856 * probably should be atomic op instead
858 pacmark = PACMARK(count, ++next_pacmarkid, 0, 1);
860 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
862 spin_lock_irqsave(&smd_lock, flags);
/* Busy-wait for fifo space (see TODO below); lock is dropped each
 * iteration so SMD can drain. */
864 needed = sizeof(hdr) + hdr.size;
865 while (smd_write_avail(smd_channel) < needed) {
866 spin_unlock_irqrestore(&smd_lock, flags);
868 spin_lock_irqsave(&smd_lock, flags);
871 /* TODO: deal with full fifo */
872 smd_write(smd_channel, &hdr, sizeof(hdr));
873 smd_write(smd_channel, &pacmark, sizeof(pacmark));
874 smd_write(smd_channel, buffer, count);
876 spin_unlock_irqrestore(&smd_lock, flags);
880 EXPORT_SYMBOL(msm_rpc_write);
883 * NOTE: It is the responsibility of the caller to kfree buffer
/* Read one complete RPC message into a single contiguous buffer.
 * Single-fragment messages are returned in place (the fragment struct
 * doubles as the buffer); multi-fragment messages are flattened by
 * copying.  Returns the message length or a negative errno from
 * __msm_rpc_read().  Caller frees *buffer. */
885 int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
886 unsigned user_len, long timeout)
888 struct rr_fragment *frag, *next;
892 rc = __msm_rpc_read(ept, &frag, user_len, timeout);
896 /* single-fragment messages conveniently can be
897 * returned as-is (the buffer is at the front)
899 if (frag->next == 0) {
900 *buffer = (void*) frag;
904 /* multi-fragment messages, we have to do it the
905 * hard way, which is rather disgusting right now
/* NOTE(review): allocation of buf and the frag free/advance steps in
 * this loop appear elided in this listing. */
910 while (frag != NULL) {
911 memcpy(buf, frag->data, frag->length);
/* Fire an RPC call and wait for completion, discarding the reply body
 * (thin wrapper over msm_rpc_call_reply with no reply buffer). */
921 int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
922 void *_request, int request_size,
925 return msm_rpc_call_reply(ept, proc,
926 _request, request_size,
929 EXPORT_SYMBOL(msm_rpc_call);
/* Synchronous RPC: build the call header in the caller's request buffer
 * (using the endpoint's already-big-endian dst_prog/dst_vers), send it,
 * then loop reading until a REPLY with a matching XID arrives.  Stale
 * replies from timed-out calls and stray CALL packets are skipped.
 * Copies up to reply_size bytes into _reply when provided. */
931 int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
932 void *_request, int request_size,
933 void *_reply, int reply_size,
936 struct rpc_request_hdr *req = _request;
937 struct rpc_reply_hdr *reply;
940 if (request_size < sizeof(*req))
/* Endpoint must be bound to a server first. */
943 if (ept->dst_pid == 0xffffffff)
946 /* We can't use msm_rpc_setup_req() here, because dst_prog and
947 * dst_vers here are already in BE.
949 memset(req, 0, sizeof(*req));
950 req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
951 req->rpc_vers = cpu_to_be32(2);
952 req->prog = ept->dst_prog;
953 req->vers = ept->dst_vers;
954 req->procedure = cpu_to_be32(proc);
956 rc = msm_rpc_write(ept, req, request_size);
961 rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
/* Reply must carry at least xid, type and reply_stat. */
964 if (rc < (3 * sizeof(uint32_t))) {
968 /* we should not get CALL packets -- ignore them */
969 if (reply->type == 0) {
973 /* If an earlier call timed out, we could get the (no
974 * longer wanted) reply for it. Ignore replies that
977 if (reply->xid != req->xid) {
/* Non-zero reply_stat / accept_stat mean the server rejected us. */
981 if (reply->reply_stat != 0) {
985 if (reply->data.acc_hdr.accept_stat != 0) {
989 if (_reply == NULL) {
993 if (rc > reply_size) {
996 memcpy(_reply, reply, rc);
1003 EXPORT_SYMBOL(msm_rpc_call_reply);
/* Wait-condition helper: true when the endpoint's read queue is
 * non-empty (checked under read_q_lock). */
1006 static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
1008 unsigned long flags;
1010 spin_lock_irqsave(&ept->read_q_lock, flags);
1011 ret = !list_empty(&ept->read_q);
1012 spin_unlock_irqrestore(&ept->read_q_lock, flags);
/* Dequeue one complete packet (as its fragment chain) from an endpoint.
 * Wait semantics depend on ept->flags and timeout: uninterruptible or
 * interruptible, with or without a timeout.  Rejects packets larger than
 * len (-ETOOSMALL path) and records the source of inbound CALL packets so
 * a later msm_rpc_write() can route the reply.  Returns packet length. */
1016 int __msm_rpc_read(struct msm_rpc_endpoint *ept,
1017 struct rr_fragment **frag_ret,
1018 unsigned len, long timeout)
1020 struct rr_packet *pkt;
1021 struct rpc_request_hdr *rq;
1022 DEFINE_WAIT(__wait);
1023 unsigned long flags;
1026 IO("READ on ept %p\n", ept);
/* Four wait variants: {uninterruptible, interruptible} x {forever,
 * timeout}.  NOTE(review): the timeout-expiry return paths appear
 * elided in this listing. */
1028 if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
1030 wait_event(ept->wait_q, ept_packet_available(ept));
1032 rc = wait_event_timeout(
1033 ept->wait_q, ept_packet_available(ept),
1040 rc = wait_event_interruptible(
1041 ept->wait_q, ept_packet_available(ept));
1045 rc = wait_event_interruptible_timeout(
1046 ept->wait_q, ept_packet_available(ept),
1053 spin_lock_irqsave(&ept->read_q_lock, flags);
1054 if (list_empty(&ept->read_q)) {
1055 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1058 pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
/* Caller's buffer too small: leave the packet queued. */
1059 if (pkt->length > len) {
1060 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1063 list_del(&pkt->list);
/* Last queued packet consumed: allow suspend again. */
1064 if (list_empty(&ept->read_q))
1065 wake_unlock(&ept->read_q_wake_lock);
1066 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1070 *frag_ret = pkt->first;
1071 rq = (void*) pkt->first->data;
/* type == 0 means CALL: remember where the reply must go. */
1072 if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
1073 IO("READ on ept %p is a CALL on %08x:%08x proc %d xid %d\n",
1074 ept, be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
1075 be32_to_cpu(rq->procedure),
1076 be32_to_cpu(rq->xid));
1078 if (ept->reply_pid != 0xffffffff) {
1080 "rr_read: lost previous reply xid...\n");
1082 /* TODO: locking? */
1083 ept->reply_pid = pkt->hdr.src_pid;
1084 ept->reply_cid = pkt->hdr.src_cid;
1085 ept->reply_xid = rq->xid;
1088 else if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 1))
1089 IO("READ on ept %p is a REPLY\n", ept);
1090 else IO("READ on ept %p (%d bytes)\n", ept, rc);
1097 #if CONFIG_MSM_AMSS_VERSION >= 6350
/* Versioned-RPC compatibility check (AMSS >= 6350): both versions must be
 * in the same mode; in "exact" mode they must match; otherwise major must
 * match and the server's minor must be >= the client's. */
1098 int msm_rpc_is_compatible_version(uint32_t server_version,
1099 uint32_t client_version)
1101 if ((server_version & RPC_VERSION_MODE_MASK) !=
1102 (client_version & RPC_VERSION_MODE_MASK))
1105 if (server_version & RPC_VERSION_MODE_MASK)
1106 return server_version == client_version;
1108 return ((server_version & RPC_VERSION_MAJOR_MASK) ==
1109 (client_version & RPC_VERSION_MAJOR_MASK)) &&
1110 ((server_version & RPC_VERSION_MINOR_MASK) >=
1111 (client_version & RPC_VERSION_MINOR_MASK));
1113 EXPORT_SYMBOL(msm_rpc_is_compatible_version);
/* Scan server_list for a server of the given program whose version is
 * compatible (per msm_rpc_is_compatible_version) with ver; on success
 * store the server's actual version in *found_vers. */
1115 static int msm_rpc_get_compatible_server(uint32_t prog,
1117 uint32_t *found_vers)
1119 struct rr_server *server;
1120 unsigned long flags;
1121 if (found_vers == NULL)
1124 spin_lock_irqsave(&server_list_lock, flags);
1125 list_for_each_entry(server, &server_list, list) {
1126 if ((server->prog == prog) &&
1127 msm_rpc_is_compatible_version(server->vers, ver)) {
1128 *found_vers = server->vers;
1129 spin_unlock_irqrestore(&server_list_lock, flags);
1133 spin_unlock_irqrestore(&server_list_lock, flags);
/* Open an endpoint bound to the server registered for prog/vers.
 * On newer AMSS, a non-versioned request (mode bits clear) is first
 * upgraded to the best compatible registered version.  Returns the
 * endpoint or ERR_PTR(-EHOSTUNREACH) when no server matches. */
1138 struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags)
1140 struct msm_rpc_endpoint *ept;
1141 struct rr_server *server;
1143 #if CONFIG_MSM_AMSS_VERSION >= 6350
1144 if (!(vers & RPC_VERSION_MODE_MASK)) {
1145 uint32_t found_vers;
1146 if (msm_rpc_get_compatible_server(prog, vers, &found_vers) < 0)
1147 return ERR_PTR(-EHOSTUNREACH);
1148 if (found_vers != vers) {
1149 D("RPC using new version %08x:{%08x --> %08x}\n",
1150 prog, vers, found_vers);
1156 server = rpcrouter_lookup_server(prog, vers);
1158 return ERR_PTR(-EHOSTUNREACH);
1160 ept = msm_rpc_open();
/* Bind the fresh endpoint to the server; prog/vers stored big-endian
 * to match on-the-wire call headers. */
1165 ept->dst_pid = server->pid;
1166 ept->dst_cid = server->cid;
1167 ept->dst_prog = cpu_to_be32(prog);
1168 ept->dst_vers = cpu_to_be32(vers);
1172 EXPORT_SYMBOL(msm_rpc_connect);
/* Return the (host-endian) server version this endpoint is bound to. */
1174 uint32_t msm_rpc_get_vers(struct msm_rpc_endpoint *ept)
1176 return be32_to_cpu(ept->dst_vers);
1178 EXPORT_SYMBOL(msm_rpc_get_vers);
1180 /* TODO: permission check? */
/* Register a local RPC server (prog/vers) owned by the given endpoint
 * and announce it to the remote processor with NEW_SERVER. */
1181 int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
1182 uint32_t prog, uint32_t vers)
1185 union rr_control_msg msg;
1186 struct rr_server *server;
1188 server = rpcrouter_create_server(ept->pid, ept->cid,
1193 msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
1194 msg.srv.pid = ept->pid;
1195 msg.srv.cid = ept->cid;
1196 msg.srv.prog = prog;
1197 msg.srv.vers = vers;
1199 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
1200 ept->pid, ept->cid, prog, vers);
1202 rc = rpcrouter_send_control_msg(&msg);
1209 /* TODO: permission check -- disallow unreg of somebody else's server */
/* Unregister a previously registered server.  NOTE(review): no
 * REMOVE_SERVER control message is sent here in this listing — the
 * remote side is not visibly notified; confirm against the full source. */
1210 int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
1211 uint32_t prog, uint32_t vers)
1213 struct rr_server *server;
1214 server = rpcrouter_lookup_server(prog, vers);
1218 rpcrouter_destroy_server(server);
/* Platform probe: initialize router state, create the single-threaded
 * workqueue and device nodes, open the "SMD_RPCCALL" channel, and kick
 * off the reader work item.  Unwinds via goto labels on failure. */
1222 static int msm_rpcrouter_probe(struct platform_device *pdev)
1226 /* Initialize what we need to start processing */
1227 INIT_LIST_HEAD(&local_endpoints);
1228 INIT_LIST_HEAD(&remote_endpoints);
1230 init_waitqueue_head(&newserver_wait);
1231 init_waitqueue_head(&smd_wait);
1232 wake_lock_init(&rpcrouter_wake_lock, WAKE_LOCK_SUSPEND, "SMD_RPCCALL");
1234 rpcrouter_workqueue = create_singlethread_workqueue("rpcrouter");
1235 if (!rpcrouter_workqueue)
1238 rc = msm_rpcrouter_init_devices();
1240 goto fail_destroy_workqueue;
1242 /* Open up SMD channel 2 */
1244 rc = smd_open("SMD_RPCCALL", &smd_channel, NULL, rpcrouter_smdnotify);
1246 goto fail_remove_devices;
/* Start the reader; it requeues itself after each message. */
1248 queue_work(rpcrouter_workqueue, &work_read_data);
1251 fail_remove_devices:
1252 msm_rpcrouter_exit_devices();
1253 fail_destroy_workqueue:
1254 destroy_workqueue(rpcrouter_workqueue);
/* Probes when the SMD core registers the "SMD_RPCCALL" channel device. */
1258 static struct platform_driver msm_smd_channel2_driver = {
1259 .probe = msm_rpcrouter_probe,
1261 .name = "SMD_RPCCALL",
1262 .owner = THIS_MODULE,
/* Module entry: register the platform driver; all real setup happens
 * in msm_rpcrouter_probe() when the SMD channel device appears. */
1266 static int __init rpcrouter_init(void)
1268 return platform_driver_register(&msm_smd_channel2_driver);
1271 module_init(rpcrouter_init);
1272 MODULE_DESCRIPTION("MSM RPC Router");
1273 MODULE_AUTHOR("San Mehat <san@android.com>");
1274 MODULE_LICENSE("GPL");