mei: add a vtag map for each client
author: Alexander Usyskin <alexander.usyskin@intel.com>
Tue, 18 Aug 2020 11:51:41 +0000 (14:51 +0300)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Aug 2020 13:44:44 +0000 (15:44 +0200)
Vtag map is a list of tuples of vtag and file pointer (struct
mei_cl_vtag) associated with a particular me host client.

Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Link: https://lore.kernel.org/r/20200818115147.2567012-8-tomas.winkler@intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/misc/mei/client.c
drivers/misc/mei/client.h
drivers/misc/mei/main.c
drivers/misc/mei/mei_dev.h

index 276021f99666be1b9435ccbce3b1a9d738d310b7..3904fce182610bb289526c1b73b2b325eedc1d1d 100644 (file)
@@ -354,6 +354,27 @@ static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
        mei_io_cb_free(cb);
 }
 
+/**
+ * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * @cl: mei client
+ * @fp: pointer to file structure
+ */
+static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
+                                 const struct file *fp)
+{
+       struct mei_cl_vtag *cl_vtag;
+
+       list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
+               if (cl_vtag->fp == fp) {
+                       cl_vtag->pending_read = true;
+                       return;
+               }
+       }
+}
+
 /**
  * mei_io_cb_init - allocate and initialize io callback
  *
@@ -435,6 +456,19 @@ static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
                        mei_io_cb_free(cb);
 }
 
+/**
+ * mei_cl_free_pending - free pending cb
+ *
+ * @cl: host client
+ */
+static void mei_cl_free_pending(struct mei_cl *cl)
+{
+       struct mei_cl_cb *cb;
+
+       cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
+       mei_io_cb_free(cb);
+}
+
 /**
  * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
  *
@@ -544,7 +578,9 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
        mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
        mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
        mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
-       mei_io_list_free_fp(&cl->rd_pending, fp);
+       /* free pending cb only in final flush */
+       if (!fp)
+               mei_cl_free_pending(cl);
        spin_lock(&cl->rd_completed_lock);
        mei_io_list_free_fp(&cl->rd_completed, fp);
        spin_unlock(&cl->rd_completed_lock);
@@ -565,6 +601,7 @@ static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
        init_waitqueue_head(&cl->rx_wait);
        init_waitqueue_head(&cl->tx_wait);
        init_waitqueue_head(&cl->ev_wait);
+       INIT_LIST_HEAD(&cl->vtag_map);
        spin_lock_init(&cl->rd_completed_lock);
        INIT_LIST_HEAD(&cl->rd_completed);
        INIT_LIST_HEAD(&cl->rd_pending);
@@ -1237,8 +1274,117 @@ static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
        return 0;
 }
 
+/**
+ * mei_cl_vtag_alloc - allocate and fill the vtag structure
+ *
+ * @fp: pointer to file structure
+ * @vtag: vm tag
+ *
+ * Return:
+ * * Pointer to allocated struct - on success
+ * * ERR_PTR(-ENOMEM) on memory allocation failure
+ */
+struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
+{
+       struct mei_cl_vtag *cl_vtag;
+
+       cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
+       if (!cl_vtag)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&cl_vtag->list);
+       cl_vtag->vtag = vtag;
+       cl_vtag->fp = fp;
+
+       return cl_vtag;
+}
+
+/**
+ * mei_cl_fp_by_vtag - obtain the file pointer by vtag
+ *
+ * @cl: host client
+ * @vtag: vm tag
+ *
+ * Return:
+ * * A file pointer - on success
+ * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
+ */
+const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
+{
+       struct mei_cl_vtag *vtag_l;
+
+       list_for_each_entry(vtag_l, &cl->vtag_map, list)
+               if (vtag_l->vtag == vtag)
+                       return vtag_l->fp;
+
+       return ERR_PTR(-ENOENT);
+}
+
+/**
+ * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
+ *
+ * @cl: host client
+ * @vtag: vm tag
+ */
+static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
+{
+       struct mei_cl_vtag *vtag_l;
+
+       list_for_each_entry(vtag_l, &cl->vtag_map, list) {
+               if (vtag_l->vtag == vtag) {
+                       vtag_l->pending_read = false;
+                       break;
+               }
+       }
+}
+
+/**
+ * mei_cl_read_vtag_add_fc - add flow control for next pending reader
+ *                           in the vtag list
+ *
+ * @cl: host client
+ */
+static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
+{
+       struct mei_cl_vtag *cl_vtag;
+
+       list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
+               if (cl_vtag->pending_read) {
+                       if (mei_cl_enqueue_ctrl_wr_cb(cl,
+                                                     mei_cl_mtu(cl),
+                                                     MEI_FOP_READ,
+                                                     cl_vtag->fp))
+                               cl->rx_flow_ctrl_creds++;
+                       break;
+               }
+       }
+}
+
+/**
+ * mei_cl_vt_support_check - check if client support vtags
+ *
+ * @cl: host client
+ *
+ * Return:
+ * * 0 - supported, or not connected at all
+ * * -EOPNOTSUPP - vtags are not supported by client
+ */
+int mei_cl_vt_support_check(const struct mei_cl *cl)
+{
+       struct mei_device *dev = cl->dev;
+
+       if (!dev->hbm_f_vt_supported)
+               return -EOPNOTSUPP;
+
+       if (!cl->me_cl)
+               return 0;
+
+       return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
+}
+
 /**
  * mei_cl_add_rd_completed - add read completed callback to list with lock
+ *                           and vtag check
  *
  * @cl: host client
  * @cb: callback block
@@ -1246,6 +1392,20 @@ static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
  */
 void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
 {
+       const struct file *fp;
+
+       if (!mei_cl_vt_support_check(cl)) {
+               fp = mei_cl_fp_by_vtag(cl, cb->vtag);
+               if (IS_ERR(fp)) {
+                       /* client already disconnected, discarding */
+                       mei_io_cb_free(cb);
+                       return;
+               }
+               cb->fp = fp;
+               mei_cl_reset_read_by_vtag(cl, cb->vtag);
+               mei_cl_read_vtag_add_fc(cl);
+       }
+
        spin_lock(&cl->rd_completed_lock);
        list_add_tail(&cb->list, &cl->rd_completed);
        spin_unlock(&cl->rd_completed_lock);
@@ -1520,13 +1680,17 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
                return 0;
 
        /* HW currently supports only one pending read */
-       if (cl->rx_flow_ctrl_creds)
+       if (cl->rx_flow_ctrl_creds) {
+               mei_cl_set_read_by_fp(cl, fp);
                return -EBUSY;
+       }
 
        cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
        if (!cb)
                return -ENOMEM;
 
+       mei_cl_set_read_by_fp(cl, fp);
+
        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
index bd57c64f6c1a0468d2097b5ef28fe027b7b49dc3..64143d4ec75830be59e3e7b4353a1fed3f44c595 100644 (file)
@@ -146,6 +146,9 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
                                            const struct file *fp);
 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
 
+struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag);
+const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag);
+int mei_cl_vt_support_check(const struct mei_cl *cl);
 /*
  *  MEI input output function prototype
  */
index 441bdea4d4c1d85d9f56d30e622fbe85095dbf76..401bf8743689a0f2467bd141439bf51a9626af6d 100644 (file)
@@ -80,6 +80,27 @@ err_unlock:
        return err;
 }
 
+/**
+ * mei_cl_vtag_remove_by_fp - remove vtag that corresponds to fp from list
+ *
+ * @cl: host client
+ * @fp: pointer to file structure
+ *
+ */
+static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
+                                    const struct file *fp)
+{
+       struct mei_cl_vtag *vtag_l, *next;
+
+       list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
+               if (vtag_l->fp == fp) {
+                       list_del(&vtag_l->list);
+                       kfree(vtag_l);
+                       return;
+               }
+       }
+}
+
 /**
  * mei_release - the release function
  *
@@ -101,17 +122,35 @@ static int mei_release(struct inode *inode, struct file *file)
 
        mutex_lock(&dev->device_lock);
 
+       mei_cl_vtag_remove_by_fp(cl, file);
+
+       if (!list_empty(&cl->vtag_map)) {
+               cl_dbg(dev, cl, "not the last vtag\n");
+               mei_cl_flush_queues(cl, file);
+               rets = 0;
+               goto out;
+       }
+
        rets = mei_cl_disconnect(cl);
+       /*
+        * Check again: This is necessary since disconnect releases the lock
+        * and another client can connect in the meantime.
+        */
+       if (!list_empty(&cl->vtag_map)) {
+               cl_dbg(dev, cl, "not the last vtag after disconnect\n");
+               mei_cl_flush_queues(cl, file);
+               goto out;
+       }
 
-       mei_cl_flush_queues(cl, file);
+       mei_cl_flush_queues(cl, NULL);
        cl_dbg(dev, cl, "removing\n");
 
        mei_cl_unlink(cl);
+       kfree(cl);
 
+out:
        file->private_data = NULL;
 
-       kfree(cl);
-
        mutex_unlock(&dev->device_lock);
        return rets;
 }
@@ -237,6 +276,28 @@ out:
        mutex_unlock(&dev->device_lock);
        return rets;
 }
+
+/**
+ * mei_cl_vtag_by_fp - obtain the vtag by file pointer
+ *
+ * @cl: host client
+ * @fp: pointer to file structure
+ *
+ * Return: vtag value on success, otherwise 0
+ */
+static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
+{
+       struct mei_cl_vtag *cl_vtag;
+
+       if (!fp)
+               return 0;
+
+       list_for_each_entry(cl_vtag, &cl->vtag_map, list)
+               if (cl_vtag->fp == fp)
+                       return cl_vtag->vtag;
+       return 0;
+}
+
 /**
  * mei_write - the write function.
  *
@@ -314,6 +375,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
                rets = -ENOMEM;
                goto out;
        }
+       cb->vtag = mei_cl_vtag_by_fp(cl, file);
 
        rets = copy_from_user(cb->buf.data, ubuf, length);
        if (rets) {
index 1219edea3243f3d4662b626ece94b5a856ffcbd3..2f4cc1a8aae8ce355e3c2d64b91f1b270635a493 100644 (file)
@@ -193,6 +193,21 @@ struct mei_cl_cb {
        u32 blocking:1;
 };
 
+/**
+ * struct mei_cl_vtag - file pointer to vtag mapping structure
+ *
+ * @list: link in map queue
+ * @fp: file pointer
+ * @vtag: corresponding vtag
+ * @pending_read: the read is pending on this file
+ */
+struct mei_cl_vtag {
+       struct list_head list;
+       const struct file *fp;
+       u8 vtag;
+       u8 pending_read:1;
+};
+
 /**
  * struct mei_cl - me client host representation
  *    carried in file->private_data
@@ -209,6 +224,7 @@ struct mei_cl_cb {
  * @me_cl: fw client connected
  * @fp: file associated with client
  * @host_client_id: host id
+ * @vtag_map: vtag map
  * @tx_flow_ctrl_creds: transmit flow credentials
  * @rx_flow_ctrl_creds: receive flow credentials
  * @timer_count:  watchdog timer for operation completion
@@ -235,6 +251,7 @@ struct mei_cl {
        struct mei_me_client *me_cl;
        const struct file *fp;
        u8 host_client_id;
+       struct list_head vtag_map;
        u8 tx_flow_ctrl_creds;
        u8 rx_flow_ctrl_creds;
        u8 timer_count;