drm/nouveau/flcn/qmgr: rename remaining nvkm_msgqueue bits to nvkm_falcon_qmgr
author: Ben Skeggs <bskeggs@redhat.com>
Tue, 14 Jan 2020 20:34:22 +0000 (06:34 +1000)
committer: Ben Skeggs <bskeggs@redhat.com>
Wed, 15 Jan 2020 00:50:28 +0000 (10:50 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h

index d6e84a66784548946140bb228c3f03f58e92219c..a674548f616841451c7d6c7c2f94c7a7c46c3238 100644 (file)
@@ -152,7 +152,7 @@ nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
                   struct nvkm_msgqueue_hdr *cmd, nvkm_falcon_qmgr_callback cb,
                   struct completion *completion, bool wait_init)
 {
-       struct nvkm_msgqueue_seq *seq;
+       struct nvkm_falcon_qmgr_seq *seq;
        struct nvkm_msgqueue_queue *queue;
        int ret;
 
index 7e9e82da7ea7dc21a4cd083c6c1b3a83aefd349d..eb499b796fe5698db4db39ba4a27853c26986f92 100644 (file)
@@ -139,9 +139,9 @@ msgqueue_msg_handle(struct nvkm_msgqueue *priv,
                    struct nv_falcon_msg *hdr)
 {
        const struct nvkm_subdev *subdev = priv->falcon->owner;
-       struct nvkm_msgqueue_seq *seq;
+       struct nvkm_falcon_qmgr_seq *seq;
 
-       seq = &msgq->qmgr->seq[hdr->seq_id];
+       seq = &msgq->qmgr->seq.id[hdr->seq_id];
        if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
                nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
                return -EINVAL;
index b67e85b169aa5bb8a1b7324fdb981e3d423672ad..a453de341a75de72b7f6ab9a13b2744c2de02986 100644 (file)
  */
 #include "qmgr.h"
 
-struct nvkm_msgqueue_seq *
-nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *priv)
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
 {
-       const struct nvkm_subdev *subdev = priv->falcon->owner;
-       struct nvkm_msgqueue_seq *seq;
+       const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+       struct nvkm_falcon_qmgr_seq *seq;
        u32 index;
 
-       mutex_lock(&priv->seq_lock);
-       index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
-       if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
+       mutex_lock(&qmgr->seq.mutex);
+       index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+       if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
                nvkm_error(subdev, "no free sequence available\n");
-               mutex_unlock(&priv->seq_lock);
+               mutex_unlock(&qmgr->seq.mutex);
                return ERR_PTR(-EAGAIN);
        }
 
-       set_bit(index, priv->seq_tbl);
-       mutex_unlock(&priv->seq_lock);
+       set_bit(index, qmgr->seq.tbl);
+       mutex_unlock(&qmgr->seq.mutex);
 
-       seq = &priv->seq[index];
+       seq = &qmgr->seq.id[index];
        seq->state = SEQ_STATE_PENDING;
        return seq;
 }
 
 void
-nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *priv,
-                            struct nvkm_msgqueue_seq *seq)
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+                            struct nvkm_falcon_qmgr_seq *seq)
 {
-       /* no need to acquire seq_lock since clear_bit is atomic */
+       /* no need to acquire seq.mutex since clear_bit is atomic */
        seq->state = SEQ_STATE_FREE;
        seq->callback = NULL;
        reinit_completion(&seq->done);
-       clear_bit(seq->id, priv->seq_tbl);
+       clear_bit(seq->id, qmgr->seq.tbl);
 }
 
 void
@@ -77,10 +77,10 @@ nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
                return -ENOMEM;
 
        qmgr->falcon = falcon;
-       mutex_init(&qmgr->seq_lock);
-       for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++) {
-               qmgr->seq[i].id = i;
-               init_completion(&qmgr->seq[i].done);
+       mutex_init(&qmgr->seq.mutex);
+       for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+               qmgr->seq.id[i].id = i;
+               init_completion(&qmgr->seq.id[i].done);
        }
 
        return 0;
index 905a625b334854fa42b343c52d2a61ef5d4b626e..935858c6a6210cf84ff281dbcece82c8f7da20ea 100644 (file)
@@ -10,7 +10,7 @@
 #define MSG_BUF_SIZE 128
 
 /**
- * struct nvkm_msgqueue_seq - keep track of ongoing commands
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
  *
  * Every time a command is sent, a sequence is assigned to it so the
  * corresponding message can be matched. Upon receiving the message, a callback
@@ -21,7 +21,7 @@
  * @callback:  callback to call upon receiving matching message
  * @completion:        completion to signal after callback is called
  */
-struct nvkm_msgqueue_seq {
+struct nvkm_falcon_qmgr_seq {
        u16 id;
        enum {
                SEQ_STATE_FREE = 0,
@@ -40,20 +40,22 @@ struct nvkm_msgqueue_seq {
  * We can have an arbitrary number of sequences, but realistically we will
  * probably not use that much simultaneously.
  */
-#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
+#define NVKM_FALCON_QMGR_SEQ_NUM 16
 
 struct nvkm_falcon_qmgr {
        struct nvkm_falcon *falcon;
 
-       struct mutex seq_lock;
-       struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
-       unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
+       struct {
+               struct mutex mutex;
+               struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM];
+               unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)];
+       } seq;
 };
 
-struct nvkm_msgqueue_seq *
+struct nvkm_falcon_qmgr_seq *
 nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
 void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
-                                 struct nvkm_msgqueue_seq *);
+                                 struct nvkm_falcon_qmgr_seq *);
 
 #define FLCNQ_PRINTK(t,q,f,a...)                                               \
        FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)