dmaengine: replace dma_async_client_register with dmaengine_get
authorDan Williams <dan.j.williams@intel.com>
Tue, 6 Jan 2009 18:38:17 +0000 (11:38 -0700)
committerDan Williams <dan.j.williams@intel.com>
Tue, 6 Jan 2009 18:38:17 +0000 (11:38 -0700)
Now that clients no longer need to be notified of channel arrival,
dma_async_client_register can simply increment the dmaengine_ref_count.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
crypto/async_tx/async_tx.c
drivers/dma/dmaengine.c
include/linux/dmaengine.h
net/core/dev.c

index 2cdf7a0867b7a1c0de573168a1d578646c4c2b84..f21147f3626a628c2fb78e1e5448a0361f9a630d 100644 (file)
 #include <linux/async_tx.h>
 
 #ifdef CONFIG_DMA_ENGINE
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-       struct dma_chan *chan, enum dma_state state);
-
-static struct dma_client async_tx_dma = {
-       .event_callback = dma_channel_add_remove,
-       /* .cap_mask == 0 defaults to all channels */
-};
-
-/**
- * async_tx_lock - protect modification of async_tx_master_list and serialize
- *     rebalance operations
- */
-static DEFINE_SPINLOCK(async_tx_lock);
-
-static LIST_HEAD(async_tx_master_list);
-
-static void
-free_dma_chan_ref(struct rcu_head *rcu)
-{
-       struct dma_chan_ref *ref;
-       ref = container_of(rcu, struct dma_chan_ref, rcu);
-       kfree(ref);
-}
-
-static void
-init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
-{
-       INIT_LIST_HEAD(&ref->node);
-       INIT_RCU_HEAD(&ref->rcu);
-       ref->chan = chan;
-       atomic_set(&ref->count, 0);
-}
-
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-       struct dma_chan *chan, enum dma_state state)
-{
-       unsigned long found, flags;
-       struct dma_chan_ref *master_ref, *ref;
-       enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-       switch (state) {
-       case DMA_RESOURCE_AVAILABLE:
-               found = 0;
-               rcu_read_lock();
-               list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-                       if (ref->chan == chan) {
-                               found = 1;
-                               break;
-                       }
-               rcu_read_unlock();
-
-               pr_debug("async_tx: dma resource available [%s]\n",
-                       found ? "old" : "new");
-
-               if (!found)
-                       ack = DMA_ACK;
-               else
-                       break;
-
-               /* add the channel to the generic management list */
-               master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
-               if (master_ref) {
-                       init_dma_chan_ref(master_ref, chan);
-                       spin_lock_irqsave(&async_tx_lock, flags);
-                       list_add_tail_rcu(&master_ref->node,
-                               &async_tx_master_list);
-                       spin_unlock_irqrestore(&async_tx_lock,
-                               flags);
-               } else {
-                       printk(KERN_WARNING "async_tx: unable to create"
-                               " new master entry in response to"
-                               " a DMA_RESOURCE_ADDED event"
-                               " (-ENOMEM)\n");
-                       return 0;
-               }
-               break;
-       case DMA_RESOURCE_REMOVED:
-               found = 0;
-               spin_lock_irqsave(&async_tx_lock, flags);
-               list_for_each_entry(ref, &async_tx_master_list, node)
-                       if (ref->chan == chan) {
-                               list_del_rcu(&ref->node);
-                               call_rcu(&ref->rcu, free_dma_chan_ref);
-                               found = 1;
-                               break;
-                       }
-               spin_unlock_irqrestore(&async_tx_lock, flags);
-
-               pr_debug("async_tx: dma resource removed [%s]\n",
-                       found ? "ours" : "not ours");
-
-               if (found)
-                       ack = DMA_ACK;
-               else
-                       break;
-               break;
-       case DMA_RESOURCE_SUSPEND:
-       case DMA_RESOURCE_RESUME:
-               printk(KERN_WARNING "async_tx: does not support dma channel"
-                       " suspend/resume\n");
-               break;
-       default:
-               BUG();
-       }
-
-       return ack;
-}
-
 static int __init async_tx_init(void)
 {
-       dma_async_client_register(&async_tx_dma);
-       dma_async_client_chan_request(&async_tx_dma);
+       dmaengine_get();
 
        printk(KERN_INFO "async_tx: api initialized (async)\n");
 
@@ -150,7 +39,7 @@ static int __init async_tx_init(void)
 
 static void __exit async_tx_exit(void)
 {
-       dma_async_client_unregister(&async_tx_dma);
+       dmaengine_put();
 }
 
 /**
index 90aca505a1df109eb5b14fda1a5ae448ed8629de..3f1849b7f5ef7b7aa283e16dba460c3ae0d2f98d 100644 (file)
@@ -600,10 +600,9 @@ static void dma_clients_notify_available(void)
 }
 
 /**
- * dma_async_client_register - register a &dma_client
- * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ * dmaengine_get - register interest in dma_channels
  */
-void dma_async_client_register(struct dma_client *client)
+void dmaengine_get(void)
 {
        struct dma_device *device, *_d;
        struct dma_chan *chan;
@@ -634,25 +633,18 @@ void dma_async_client_register(struct dma_client *client)
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
-       list_add_tail(&client->global_node, &dma_client_list);
        mutex_unlock(&dma_list_mutex);
 }
-EXPORT_SYMBOL(dma_async_client_register);
+EXPORT_SYMBOL(dmaengine_get);
 
 /**
- * dma_async_client_unregister - unregister a client and free the &dma_client
- * @client: &dma_client to free
- *
- * Force frees any allocated DMA channels, frees the &dma_client memory
+ * dmaengine_put - let dma drivers be removed when ref_count == 0
  */
-void dma_async_client_unregister(struct dma_client *client)
+void dmaengine_put(void)
 {
        struct dma_device *device;
        struct dma_chan *chan;
 
-       if (!client)
-               return;
-
        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
@@ -663,11 +655,9 @@ void dma_async_client_unregister(struct dma_client *client)
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
-
-       list_del(&client->global_node);
        mutex_unlock(&dma_list_mutex);
 }
-EXPORT_SYMBOL(dma_async_client_unregister);
+EXPORT_SYMBOL(dmaengine_put);
 
 /**
  * dma_async_client_chan_request - send all available channels to the
index d63544cf8a1ae1c6cd2408983d728ddf304283ff..37d95db156d35f79bb98f37356518142fd03d3be 100644 (file)
@@ -318,8 +318,8 @@ struct dma_device {
 
 /* --- public DMA engine API --- */
 
-void dma_async_client_register(struct dma_client *client);
-void dma_async_client_unregister(struct dma_client *client);
+void dmaengine_get(void);
+void dmaengine_put(void);
 void dma_async_client_chan_request(struct dma_client *client);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
        void *dest, void *src, size_t len);
index bbb07dbe1740bd68a9b8da736fae336735b23037..7596fc9403c8a15cf5c7f3f023e59724993e526f 100644 (file)
@@ -4894,8 +4894,7 @@ static int __init netdev_dma_register(void)
        }
        spin_lock_init(&net_dma.lock);
        dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-       dma_async_client_register(&net_dma.client);
-       dma_async_client_chan_request(&net_dma.client);
+       dmaengine_get();
        return 0;
 }