{
struct binder_buffer *buffer;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
return buffer;
}
if (!next)
return ERR_PTR(-ENOMEM);
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
goto out;
}
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc mutex if clear_on_free
- * is used frequently for large buffers. The mutex is not
+ * increase contention for the alloc->lock if clear_on_free
+ * is used frequently for large buffers. This lock is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
binder_free_buf_locked(alloc, buffer);
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
}
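The comment in the hunk above carries the key reasoning for keeping binder_alloc_clear_buf() outside the critical section: with a spinlock the critical section has to stay short and non-sleeping, so the potentially expensive scrub of a large buffer is done before the lock is taken and only the cheap bookkeeping happens under it. A minimal, hypothetical kernel-style sketch of that pattern (my_pool, my_buf and my_pool_free are illustrative names, not binder code):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_buf {
	struct list_head entry;
	void *data;
	size_t size;
	bool clear_on_free;
};

struct my_pool {
	spinlock_t lock;		/* protects @bufs only */
	struct list_head bufs;
};

/*
 * Hypothetical free path: scrub the payload outside the lock, then take
 * the spinlock only for the cheap list update. Clearing a large buffer
 * under the lock would make every other caller spin for the duration of
 * the memset.
 */
static void my_pool_free(struct my_pool *pool, struct my_buf *buf)
{
	if (buf->clear_on_free) {
		memset(buf->data, 0, buf->size);
		buf->clear_on_free = false;
	}

	spin_lock(&pool->lock);
	list_del(&buf->entry);
	spin_unlock(&pool->lock);
}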
/**
struct binder_buffer *buffer;
buffers = 0;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
BUG_ON(alloc->vma);
while ((n = rb_first(&alloc->allocated_buffers))) {
}
kfree(alloc->pages);
}
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
if (alloc->mm)
mmdrop(alloc->mm);
struct binder_buffer *buffer;
struct rb_node *n;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
}
/**
int lru = 0;
int free = 0;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
lru++;
}
}
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
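The hunk above keeps the comment about only reading fully initialized state: the debug dump takes alloc->lock before touching anything and backs out if setup has not completed. A rough, hypothetical illustration of that discipline (my_pool and its pages/npages fields are made-up names, not the binder_alloc layout, and the real check in binder is different):

#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;
	void **pages;		/* NULL until setup publishes it under @lock */
	int npages;
};

/*
 * Hypothetical debug helper: all reads happen under pool->lock, and the
 * early return while @pages is still NULL keeps a half-initialized pool
 * from being dereferenced.
 */
static int my_pool_pages_in_use(struct my_pool *pool)
{
	int i, used = 0;

	spin_lock(&pool->lock);
	if (!pool->pages) {
		spin_unlock(&pool->lock);
		return 0;
	}
	for (i = 0; i < pool->npages; i++)
		if (pool->pages[i])
			used++;
	spin_unlock(&pool->lock);

	return used;
}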
struct rb_node *n;
int count = 0;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
return count;
}
goto err_mmget;
if (!mmap_read_trylock(mm))
goto err_mmap_read_lock_failed;
- if (!mutex_trylock(&alloc->mutex))
- goto err_get_alloc_mutex_failed;
+ if (!spin_trylock(&alloc->lock))
+ goto err_get_alloc_lock_failed;
if (!page->page_ptr)
goto err_page_already_freed;
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
spin_unlock(lock);
if (vma) {
err_invalid_vma:
err_page_already_freed:
- mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
+ spin_unlock(&alloc->lock);
+err_get_alloc_lock_failed:
mmap_read_unlock(mm);
err_mmap_read_lock_failed:
mmput_async(mm);
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- mutex_init(&alloc->mutex);
+ spin_lock_init(&alloc->lock);
INIT_LIST_HEAD(&alloc->buffers);
}
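For the spin_lock_init() call above to compile, the lock member itself has to change from a struct mutex to a spinlock_t in the allocator's struct definition; that header hunk is not shown here. A minimal sketch of the kind of declaration these hunks rely on, using an illustrative struct name rather than struct binder_alloc:

#include <linux/spinlock.h>

struct my_alloc {
	spinlock_t lock;	/* was: struct mutex mutex; */
	/* ... allocator state protected by @lock ... */
};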