There is no reason for it to be recursive. Recursiveness leaked
from struct fio_zone_info::mutex initialisation.
Signed-off-by: Alexey Dobriyan (SK hynix) <adobriyan@gmail.com>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
		return -ENOMEM;
	pthread_mutexattr_init(&attr);
-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);
	pthread_mutex_init(&zbd_info->mutex, &attr);
+	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (i = 0; i < nr_zones; i++, p++) {
	int i, j, ret = 0;

	pthread_mutexattr_init(&attr);
-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);
	zones = calloc(ZBD_REPORT_MAX_ZONES, sizeof(struct zbd_zone));
	if (!zbd_info)
		goto out;
	pthread_mutex_init(&zbd_info->mutex, &attr);
+	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (offset = 0, j = 0; j < nr_zones;) {
 * Reset zbd_info.write_cnt, the counter that counts down towards the next
 * zone reset.
 */
-static void zbd_reset_write_cnt(const struct thread_data *td,
-				const struct fio_file *f)
+static void _zbd_reset_write_cnt(const struct thread_data *td,
+				 const struct fio_file *f)
{
	assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);
-	pthread_mutex_lock(&f->zbd_info->mutex);
	f->zbd_info->write_cnt = td->o.zrf.u.f ?
		min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
+}
+
+static void zbd_reset_write_cnt(const struct thread_data *td,
+				const struct fio_file *f)
+{
+	pthread_mutex_lock(&f->zbd_info->mutex);
+	_zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);
}
	if (f->zbd_info->write_cnt)
		write_cnt = --f->zbd_info->write_cnt;
	if (write_cnt == 0)
-		zbd_reset_write_cnt(td, f);
+		_zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);
	return write_cnt == 0;