Merge branch 'fix/core' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie...
author Mark Brown <broonie@kernel.org>
Mon, 26 Feb 2018 11:00:14 +0000 (11:00 +0000)
committer Mark Brown <broonie@kernel.org>
Mon, 26 Feb 2018 11:00:14 +0000 (11:00 +0000)
drivers/base/regmap/regmap.c

index 258a40e2a1d3d05d048bf03fcb7166b228a75a4f,453116fd4362e44c78f294b6b91739f2d34ce7e1..f5d6536636261fcefefd0019564a53175c10de6a
@@@ -1831,7 -1831,7 +1831,7 @@@ int regmap_raw_write(struct regmap *map
                return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
-       if (map->max_raw_write && map->max_raw_write > val_len)
+       if (map->max_raw_write && map->max_raw_write < val_len)
                return -E2BIG;
  
        map->lock(map->lock_arg);
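
This first hunk inverts the max_raw_write bounds check in regmap_raw_write(): previously a write smaller than the device's raw-write limit was rejected with -E2BIG while an oversized one slipped through; now -E2BIG is returned only when val_len exceeds map->max_raw_write. A minimal standalone sketch of the corrected comparison, using a hypothetical trimmed-down struct rather than the kernel's struct regmap:

	#include <stdio.h>
	#include <stddef.h>
	#include <errno.h>

	/* Hypothetical stand-in holding only the field the check uses. */
	struct fake_map {
		size_t max_raw_write;	/* 0 means no limit advertised */
	};

	/* Mirrors the fixed condition: reject only when the request is
	 * larger than the advertised raw-write limit. */
	static int check_raw_write_len(const struct fake_map *map, size_t val_len)
	{
		if (map->max_raw_write && map->max_raw_write < val_len)
			return -E2BIG;
		return 0;
	}

	int main(void)
	{
		struct fake_map map = { .max_raw_write = 16 };

		printf("%d\n", check_raw_write_len(&map, 8));	/* 0: within limit */
		printf("%d\n", check_raw_write_len(&map, 32));	/* -E2BIG: too big */
		return 0;
	}
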
@@@ -1993,7 -1993,7 +1993,7 @@@ out
                                return -EINVAL;
                        }
  
 -                      ret = regmap_write(map, reg + (i * map->reg_stride),
 +                      ret = regmap_write(map, reg + regmap_get_offset(map, i),
                                           ival);
                        if (ret)
                                return ret;
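
The second hunk makes the single-register fallback compute each address with regmap_get_offset() instead of open-coding i * map->reg_stride, so maps whose stride is expressed as a shift are handled the same way as elsewhere in regmap. Roughly, the helper is expected to behave like the sketch below; the reg_stride_order / reg_stride split is an assumption about regmap's internals, not something shown in this diff:

	#include <stdio.h>

	/* Sketch: turn an index into a register offset, using a shift when
	 * the stride is a power of two and a multiply otherwise. */
	static unsigned int sketch_get_offset(int reg_stride_order,
					      unsigned int reg_stride,
					      unsigned int index)
	{
		if (reg_stride_order >= 0)
			return index << reg_stride_order;
		return index * reg_stride;
	}

	int main(void)
	{
		/* stride 4 expressed as a shift of 2 vs. a plain multiply */
		printf("%u %u\n",
		       sketch_get_offset(2, 4, 3),	/* 12 */
		       sketch_get_offset(-1, 4, 3));	/* 12 */
		return 0;
	}
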
@@@ -2542,39 -2542,18 +2542,39 @@@ int regmap_raw_read(struct regmap *map
  
        if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
            map->cache_type == REGCACHE_NONE) {
 +              size_t chunk_count, chunk_bytes;
 +              size_t chunk_regs = val_count;
 +
                if (!map->bus->read) {
                        ret = -ENOTSUPP;
                        goto out;
                }
 -              if (map->max_raw_read && map->max_raw_read < val_len) {
 -                      ret = -E2BIG;
 -                      goto out;
 -              }
  
 -              /* Physical block read if there's no cache involved */
 -              ret = _regmap_raw_read(map, reg, val, val_len);
 +              if (map->use_single_read)
 +                      chunk_regs = 1;
 +              else if (map->max_raw_read && val_len > map->max_raw_read)
 +                      chunk_regs = map->max_raw_read / val_bytes;
 +
 +              chunk_count = val_count / chunk_regs;
 +              chunk_bytes = chunk_regs * val_bytes;
 +
 +              /* Read bytes that fit into whole chunks */
 +              for (i = 0; i < chunk_count; i++) {
 +                      ret = _regmap_raw_read(map, reg, val, chunk_bytes);
 +                      if (ret != 0)
 +                              goto out;
 +
 +                      reg += regmap_get_offset(map, chunk_regs);
 +                      val += chunk_bytes;
 +                      val_len -= chunk_bytes;
 +              }
  
 +              /* Read remaining bytes */
 +              if (val_len) {
 +                      ret = _regmap_raw_read(map, reg, val, val_len);
 +                      if (ret != 0)
 +                              goto out;
 +              }
        } else {
                /* Otherwise go word by word for the cache; should be low
                 * cost as we expect to hit the cache.
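
This hunk moves chunking into regmap_raw_read() itself: when the cache is bypassed, the request is split into whole chunks of chunk_regs registers, where chunk_regs is 1 for single-read-only buses, max_raw_read / val_bytes when the request exceeds the bus limit, and val_count otherwise; a final partial read picks up whatever remains. A standalone sketch of the chunk arithmetic, assuming val_bytes and val_count have already been validated as the surrounding function does (plan_chunks is a hypothetical name):

	#include <stdio.h>
	#include <stdbool.h>
	#include <stddef.h>

	/* Sketch of the chunk sizing used by the new regmap_raw_read() loop. */
	static void plan_chunks(bool use_single_read, size_t max_raw_read,
				size_t val_bytes, size_t val_count)
	{
		size_t val_len = val_bytes * val_count;
		size_t chunk_regs = val_count;
		size_t chunk_count, chunk_bytes;

		if (use_single_read)
			chunk_regs = 1;
		else if (max_raw_read && val_len > max_raw_read)
			chunk_regs = max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		printf("%zu full chunks of %zu bytes, %zu trailing bytes\n",
		       chunk_count, chunk_bytes,
		       val_len - chunk_count * chunk_bytes);
	}

	int main(void)
	{
		/* e.g. 10 two-byte registers over a bus limited to 8 raw bytes:
		 * 2 full chunks of 8 bytes, then a 4-byte trailing read. */
		plan_chunks(false, 8, 2, 10);
		return 0;
	}
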
@@@ -2674,60 -2653,108 +2674,60 @@@ int regmap_bulk_read(struct regmap *map
  
        if (!IS_ALIGNED(reg, map->reg_stride))
                return -EINVAL;
 +      if (val_count == 0)
 +              return -EINVAL;
  
        if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
 -              /*
 -               * Some devices does not support bulk read, for
 -               * them we have a series of single read operations.
 -               */
 -              size_t total_size = val_bytes * val_count;
 -
 -              if (!map->use_single_read &&
 -                  (!map->max_raw_read || map->max_raw_read > total_size)) {
 -                      ret = regmap_raw_read(map, reg, val,
 -                                            val_bytes * val_count);
 -                      if (ret != 0)
 -                              return ret;
 -              } else {
 -                      /*
 -                       * Some devices do not support bulk read or do not
 -                       * support large bulk reads, for them we have a series
 -                       * of read operations.
 -                       */
 -                      int chunk_stride = map->reg_stride;
 -                      size_t chunk_size = val_bytes;
 -                      size_t chunk_count = val_count;
 -
 -                      if (!map->use_single_read) {
 -                              chunk_size = map->max_raw_read;
 -                              if (chunk_size % val_bytes)
 -                                      chunk_size -= chunk_size % val_bytes;
 -                              chunk_count = total_size / chunk_size;
 -                              chunk_stride *= chunk_size / val_bytes;
 -                      }
 -
 -                      /* Read bytes that fit into a multiple of chunk_size */
 -                      for (i = 0; i < chunk_count; i++) {
 -                              ret = regmap_raw_read(map,
 -                                                    reg + (i * chunk_stride),
 -                                                    val + (i * chunk_size),
 -                                                    chunk_size);
 -                              if (ret != 0)
 -                                      return ret;
 -                      }
 -
 -                      /* Read remaining bytes */
 -                      if (chunk_size * i < total_size) {
 -                              ret = regmap_raw_read(map,
 -                                                    reg + (i * chunk_stride),
 -                                                    val + (i * chunk_size),
 -                                                    total_size - i * chunk_size);
 -                              if (ret != 0)
 -                                      return ret;
 -                      }
 -              }
 +              ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
 +              if (ret != 0)
 +                      return ret;
  
                for (i = 0; i < val_count * val_bytes; i += val_bytes)
                        map->format.parse_inplace(val + i);
        } else {
 +#ifdef CONFIG_64BIT
 +              u64 *u64 = val;
 +#endif
 +              u32 *u32 = val;
 +              u16 *u16 = val;
 +              u8 *u8 = val;
 +
 +              map->lock(map->lock_arg);
 +
                for (i = 0; i < val_count; i++) {
                        unsigned int ival;
 -                      ret = regmap_read(map, reg + regmap_get_offset(map, i),
 -                                        &ival);
 -                      if (ret != 0)
 -                              return ret;
  
 -                      if (map->format.format_val) {
 -                              map->format.format_val(val + (i * val_bytes), ival, 0);
 -                      } else {
 -                              /* Devices providing read and write
 -                               * operations can use the bulk I/O
 -                               * functions if they define a val_bytes,
 -                               * we assume that the values are native
 -                               * endian.
 -                               */
 -#ifdef CONFIG_64BIT
 -                              u64 *u64 = val;
 -#endif
 -                              u32 *u32 = val;
 -                              u16 *u16 = val;
 -                              u8 *u8 = val;
 +                      ret = _regmap_read(map, reg + regmap_get_offset(map, i),
 +                                         &ival);
 +                      if (ret != 0)
 +                              goto out;
  
 -                              switch (map->format.val_bytes) {
 +                      switch (map->format.val_bytes) {
  #ifdef CONFIG_64BIT
 -                              case 8:
 -                                      u64[i] = ival;
 -                                      break;
 +                      case 8:
 +                              u64[i] = ival;
 +                              break;
  #endif
 -                              case 4:
 -                                      u32[i] = ival;
 -                                      break;
 -                              case 2:
 -                                      u16[i] = ival;
 -                                      break;
 -                              case 1:
 -                                      u8[i] = ival;
 -                                      break;
 -                              default:
 -                                      return -EINVAL;
 -                              }
 +                      case 4:
 +                              u32[i] = ival;
 +                              break;
 +                      case 2:
 +                              u16[i] = ival;
 +                              break;
 +                      case 1:
 +                              u8[i] = ival;
 +                              break;
 +                      default:
 +                              ret = -EINVAL;
 +                              goto out;
                        }
                }
 +
 +out:
 +              map->unlock(map->lock_arg);
        }
  
 -      return 0;
 +      return ret;
  }
  EXPORT_SYMBOL_GPL(regmap_bulk_read);
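
The final hunk simplifies regmap_bulk_read(): the raw path now issues a single regmap_raw_read() call (which handles chunking itself, per the previous hunk), while the word-by-word path takes the map lock once, reads each register with _regmap_read(), stores the value into the caller's buffer at native endianness according to val_bytes, and unlocks via the out label on both success and error. A minimal sketch of that native-endian scatter step in isolation; fill_native() is a hypothetical helper, not the kernel function:

	#include <stdint.h>
	#include <stdio.h>
	#include <stddef.h>
	#include <errno.h>

	/* Hypothetical helper mirroring the switch in the new bulk-read path:
	 * store one register value into a caller buffer of val_bytes-wide
	 * elements, assuming native endianness. */
	static int fill_native(void *val, size_t val_bytes, size_t i,
			       unsigned int ival)
	{
		switch (val_bytes) {
		case 4:
			((uint32_t *)val)[i] = ival;
			break;
		case 2:
			((uint16_t *)val)[i] = ival;
			break;
		case 1:
			((uint8_t *)val)[i] = ival;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}

	int main(void)
	{
		uint16_t buf[4] = { 0 };
		unsigned int regs[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };

		for (size_t i = 0; i < 4; i++)
			fill_native(buf, sizeof(buf[0]), i, regs[i]);

		for (size_t i = 0; i < 4; i++)
			printf("0x%04x\n", buf[i]);
		return 0;
	}
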