iio: buffer: iio: core: move to the cleanup.h magic
Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.

Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240229-iio-use-cleanup-magic-v3-3-c3d34889ae3c@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
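
For context (not part of the patch): guard(mutex)(...) comes from the kernel's scope-based cleanup helpers pulled in via linux/cleanup.h, which are built on the compiler's __attribute__((cleanup)) feature. The mutex is released automatically when the guard variable goes out of scope, which is what lets the goto/unlock error paths below collapse into plain early returns. A rough user-space sketch of the same idea follows; the pthread mutex and the names GUARD_MUTEX, unlock_cleanup and store_value are illustrative inventions, not the kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when a GUARD_MUTEX variable leaves scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* Rough analogue of guard(mutex)(&lock): lock now, unlock at end of scope. */
#define GUARD_MUTEX(m) \
        pthread_mutex_t *_guard __attribute__((cleanup(unlock_cleanup))) = (m); \
        pthread_mutex_lock(_guard)

/* Mimics the shape of the converted sysfs store callbacks. */
static int store_value(int val, int *dst)
{
        GUARD_MUTEX(&lock);

        if (val < 0)
                return -1;      /* early return: the unlock still runs */

        *dst = val;
        return 0;               /* normal return: the unlock still runs */
}

int main(void)
{
        int v = 0;

        store_value(42, &v);
        printf("v = %d\n", v);
        return 0;
}

The kernel's guard() machinery generalizes exactly this pattern, so every return statement in the converted functions can simply leave the scope and rely on the unlock happening automatically.
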
Nuno Sa authored and Jonathan Cameron committed Mar 25, 2024
1 parent 095be2d commit 714b5b4
Showing 1 changed file with 47 additions and 73 deletions.

--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -10,6 +10,7 @@
  * - Alternative access techniques?
  */
 #include <linux/anon_inodes.h>
+#include <linux/cleanup.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/device.h>
@@ -533,28 +534,26 @@ static ssize_t iio_scan_el_store(struct device *dev,
         ret = kstrtobool(buf, &state);
         if (ret < 0)
                 return ret;
-        mutex_lock(&iio_dev_opaque->mlock);
-        if (iio_buffer_is_active(buffer)) {
-                ret = -EBUSY;
-                goto error_ret;
-        }
+
+        guard(mutex)(&iio_dev_opaque->mlock);
+        if (iio_buffer_is_active(buffer))
+                return -EBUSY;
+
         ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
         if (ret < 0)
-                goto error_ret;
-        if (!state && ret) {
-                ret = iio_scan_mask_clear(buffer, this_attr->address);
-                if (ret)
-                        goto error_ret;
-        } else if (state && !ret) {
-                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
-                if (ret)
-                        goto error_ret;
-        }
+                return ret;

-error_ret:
-        mutex_unlock(&iio_dev_opaque->mlock);
+        if (state && ret)
+                return len;

-        return ret < 0 ? ret : len;
+        if (state)
+                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
+        else
+                ret = iio_scan_mask_clear(buffer, this_attr->address);
+        if (ret)
+                return ret;
+
+        return len;
 }

 static ssize_t iio_scan_el_ts_show(struct device *dev,
@@ -581,16 +580,13 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
         if (ret < 0)
                 return ret;

-        mutex_lock(&iio_dev_opaque->mlock);
-        if (iio_buffer_is_active(buffer)) {
-                ret = -EBUSY;
-                goto error_ret;
-        }
+        guard(mutex)(&iio_dev_opaque->mlock);
+        if (iio_buffer_is_active(buffer))
+                return -EBUSY;
+
         buffer->scan_timestamp = state;
-error_ret:
-        mutex_unlock(&iio_dev_opaque->mlock);

-        return ret ? ret : len;
+        return len;
 }

 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
@@ -674,21 +670,16 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr,
         if (val == buffer->length)
                 return len;

-        mutex_lock(&iio_dev_opaque->mlock);
-        if (iio_buffer_is_active(buffer)) {
-                ret = -EBUSY;
-        } else {
-                buffer->access->set_length(buffer, val);
-                ret = 0;
-        }
-        if (ret)
-                goto out;
+        guard(mutex)(&iio_dev_opaque->mlock);
+        if (iio_buffer_is_active(buffer))
+                return -EBUSY;
+
+        buffer->access->set_length(buffer, val);
+
         if (buffer->length && buffer->length < buffer->watermark)
                 buffer->watermark = buffer->length;
-out:
-        mutex_unlock(&iio_dev_opaque->mlock);

-        return ret ? ret : len;
+        return len;
 }

 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
@@ -1268,7 +1259,6 @@ int iio_update_buffers(struct iio_dev *indio_dev,
                        struct iio_buffer *remove_buffer)
 {
         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
-        int ret;

         if (insert_buffer == remove_buffer)
                 return 0;
@@ -1277,32 +1267,22 @@ int iio_update_buffers(struct iio_dev *indio_dev,
             insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
                 return -EINVAL;

-        mutex_lock(&iio_dev_opaque->info_exist_lock);
-        mutex_lock(&iio_dev_opaque->mlock);
+        guard(mutex)(&iio_dev_opaque->info_exist_lock);
+        guard(mutex)(&iio_dev_opaque->mlock);

         if (insert_buffer && iio_buffer_is_active(insert_buffer))
                 insert_buffer = NULL;

         if (remove_buffer && !iio_buffer_is_active(remove_buffer))
                 remove_buffer = NULL;

-        if (!insert_buffer && !remove_buffer) {
-                ret = 0;
-                goto out_unlock;
-        }
-
-        if (!indio_dev->info) {
-                ret = -ENODEV;
-                goto out_unlock;
-        }
-
-        ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
+        if (!insert_buffer && !remove_buffer)
+                return 0;

-out_unlock:
-        mutex_unlock(&iio_dev_opaque->mlock);
-        mutex_unlock(&iio_dev_opaque->info_exist_lock);
+        if (!indio_dev->info)
+                return -ENODEV;

-        return ret;
+        return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
 }
 EXPORT_SYMBOL_GPL(iio_update_buffers);

@@ -1326,22 +1306,22 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
         if (ret < 0)
                 return ret;

-        mutex_lock(&iio_dev_opaque->mlock);
+        guard(mutex)(&iio_dev_opaque->mlock);

         /* Find out if it is in the list */
         inlist = iio_buffer_is_active(buffer);
         /* Already in desired state */
         if (inlist == requested_state)
-                goto done;
+                return len;

         if (requested_state)
                 ret = __iio_update_buffers(indio_dev, buffer, NULL);
         else
                 ret = __iio_update_buffers(indio_dev, NULL, buffer);
+        if (ret)
+                return ret;

-done:
-        mutex_unlock(&iio_dev_opaque->mlock);
-        return (ret < 0) ? ret : len;
+        return len;
 }

 static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
@@ -1368,23 +1348,17 @@ static ssize_t watermark_store(struct device *dev,
         if (!val)
                 return -EINVAL;

-        mutex_lock(&iio_dev_opaque->mlock);
+        guard(mutex)(&iio_dev_opaque->mlock);

-        if (val > buffer->length) {
-                ret = -EINVAL;
-                goto out;
-        }
+        if (val > buffer->length)
+                return -EINVAL;

-        if (iio_buffer_is_active(buffer)) {
-                ret = -EBUSY;
-                goto out;
-        }
+        if (iio_buffer_is_active(buffer))
+                return -EBUSY;

         buffer->watermark = val;
-out:
-        mutex_unlock(&iio_dev_opaque->mlock);

-        return ret ? ret : len;
+        return len;
 }

 static ssize_t data_available_show(struct device *dev,