In the code below, why is the logic for enabling and disabling the
sensor in this function? Generally the function that reads the sensor
value is used only to read sensor values, not to enable/disable the
sensor (a rough sketch of the more usual arrangement follows the quoted code below).
+	case IIO_CHAN_INFO_RAW:
+		sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
+					   SCMI_SENS_CFG_SENSOR_ENABLE);
+		err = sensor->sensor_ops->config_set(
+			sensor->ph, sensor->sensor_info->id, sensor_config);
+		if (err) {
+			dev_err(&iio_dev->dev,
+				"Error in enabling sensor %s err %d",
+				sensor->sensor_info->name, err);
+			return err;
+		}
+
+		err = sensor->sensor_ops->reading_get_timestamped(
+			sensor->ph, sensor->sensor_info->id,
+			sensor->sensor_info->num_axis, readings);
+		if (err) {
+			dev_err(&iio_dev->dev,
+				"Error in reading raw attribute for sensor %s err %d",
+				sensor->sensor_info->name, err);
+			return err;
+		}
+
+		sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
+					   SCMI_SENS_CFG_SENSOR_DISABLE);
+		err = sensor->sensor_ops->config_set(
+			sensor->ph, sensor->sensor_info->id, sensor_config);
+		if (err) {
+			dev_err(&iio_dev->dev,
+				"Error in disabling sensor %s err %d",
+				sensor->sensor_info->name, err);
+			return err;
+		}
+		/* Check if the raw value fits in 32 bits */
+		if (readings[ch->scan_index].value < INT_MIN ||
+		    readings[ch->scan_index].value > INT_MAX)
+			return -ERANGE;
+		/* Use a 32-bit value, since 64 bits are not needed in practice */
+		*val = (int)readings[ch->scan_index].value;
+		return IIO_VAL_INT;
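
For comparison, IIO drivers more commonly keep enable/disable handling out
of the read path, for example by doing it once in the buffer setup ops (for
buffered capture) or at probe/remove time (for polled reads). Below is a
minimal sketch of what that could look like; it reuses the
sensor_ops->config_set() call and the SCMI_SENS_CFG_* macros from the quoted
code, but the private struct name scmi_iio_priv and the helper/callback names
are assumptions made up for illustration, not the actual patch.

#include <linux/bitfield.h>
#include <linux/iio/iio.h>

/* Hypothetical helper: set the SCMI sensor enabled state in one place. */
static int scmi_iio_set_enabled(struct iio_dev *iio_dev, bool enable)
{
	struct scmi_iio_priv *sensor = iio_priv(iio_dev);
	u32 sensor_config;

	sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
				   enable ? SCMI_SENS_CFG_SENSOR_ENABLE :
					    SCMI_SENS_CFG_SENSOR_DISABLE);
	return sensor->sensor_ops->config_set(sensor->ph,
					      sensor->sensor_info->id,
					      sensor_config);
}

/* Enable the sensor when buffered capture starts... */
static int scmi_iio_buffer_preenable(struct iio_dev *iio_dev)
{
	return scmi_iio_set_enabled(iio_dev, true);
}

/* ...and disable it again when capture stops. */
static int scmi_iio_buffer_postdisable(struct iio_dev *iio_dev)
{
	return scmi_iio_set_enabled(iio_dev, false);
}

static const struct iio_buffer_setup_ops scmi_iio_buffer_ops = {
	.preenable = scmi_iio_buffer_preenable,
	.postdisable = scmi_iio_buffer_postdisable,
};

With an arrangement like that, read_raw() would only fetch the reading, and
the enable/disable round trips would not be paid on every sysfs read. Whether
that is acceptable here depends on whether the sensor can stay enabled between
reads on the SCMI platform side, which is really what the question above is
asking.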