BigW Consortium Gitlab

Commit 0a75741c by David Clark

Merge remote-tracking branch 'origin/master'

parents 82174f7c b4b4d96b
......@@ -29,3 +29,9 @@
path = linux_kernel_modules/cp2130
url = https://github.com/mangOH/cp2130
branch = development
[submodule "apps/RedSensorToCloud"]
path = apps/RedSensorToCloud
url = https://github.com/mangOH/RedSensorToCloud
[submodule "apps/Heartbeat"]
path = apps/Heartbeat
url = https://github.com/mangOH/Heartbeat
# This makefile depends on targetDefs.* files in the Legato root folder. In order to facilitate the
# inclusion of these files, make needs to be invoked with a --include-dir=$LEGATO_ROOT parameter.
# Target module to build for (Sierra Wireless WP85xx on the mangOH board).
TARGET:=wp85
# Load default PA definitions from main Legato Makefile
# NOTE(review): these $(shell ...) calls run at parse time on EVERY make
# invocation and regenerate legatoDefs.mk as a side effect; they require
# LEGATO_ROOT to be set in the environment.
$(shell grep 'export PA_DIR .*' $(LEGATO_ROOT)/Makefile > legatoDefs.mk)
$(shell grep 'export .*_PA_.* .*' $(LEGATO_ROOT)/Makefile >> legatoDefs.mk)
include legatoDefs.mk
# Default targets definitions from Framework
include targetDefs.$(TARGET)
.PHONY: all $(TARGET)
all: $(TARGET)
# The comment below is for Developer Studio integration. Do not remove it.
# DS_CLONE_ROOT(MANGOH_ROOT)
MANGOH_ROOT=$(shell pwd)
# The comment below is for Developer Studio integration. Do not remove it.
# DS_CUSTOM_OPTIONS(EXTRA_OPTS)
EXTRA_OPTS = -s "$(MANGOH_ROOT)/apps/GpioExpander/gpioExpanderService"
# Build the Legato system image with mksys: -i adds interface search paths,
# -s adds app/component search paths, and -C/-X/-L pass -g through to the
# compiler, mksys tools and linker respectively.
$(TARGET):
export MANGOH_ROOT=$(MANGOH_ROOT) && \
mksys \
$(MKSYS_FLAGS) \
-t $(TARGET) \
-i "$(LEGATO_ROOT)/interfaces/supervisor" \
-i "$(LEGATO_ROOT)/interfaces/positioning" \
-i "$(LEGATO_ROOT)/interfaces/airVantage" \
-i "$(LEGATO_ROOT)/interfaces/modemServices" \
-i "$(LEGATO_ROOT)/interfaces" \
-i "$(LEGATO_ROOT)/interfaces/atServices" \
-i "$(LEGATO_ROOT)/interfaces/wifi" \
-i "$(LEGATO_ROOT)/interfaces/secureStorage" \
-i "$(LEGATO_ROOT)/interfaces/logDaemon" \
-s "$(LEGATO_ROOT)/modules/WiFi/service" \
-s "$(LEGATO_ROOT)/components" \
-s "$(LEGATO_ROOT)/modules/WiFi/apps/tools/wifi" \
-s "$(LEGATO_ROOT)/apps/platformServices/airVantage" \
-s "$(LEGATO_ROOT)/apps/platformServices" \
-s "$(LEGATO_ROOT)/apps/tools" \
$(EXTRA_OPTS) \
-C -g -X -g -L -g \
mangOH_Green.sdef
#mangOH_Red.sdef
# Remove generated build artefacts, update packages and the parse-time
# generated legatoDefs.mk.
# NOTE(review): 'clean' is not declared .PHONY — a file named 'clean' in this
# directory would silently disable it.
clean:
rm -rf /tmp/mkbuild_* _build_* *.update legatoDefs.mk
# NOTE(review): second rule for 'all' (merged from another branch) — its recipe
# prints the LE-7663 workaround instructions and then fails deliberately with
# exit 1, so 'make all' never builds from this Makefile as committed.
all:
$(info Bug LE-7663 prevents this Makefile from building a correct Legato system.)
$(info Once this bug is fixed in a stable version of Legato, the Makefile will)
$(info be restored. For now, build the mangOH system by executing the following)
$(info command from a Legato working copy:)
$(info make wp85 SDEF_TO_USE=$$MANGOH_ROOT/mangOH_Red.sdef MKSYS_FLAGS="-s $$MANGOH_ROOT/apps/GpioExpander/gpioExpanderService -s $$MANGOH_ROOT/apps/RedSensorToCloud")
$(info Note that you must first define MANGOH_ROOT. eg. export MANGOH_ROOT=~/mangOH)
exit 1
# MangOH
Base project containing apps and drivers for the mangOH hardware
## Setup
### Linux
Open a terminal and load Legato development environment
* `source ~/legato/packages/legato.sdk.latest/resources/configlegatoenv`
### Windows
Launch the **Legato Command Line** shortcut to load the Legato development environment
### Get the source code
Clone this repository and the submodules it requires
* `git clone --recursive git://github.com/mangOH/mangOH`
### Build
Build the mangOH Legato system
* `make --include-dir=$LEGATO_ROOT wp85`
1. Download and install the appropriate toolchain from [source.sierrawireless.com](https://source.sierrawireless.com/resources/legato/downloads/)
1. Get the Legato source code as described by the [Legato README](https://github.com/legatoproject/legato-af/blob/master/README.md)
1. Clone the mangOH source code by running `git clone --recursive git://github.com/mangOH/mangOH`
1. `cd` into the Legato working folder and run the following command to build a system for the mangOH Red: `make wp85 SDEF_TO_USE=$MANGOH_ROOT/mangOH_Red.sdef MKSYS_FLAGS="-s $MANGOH_ROOT/apps/GpioExpander/gpioExpanderService -s $MANGOH_ROOT/apps/RedSensorToCloud"`
1. Run `./bin/legs` to put the Legato tools into `$PATH`
1. Run `instlegato wp85 192.168.2.2` to install the system onto the mangOH connected via a USB cable to the CF3 USB port.
Subproject commit 78c752542592fb552bd8fc67b1f7673f8bab3938
Subproject commit 1df717d51f6d2f25a62b57f1b6b7abbdfb4e5319
Subproject commit b7412dcb3589b441426e32c1fc5d12bda6283c9d
Subproject commit 84a92c33c6cb1cc73f42799a806fd4223023178d
Subproject commit 6966f104db935ff771dcce229647255f0bb01fa2
Subproject commit 60da5e1c03b694ce9b45ccd61f67a4bfa3f0fee2
Subproject commit 29c01ea6f23f4b46d9dd735a87c53eed043c310a
Subproject commit 7f669131d16ce30e05ef8406651a7a0965a6b8a4
cflags:
{
-DCONFIG_BMI160
-DCONFIG_BMI160_I2C
-DCONFIG_IIO
-DCONFIG_IIO_BUFFER
-DCONFIG_IIO_TRIGGERED_BUFFER
// TODO: I don't think REGMAP is buildable as a module so our kernel will need to change.
-DREGMAP
-DREGMAP_I2C
}
sources:
{
bmi160_i2c.c
}
Subproject commit 9b663f844d0c39a8acb33ab305f2ed7e4ca0a49a
Subproject commit 3f6ec22ecdcfe42a4582c55321a5b8d8d90cbb2f
cflags:
{
-DCONFIG_IIO
-DCONFIG_IIO_BUFFER
-DCONFIG_IIO_TRIGGER
-DCONFIG_IIO_KFIFO_BUF
-DCONFIG_IIO_TRIGGERED_BUFFER
}
sources:
{
// IIO_KFIFO_BUF
industrialio-triggered-buffer.c
}
IIO Kernel Module Definitions
=============================
The source for the IIO kernel modules is part of the standard Legato Linux
kernel source tree, but the module is not built by default. As a result, we
build this as an out of tree kernel module, but we don't want to version a copy
of the source files.
Instead, it is suggested to put a symlink to each of the mdef files in `[yocto
working copy]/kernel/drivers/iio/` and then refer to those mdef files from the
sdef using an environment variable. For example:
`${LEGATO_KERNELSRC}/drivers/iio/iio`.
The IIO modules are required by the BMP280 pressure sensor driver module.
The source for the IIO kernel modules comes from the standard Legato Linux kernel source tree. The
source is copied into this directory so that it isn't necessary to have the full kernel source code
in order to build the mangOH SDEF.
The kernel source in this folder was copied from the tag `SWI9X15Y_07.12.01.00`
/* The industrial I/O core function defs.
*
* Copyright (c) 2008 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* These definitions are meant for use only within the IIO core, not individual
* drivers.
*/
#ifndef _IIO_CORE_H_
#define _IIO_CORE_H_
#include <linux/kernel.h>
#include <linux/device.h>
struct iio_chan_spec;
struct iio_dev;
extern struct device_type iio_device_type;
/* Create one sysfs device attribute (named from @postfix) for @chan, with the
 * given show/store handlers, and append it to @attr_list. */
int __iio_add_chan_devattr(const char *postfix,
struct iio_chan_spec const *chan,
ssize_t (*func)(struct device *dev,
struct device_attribute *attr,
char *buf),
ssize_t (*writefunc)(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len),
u64 mask,
enum iio_shared_by shared_by,
struct device *dev,
struct list_head *attr_list);
/* Free every attribute previously added to @attr_list. */
void iio_free_chan_devattr_list(struct list_head *attr_list);
ssize_t iio_format_value(char *buf, unsigned int type, int val, int val2);
/* Event interface flags */
#define IIO_BUSY_BIT_POS 1
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
unsigned int iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps);
#define iio_buffer_poll_addr (&iio_buffer_poll)
#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
void iio_disable_all_buffers(struct iio_dev *indio_dev);
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
#else
/* Without CONFIG_IIO_BUFFER the chrdev has no poll/read entry points and the
 * buffer teardown helpers become no-ops. */
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL
static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
static inline void iio_buffer_wakeup_poll(struct iio_dev *indio_dev) {}
#endif
int iio_device_register_eventset(struct iio_dev *indio_dev);
void iio_device_unregister_eventset(struct iio_dev *indio_dev);
void iio_device_wakeup_eventset(struct iio_dev *indio_dev);
int iio_event_getfd(struct iio_dev *indio_dev);
#endif
/* The industrial I/O core, trigger consumer handling functions
*
* Copyright (c) 2008 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#ifdef CONFIG_IIO_TRIGGER
/**
* iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
* @indio_dev: iio_dev associated with the device that will consume the trigger
**/
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
/**
* iio_device_unregister_trigger_consumer() - reverse the registration process
* @indio_dev: iio_dev associated with the device that consumed the trigger
**/
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev);
#else
/* Stub implementations for kernels built without trigger support:
 * registration trivially succeeds and unregistration is a no-op. */
/**
* iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
* @indio_dev: iio_dev associated with the device that will consume the trigger
**/
static int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
return 0;
}
/**
* iio_device_unregister_trigger_consumer() - reverse the registration process
* @indio_dev: iio_dev associated with the device that consumed the trigger
**/
static void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_IIO_TRIGGER */
/* The industrial I/O core
*
* Copyright (c) 2008 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* Handling of buffer allocation / resizing.
*
*
* Things to look at here.
* - Better memory allocation techniques?
* - Alternative access techniques?
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
/* Endianness prefixes used when formatting a scan element's type in sysfs. */
static const char * const iio_endian_prefix[] = {
[IIO_BE] = "be",
[IIO_LE] = "le",
};
/* A buffer counts as active while it is linked on a device's buffer_list. */
static bool iio_buffer_is_active(struct iio_buffer *buffer)
{
	return !list_empty(&buffer->buffer_list);
}
/*
 * Report whether any data is ready to be read from @buffer.  Prefer the
 * buffer implementation's own data_available() hook; fall back to the
 * legacy stufftoread flag when the hook is absent.
 */
static bool iio_buffer_data_available(struct iio_buffer *buffer)
{
	if (!buffer->access->data_available)
		return buffer->stufftoread;

	return buffer->access->data_available(buffer);
}
/**
* iio_buffer_read_first_n_outer() - chrdev read for buffer access
*
* This function relies on all buffer implementations having an
* iio_buffer as their first element.
*
* Blocks (unless O_NONBLOCK is set) until data is available or the
* device's info pointer is cleared (device going away), then reads via
* the buffer implementation's read_first_n() hook.
**/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps)
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
int ret;
/* info == NULL marks a device that has been unregistered */
if (!indio_dev->info)
return -ENODEV;
if (!rb || !rb->access->read_first_n)
return -EINVAL;
do {
if (!iio_buffer_data_available(rb)) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
/* Sleep until data arrives or the device disappears. */
ret = wait_event_interruptible(rb->pollq,
iio_buffer_data_available(rb) ||
indio_dev->info == NULL);
if (ret)
return ret;
if (indio_dev->info == NULL)
return -ENODEV;
}
ret = rb->access->read_first_n(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
/* Retry a zero-byte blocking read: the data raced away before read. */
} while (ret == 0);
return ret;
}
/**
* iio_buffer_poll() - poll the buffer to find out if it has data
*
* Returns POLLIN | POLLRDNORM when data is available to read, 0 otherwise.
*/
unsigned int iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
/* NOTE(review): returning -ENODEV from a function with an unsigned
 * return type yields a huge positive event mask; later upstream kernels
 * return 0 here instead — confirm before changing this vendored code. */
if (!indio_dev->info)
return -ENODEV;
poll_wait(filp, &rb->pollq, wait);
if (iio_buffer_data_available(rb))
return POLLIN | POLLRDNORM;
/* need a way of knowing if there may be enough data... */
return 0;
}
/**
* iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
* @indio_dev: The IIO device
*
* Wakes up the event waitqueue used for poll(). Should usually
* be called when the device is unregistered.
*/
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
/* Devices without a buffer have no waitqueue to wake. */
if (!indio_dev->buffer)
return;
wake_up(&indio_dev->buffer->pollq);
}
/* Initialise the driver-independent fields of a freshly allocated buffer:
 * demux/active lists, poll waitqueue and the reference count (starts at 1). */
void iio_buffer_init(struct iio_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->demux_list);
INIT_LIST_HEAD(&buffer->buffer_list);
init_waitqueue_head(&buffer->pollq);
kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
static ssize_t iio_show_scan_index(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
/* sysfs: show a scan element's storage format, e.g. "le:s16/32>>2"
 * (<endian>:<sign><realbits>/<storagebits>>><shift>). */
static ssize_t iio_show_fixed_type(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
u8 type = this_attr->c->scan_type.endianness;
/* IIO_CPU means "native order" — resolve it at compile time. */
if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
type = IIO_LE;
#else
type = IIO_BE;
#endif
}
return sprintf(buf, "%s:%c%d/%d>>%u\n",
iio_endian_prefix[type],
this_attr->c->scan_type.sign,
this_attr->c->scan_type.realbits,
this_attr->c->scan_type.storagebits,
this_attr->c->scan_type.shift);
}
/* sysfs: report (0/1) whether this channel's scan element is enabled. */
static ssize_t iio_scan_el_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
/* Ensure ret is 0 or 1. */
ret = !!test_bit(to_iio_dev_attr(attr)->address,
indio_dev->buffer->scan_mask);
return sprintf(buf, "%d\n", ret);
}
/* Clear @bit in the buffer's scan mask. Always succeeds (returns 0). */
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
clear_bit(bit, buffer->scan_mask);
return 0;
}
/*
 * sysfs: enable/disable one channel's scan element. Refused with -EBUSY
 * while the buffer is active; otherwise the bit is set/cleared through
 * the mask helpers so device mask constraints are honoured.
 */
static ssize_t iio_scan_el_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
int ret;
bool state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = indio_dev->buffer;
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = strtobool(buf, &state);
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
goto error_ret;
}
/* Current bit state: 1, 0, or -EINVAL if the address is out of range. */
ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
if (ret < 0)
goto error_ret;
if (!state && ret) {
ret = iio_scan_mask_clear(buffer, this_attr->address);
if (ret)
goto error_ret;
} else if (state && !ret) {
ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
if (ret)
goto error_ret;
}
error_ret:
mutex_unlock(&indio_dev->mlock);
return ret < 0 ? ret : len;
}
/* sysfs: report (0/1) whether the timestamp scan element is enabled. */
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *iiodev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iiodev->buffer->scan_timestamp);
}
/* sysfs: enable/disable capture of the timestamp alongside channel data.
 * Refused with -EBUSY while the buffer is active. */
static ssize_t iio_scan_el_ts_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool state;
ret = strtobool(buf, &state);
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
goto error_ret;
}
indio_dev->buffer->scan_timestamp = state;
error_ret:
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
/*
 * Create the scan_elements sysfs attributes for one channel: "index",
 * "type" and "en". Timestamp channels get dedicated en show/store
 * handlers. Returns the number of attributes added, or a negative errno.
 */
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
int ret, attrcount = 0;
struct iio_buffer *buffer = indio_dev->buffer;
ret = __iio_add_chan_devattr("index",
chan,
&iio_show_scan_index,
NULL,
0,
IIO_SEPARATE,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
goto error_ret;
attrcount++;
ret = __iio_add_chan_devattr("type",
chan,
&iio_show_fixed_type,
NULL,
0,
0,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
goto error_ret;
attrcount++;
/* "en" toggles either the channel's mask bit or the timestamp flag. */
if (chan->type != IIO_TIMESTAMP)
ret = __iio_add_chan_devattr("en",
chan,
&iio_scan_el_show,
&iio_scan_el_store,
chan->scan_index,
0,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
else
ret = __iio_add_chan_devattr("en",
chan,
&iio_scan_el_ts_show,
&iio_scan_el_ts_store,
chan->scan_index,
0,
&indio_dev->dev,
&buffer->scan_el_dev_attr_list);
if (ret)
goto error_ret;
attrcount++;
ret = attrcount;
error_ret:
return ret;
}
/* Name of the sysfs group holding the per-channel scan element attributes. */
static const char * const iio_scan_elements_group_name = "scan_elements";
/*
 * Build the buffer's scan_elements sysfs group for @channels and size the
 * scan mask. Returns 0 or a negative errno; on failure all dynamically
 * created attributes (and the mask) are freed.
 */
int iio_buffer_register(struct iio_dev *indio_dev,
const struct iio_chan_spec *channels,
int num_channels)
{
struct iio_dev_attr *p;
struct attribute **attr;
struct iio_buffer *buffer = indio_dev->buffer;
int ret, i, attrn, attrcount, attrcount_orig = 0;
if (buffer->attrs)
indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
/* Count fixed attributes supplied by the buffer implementation. */
if (buffer->scan_el_attrs != NULL) {
attr = buffer->scan_el_attrs->attrs;
while (*attr++ != NULL)
attrcount_orig++;
}
attrcount = attrcount_orig;
INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
if (channels) {
/* new magic */
for (i = 0; i < num_channels; i++) {
/* scan_index < 0 means the channel is not buffered. */
if (channels[i].scan_index < 0)
continue;
/* Establish necessary mask length */
if (channels[i].scan_index >
(int)indio_dev->masklength - 1)
indio_dev->masklength
= channels[i].scan_index + 1;
ret = iio_buffer_add_channel_sysfs(indio_dev,
&channels[i]);
if (ret < 0)
goto error_cleanup_dynamic;
attrcount += ret;
if (channels[i].type == IIO_TIMESTAMP)
indio_dev->scan_index_timestamp =
channels[i].scan_index;
}
if (indio_dev->masklength && buffer->scan_mask == NULL) {
buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
sizeof(*buffer->scan_mask),
GFP_KERNEL);
if (buffer->scan_mask == NULL) {
ret = -ENOMEM;
goto error_cleanup_dynamic;
}
}
}
buffer->scan_el_group.name = iio_scan_elements_group_name;
/* Fixed + dynamic attributes plus a NULL terminator. */
buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
sizeof(buffer->scan_el_group.attrs[0]),
GFP_KERNEL);
if (buffer->scan_el_group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_scan_mask;
}
/* NOTE(review): this copies from the attribute_group pointer itself
 * rather than buffer->scan_el_attrs->attrs — looks suspicious; confirm
 * against upstream before changing this vendored code. */
if (buffer->scan_el_attrs)
memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
attrn = attrcount_orig;
/* Append the dynamically created per-channel attributes. */
list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
return 0;
error_free_scan_mask:
kfree(buffer->scan_mask);
error_cleanup_dynamic:
iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
/* Undo iio_buffer_register(): free the scan mask, the attribute array and
 * the dynamically created per-channel attributes. */
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
kfree(indio_dev->buffer->scan_mask);
kfree(indio_dev->buffer->scan_el_group.attrs);
iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);
/* sysfs: show the buffer length; writes nothing (returns 0) when the
 * backend has no get_length() hook. */
ssize_t iio_buffer_read_length(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = indio_dev->buffer;
if (buffer->access->get_length)
return sprintf(buf, "%d\n",
buffer->access->get_length(buffer));
return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
/*
 * sysfs: set the buffer length. Writing the current length is a no-op;
 * the change is refused with -EBUSY while the buffer is active.
 */
ssize_t iio_buffer_write_length(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = indio_dev->buffer;
unsigned int val;
int ret;
ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
/* Short-circuit when the requested length is already in effect. */
if (buffer->access->get_length)
if (val == buffer->access->get_length(buffer))
return len;
mutex_lock(&indio_dev->mlock);
if (iio_buffer_is_active(indio_dev->buffer)) {
ret = -EBUSY;
} else {
if (buffer->access->set_length)
buffer->access->set_length(buffer, val);
ret = 0;
}
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* Note NULL used as error indicator as it doesn't make sense. */
/*
 * Find the first entry of @av_masks (an array of masks terminated by an
 * all-zero word, each BITS_TO_LONGS(masklength) longs wide) that is a
 * superset of @mask. Returns NULL when @mask is empty or nothing matches.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
unsigned int masklength,
const unsigned long *mask)
{
if (bitmap_empty(mask, masklength))
return NULL;
while (*av_masks) {
if (bitmap_subset(mask, av_masks, masklength))
return av_masks;
av_masks += BITS_TO_LONGS(masklength);
}
return NULL;
}
/*
 * Compute the size in bytes of one demuxed scan for the channels in
 * @mask, plus the timestamp when @timestamp is set. Each element is
 * naturally aligned to its own storage size.
 */
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
const struct iio_chan_spec *ch;
unsigned bytes = 0;
int length, i;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
indio_dev->masklength) {
ch = iio_find_channel_from_si(indio_dev, i);
length = ch->scan_type.storagebits / 8;
bytes = ALIGN(bytes, length);
bytes += length;
}
/* The timestamp, when present, is appended last. */
if (timestamp) {
ch = iio_find_channel_from_si(indio_dev,
indio_dev->scan_index_timestamp);
length = ch->scan_type.storagebits / 8;
bytes = ALIGN(bytes, length);
bytes += length;
}
return bytes;
}
/* Take a reference on @buffer and link it into the device's active list. */
static void iio_buffer_activate(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
iio_buffer_get(buffer);
list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}
/* Unlink @buffer from the active list and drop the activation reference. */
static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
list_del_init(&buffer->buffer_list);
iio_buffer_put(buffer);
}
/*
 * Forcibly wind down every active buffer (used when the device goes
 * away): run predisable, deactivate all buffers, fall back to direct
 * mode, run postdisable and free the active scan mask if it was
 * allocated by the core (no available_scan_masks).
 */
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
struct iio_buffer *buffer, *_buffer;
if (list_empty(&indio_dev->buffer_list))
return;
if (indio_dev->setup_ops->predisable)
indio_dev->setup_ops->predisable(indio_dev);
list_for_each_entry_safe(buffer, _buffer,
&indio_dev->buffer_list, buffer_list)
iio_buffer_deactivate(buffer);
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
if (indio_dev->available_scan_masks == NULL)
kfree(indio_dev->active_scan_mask);
}
/* Recompute the per-scan byte count for @buffer and push it to backends
 * that implement set_bytes_per_datum(). */
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
unsigned int bytes;
if (!buffer->access->set_bytes_per_datum)
return;
bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
buffer->scan_timestamp);
buffer->access->set_bytes_per_datum(buffer, bytes);
}
/*
 * __iio_update_buffers() - core buffer enable/disable state machine
 * @indio_dev: device being reconfigured
 * @insert_buffer: buffer to activate, or NULL
 * @remove_buffer: buffer to deactivate, or NULL
 *
 * Tears down the current configuration, applies the insertion/removal,
 * recomputes the compound scan mask over all remaining buffers, then
 * re-runs the enable sequence (preenable, per-buffer updates,
 * update_scan_mode, mode selection, postenable). Rolls back to the
 * previous mask where possible. Callers hold indio_dev->mlock.
 */
static int __iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer)
{
int ret;
int success = 0;
struct iio_buffer *buffer;
unsigned long *compound_mask;
const unsigned long *old_mask;
/* Wind down existing buffers - iff there are any */
if (!list_empty(&indio_dev->buffer_list)) {
if (indio_dev->setup_ops->predisable) {
ret = indio_dev->setup_ops->predisable(indio_dev);
if (ret)
goto error_ret;
}
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable) {
ret = indio_dev->setup_ops->postdisable(indio_dev);
if (ret)
goto error_ret;
}
}
/* Keep a copy of current setup to allow roll back */
old_mask = indio_dev->active_scan_mask;
if (!indio_dev->available_scan_masks)
indio_dev->active_scan_mask = NULL;
if (remove_buffer)
iio_buffer_deactivate(remove_buffer);
if (insert_buffer)
iio_buffer_activate(indio_dev, insert_buffer);
/* If no buffers in list, we are done */
if (list_empty(&indio_dev->buffer_list)) {
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->available_scan_masks == NULL)
kfree(old_mask);
return 0;
}
/* What scan mask do we actually have? */
compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
sizeof(long), GFP_KERNEL);
if (compound_mask == NULL) {
if (indio_dev->available_scan_masks == NULL)
kfree(old_mask);
return -ENOMEM;
}
indio_dev->scan_timestamp = 0;
/* OR together the masks (and timestamp flags) of every active buffer. */
list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
indio_dev->masklength);
indio_dev->scan_timestamp |= buffer->scan_timestamp;
}
if (indio_dev->available_scan_masks) {
indio_dev->active_scan_mask =
iio_scan_mask_match(indio_dev->available_scan_masks,
indio_dev->masklength,
compound_mask);
if (indio_dev->active_scan_mask == NULL) {
/*
* Roll back.
* Note can only occur when adding a buffer.
*/
iio_buffer_deactivate(insert_buffer);
if (old_mask) {
/* Restore the old mask and report -EINVAL via 'success'. */
indio_dev->active_scan_mask = old_mask;
success = -EINVAL;
}
else {
kfree(compound_mask);
ret = -EINVAL;
goto error_ret;
}
}
} else {
indio_dev->active_scan_mask = compound_mask;
}
iio_update_demux(indio_dev);
/* Wind up again */
if (indio_dev->setup_ops->preenable) {
ret = indio_dev->setup_ops->preenable(indio_dev);
if (ret) {
printk(KERN_ERR
"Buffer not started: buffer preenable failed (%d)\n", ret);
goto error_remove_inserted;
}
}
indio_dev->scan_bytes =
iio_compute_scan_bytes(indio_dev,
indio_dev->active_scan_mask,
indio_dev->scan_timestamp);
list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
iio_buffer_update_bytes_per_datum(indio_dev, buffer);
if (buffer->access->request_update) {
ret = buffer->access->request_update(buffer);
if (ret) {
printk(KERN_INFO
"Buffer not started: buffer parameter update failed (%d)\n", ret);
goto error_run_postdisable;
}
}
}
if (indio_dev->info->update_scan_mode) {
ret = indio_dev->info
->update_scan_mode(indio_dev,
indio_dev->active_scan_mask);
if (ret < 0) {
printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
goto error_run_postdisable;
}
}
/* Definitely possible for devices to support both of these. */
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
if (!indio_dev->trig) {
printk(KERN_INFO "Buffer not started: no trigger\n");
ret = -EINVAL;
/* Can only occur on first buffer */
goto error_run_postdisable;
}
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
} else { /* Should never be reached */
ret = -EINVAL;
goto error_run_postdisable;
}
if (indio_dev->setup_ops->postenable) {
ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
printk(KERN_INFO
"Buffer not started: postenable failed (%d)\n", ret);
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
goto error_disable_all_buffers;
}
}
/* Free whichever mask copy is not now the active one. */
if (indio_dev->available_scan_masks)
kfree(compound_mask);
else
kfree(old_mask);
return success;
error_disable_all_buffers:
indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
if (insert_buffer)
iio_buffer_deactivate(insert_buffer);
indio_dev->active_scan_mask = old_mask;
kfree(compound_mask);
error_ret:
return ret;
}
/*
 * Public wrapper around __iio_update_buffers(): takes the locks, filters
 * out no-op requests (inserting an already-active buffer, removing an
 * inactive one) and rejects devices whose info has gone away.
 */
int iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer)
{
int ret;
if (insert_buffer == remove_buffer)
return 0;
mutex_lock(&indio_dev->info_exist_lock);
mutex_lock(&indio_dev->mlock);
if (insert_buffer && iio_buffer_is_active(insert_buffer))
insert_buffer = NULL;
if (remove_buffer && !iio_buffer_is_active(remove_buffer))
remove_buffer = NULL;
if (!insert_buffer && !remove_buffer) {
ret = 0;
goto out_unlock;
}
/* info == NULL marks a device that has been unregistered. */
if (indio_dev->info == NULL) {
ret = -ENODEV;
goto out_unlock;
}
ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
out_unlock:
mutex_unlock(&indio_dev->mlock);
mutex_unlock(&indio_dev->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
/*
 * sysfs: enable ("1") or disable ("0") this device's buffer by inserting
 * it into / removing it from the active buffer list.
 */
ssize_t iio_buffer_store_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool inlist;
ret = strtobool(buf, &requested_state);
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
/* Find out if it is in the list */
inlist = iio_buffer_is_active(indio_dev->buffer);
/* Already in desired state */
if (inlist == requested_state)
goto done;
if (requested_state)
ret = __iio_update_buffers(indio_dev,
indio_dev->buffer, NULL);
else
ret = __iio_update_buffers(indio_dev,
NULL, indio_dev->buffer);
if (ret < 0)
goto done;
done:
mutex_unlock(&indio_dev->mlock);
return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	unsigned int nbits = bitmap_weight(mask, indio_dev->masklength);

	return nbits == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
/*
 * Ask the device's setup_ops to validate a candidate scan mask.  Devices
 * that provide no validate_scan_mask() hook accept any mask.
 */
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (indio_dev->setup_ops->validate_scan_mask)
		return indio_dev->setup_ops->validate_scan_mask(indio_dev,
								mask);

	return true;
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffers request is plausible.
 *
 * Returns 0 on success, -ENOMEM if the trial mask cannot be allocated,
 * -EINVAL if the resulting mask is rejected by the device.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		/*
		 * Fix: WARN_ON() takes a condition; passing the string
		 * literal made the condition always true but never printed
		 * the text. WARN(1, ...) both warns and prints the message.
		 */
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	/* Work on a copy so a rejected request leaves the buffer untouched. */
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	/* Devices with a fixed set of masks must cover the trial mask. */
	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
/*
 * Query whether @bit is set in @buffer's scan mask.
 * Returns 1 if set, 0 if clear (or when no mask has been allocated yet),
 * -EINVAL when @bit lies beyond the mask length.
 */
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Normalise test_bit()'s result to 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
* struct iio_demux_table - table describing demux memcpy ops
* @from: index to copy from
* @to: index to copy to
* @length: how many bytes to copy
* @l: list head used for management
*/
struct iio_demux_table {
unsigned from;
unsigned to;
unsigned length;
struct list_head l;
};
/*
 * Apply the buffer's demux instructions to one raw scan. Returns the
 * repacked bounce buffer, or @datain unchanged when no demux is needed.
 */
static const void *iio_demux(struct iio_buffer *buffer,
const void *datain)
{
struct iio_demux_table *t;
if (list_empty(&buffer->demux_list))
return datain;
list_for_each_entry(t, &buffer->demux_list, l)
memcpy(buffer->demux_bounce + t->to,
datain + t->from, t->length);
return buffer->demux_bounce;
}
/* Demux one scan into @buffer's layout, then hand it to the backend. */
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	return buffer->access->store_to(buffer, iio_demux(buffer, data));
}
/* Release every entry on the buffer's demux instruction list. */
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &buffer->demux_list, l) {
		list_del(&entry->l);
		kfree(entry);
	}
}
/*
 * Push one scan to every active buffer of @indio_dev, stopping at the
 * first backend failure. Returns 0 on success or that negative errno.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
int ret;
struct iio_buffer *buf;
list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
ret = iio_push_to_buffer(buf, data);
if (ret < 0)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
/*
 * Rebuild the demux table mapping the device's active scan (the union of
 * all buffers' masks) onto this buffer's requested subset of channels.
 * in_loc/in_ind track position in the captured scan; out_loc/out_ind in
 * the demuxed output. Offsets are kept aligned to each sample's size.
 */
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0; /* identical masks: no demux table needed */

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		/* Advance to the next active channel in the captured scan. */
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		/*
		 * Skip over channels the device captures but this buffer
		 * does not want, accounting their size into in_loc.
		 * NOTE(review): the size accounted is that of the channel
		 * found *after* advancing in_ind — confirm this matches the
		 * intended skipped-channel accounting.
		 */
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		/* Align both source and destination to the sample size. */
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* out_loc now equals the size of one demuxed scan. */
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);
	return ret;
}
/*
 * Rebuild the demux table of every buffer attached to the device.
 * On failure all demux tables are dropped and the error is returned.
 */
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto err_free_demux;
	}
	return 0;

err_free_demux:
	/* Drop all (possibly half-built) demux tables on failure. */
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_dev *buffer = container_of(ref, struct iio_buffer, ref);

	/* Delegate cleanup to the buffer implementation's release hook. */
	buffer->access->release(buffer);
}
/**
* iio_buffer_get() - Grab a reference to the buffer
* @buffer: The buffer to grab a reference for, may be NULL
*
* Returns the pointer to the buffer that was passed into the function.
*/
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
if (buffer)
kref_get(&buffer->ref);
return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (!buffer)
		return;

	/* Frees the buffer via iio_buffer_release() on last put. */
	kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
/* The industrial I/O core
*
* Copyright (c) 2008 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* Based on elements of hwmon and input subsystems.
*/
#define pr_fmt(fmt) "iio-core: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

/* Char-dev region reserved in iio_init(); minor number = device id. */
static dev_t iio_devt;

/* Size of the reserved char-dev region (maximum number of iio devices). */
#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

/* Root "iio" directory in debugfs; NULL if debugfs creation failed. */
static struct dentry *iio_debugfs_dentry;

/* Sysfs name prefix, indexed by the channel's output flag. */
static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

/* Sysfs channel-type names, indexed by enum iio_chan_type. */
static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
};

/* Sysfs modifier name fragments, indexed by enum iio_modifier. */
static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
};

/* relies on pairs of these shared then separate */
/* Attribute-name postfixes, indexed by enum iio_chan_info_enum. */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
};
/**
* iio_find_channel_from_si() - get channel from its scan index
* @indio_dev: device
* @si: scan index to match
*/
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
int i;
for (i = 0; i < indio_dev->num_channels; i++)
if (indio_dev->channels[i].scan_index == si)
return &indio_dev->channels[i];
return NULL;
}
/* This turns up an awful lot */
/* Generic show() for attributes whose value is a constant string. */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
/* Subsystem init: bus, char-dev region, debugfs root. */
static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		return ret;
	}

	/* Reserve a char-dev major/minor range for iio devices. */
	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		bus_unregister(&iio_bus_type);
		return ret;
	}

	/* Failure here is tolerated; register_debugfs checks for NULL. */
	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;
}
/* Subsystem teardown: undo everything iio_init() set up. */
static void __exit iio_exit(void)
{
	/* iio_devt is only non-zero if alloc_chrdev_region() succeeded. */
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	/* debugfs_remove() handles a NULL dentry gracefully. */
	debugfs_remove(iio_debugfs_dentry);
}
#if defined(CONFIG_DEBUG_FS)
/*
 * Read handler for debugfs "direct_reg_access": reads the register whose
 * address was cached by the last write to this file and formats it as hex.
 */
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	char buf[20];
	unsigned val = 0;
	ssize_t len;
	int ret;

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  indio_dev->cached_reg_addr,
						  0, &val);
	if (ret) {
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
		/*
		 * Propagate the error to userspace instead of presenting
		 * the stale val (previously 0x0 was shown on failure).
		 */
		return ret;
	}

	len = snprintf(buf, sizeof(buf), "0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
/*
 * Write handler for debugfs "direct_reg_access". Accepted formats:
 *   "<reg>"        - cache the register address for a later read
 *   "<reg> <val>"  - cache the address and write <val> to the register
 */
static ssize_t iio_debugfs_write_reg(struct file *file,
		     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	unsigned reg, val;
	char buf[80];
	int ret;

	/* Copy at most sizeof(buf)-1 bytes and NUL-terminate. */
	count = min_t(size_t, count, (sizeof(buf)-1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);
	if (ret != 1 && ret != 2)
		return -EINVAL;

	indio_dev->cached_reg_addr = reg;

	if (ret == 2) {
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
	}

	return count;
}
/* File operations for the per-device "direct_reg_access" debugfs file. */
static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};

/* Remove the device's debugfs directory and everything under it. */
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	debugfs_remove_recursive(indio_dev->debugfs_dentry);
}
/*
 * Create the per-device debugfs directory and its "direct_reg_access"
 * file. A device without a debugfs_reg_access hook, or a missing debugfs
 * root, is not an error - debugfs support is optional.
 */
static int iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct dentry *f;

	/* Nothing to expose without a register-access hook. */
	if (indio_dev->info->debugfs_reg_access == NULL)
		return 0;

	/* The top-level "iio" debugfs directory was never created. */
	if (!iio_debugfs_dentry)
		return 0;

	indio_dev->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);
	if (indio_dev->debugfs_dentry == NULL) {
		dev_warn(indio_dev->dev.parent,
			 "Failed to create debugfs directory\n");
		return -EFAULT;
	}

	f = debugfs_create_file("direct_reg_access", 0644,
				indio_dev->debugfs_dentry,
				indio_dev, &iio_debugfs_reg_fops);
	if (!f) {
		iio_device_unregister_debugfs(indio_dev);
		return -ENOMEM;
	}

	return 0;
}
#else
/* Stubs used when debugfs support is compiled out. */
static int iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	return 0;
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */
/* show() for a channel's driver-specific extended-info attribute. */
static ssize_t iio_read_channel_ext_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	/* attr->address indexes into the channel's ext_info array. */
	const struct iio_chan_spec_ext_info *ext_info =
		&this_attr->c->ext_info[this_attr->address];

	return ext_info->read(dev_to_iio_dev(dev), ext_info->private,
			      this_attr->c, buf);
}
/* store() for a channel's driver-specific extended-info attribute. */
static ssize_t iio_write_channel_ext_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
					 size_t len)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	/* attr->address indexes into the channel's ext_info array. */
	const struct iio_chan_spec_ext_info *ext_info =
		&this_attr->c->ext_info[this_attr->address];

	return ext_info->write(dev_to_iio_dev(dev), ext_info->private,
			       this_attr->c, buf, len);
}
/*
 * show() helper for IIO_ENUM_AVAILABLE attributes: prints all items of
 * the enum, space-separated, newline-terminated.
 */
ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	size_t len = 0;
	unsigned int i;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i)
		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ",
				 e->items[i]);

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);
/*
 * show() helper for IIO_ENUM attributes: asks the driver for the current
 * index and prints the corresponding item string.
 */
ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int idx;

	if (!e->get)
		return -EINVAL;

	idx = e->get(indio_dev, chan);
	if (idx < 0)
		return idx;
	if (idx >= e->num_items)
		return -EINVAL;

	return sprintf(buf, "%s\n", e->items[idx]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);
/*
 * store() helper for IIO_ENUM attributes: matches the written string
 * against the item table and passes the index to the driver's set().
 */
ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int idx;
	int ret;

	if (!e->set)
		return -EINVAL;

	for (idx = 0; idx < e->num_items; idx++)
		if (sysfs_streq(buf, e->items[idx]))
			break;
	if (idx == e->num_items)
		return -EINVAL;

	ret = e->set(indio_dev, chan, idx);

	return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);
/**
 * iio_format_value() - Formats a IIO value into its string representation
 * @buf: The buffer to which the formatted value gets written
 * @type: One of the IIO_VAL_... constants. This decides how the val and val2
 * parameters are formatted.
 * @val: First part of the value, exact meaning depends on the type parameter.
 * @val2: Second part of the value, exact meaning depends on the type parameter.
 *
 * Returns the number of bytes written to @buf, or 0 for an unknown type.
 */
ssize_t iio_format_value(char *buf, unsigned int type, int val, int val2)
{
	unsigned long long tmp;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sprintf(buf, "%d\n", val);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		/* fall through - same format as INT_PLUS_MICRO plus " dB" */
	case IIO_VAL_INT_PLUS_MICRO:
		if (val2 < 0)
			/*
			 * abs() yields an int, so the conversion must be %d;
			 * the previous %ld read the vararg as a long
			 * (undefined behavior, wrong output on 64-bit).
			 */
			return sprintf(buf, "-%d.%06u%s\n", abs(val), -val2,
				       scale_db ? " dB" : "");
		else
			return sprintf(buf, "%d.%06u%s\n", val, val2,
				       scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (val2 < 0)
			/* Same %ld -> %d fix as above. */
			return sprintf(buf, "-%d.%09u\n", abs(val), -val2);
		else
			return sprintf(buf, "%d.%09u\n", val, val2);
	case IIO_VAL_FRACTIONAL:
		/* val/val2 as a decimal with nine fractional digits. */
		tmp = div_s64((s64)val * 1000000000LL, val2);
		val2 = do_div(tmp, 1000000000LL);
		val = tmp;
		return sprintf(buf, "%d.%09u\n", val, val2);
	case IIO_VAL_FRACTIONAL_LOG2:
		/* val / 2^val2 as a decimal with nine fractional digits. */
		tmp = (s64)val * 1000000000LL >> val2;
		val2 = do_div(tmp, 1000000000LL);
		val = tmp;
		return sprintf(buf, "%d.%09u\n", val, val2);
	default:
		return 0;
	}
}
/* Generic show() for channel info attributes backed by info->read_raw(). */
static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, val, val2;

	/* read_raw() returns an IIO_VAL_* code describing val/val2. */
	ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
					&val, &val2, this_attr->address);
	if (ret < 0)
		return ret;

	return iio_format_value(buf, ret, val, val2);
}
/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * A single trailing newline is tolerated. The sign is applied to the
 * integer part, or to the fractional part when the integer part is zero.
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
	int *integer, int *fract)
{
	bool negative = false, in_integer = true;
	int int_part = 0, fract_part = 0;

	/* Optional leading sign. */
	if (*str == '-') {
		negative = true;
		str++;
	} else if (*str == '+') {
		str++;
	}

	for (; *str; str++) {
		char c = *str;

		if (c >= '0' && c <= '9') {
			if (in_integer) {
				int_part = int_part * 10 + (c - '0');
			} else {
				/* fract_mult shrinks per fractional digit. */
				fract_part += fract_mult * (c - '0');
				fract_mult /= 10;
			}
		} else if (c == '.' && in_integer) {
			/* First '.' switches to the fractional part. */
			in_integer = false;
		} else if (c == '\n' && str[1] == '\0') {
			/* A trailing newline terminates the number. */
			break;
		} else {
			return -EINVAL;
		}
	}

	if (negative) {
		if (int_part)
			int_part = -int_part;
		else
			fract_part = -fract_part;
	}

	*integer = int_part;
	*fract = fract_part;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
/* Generic store() for channel info attributes backed by info->write_raw(). */
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int integer, fract;
	int ret;
	/* Assumes decimal - precision based on number of digits */
	int fract_mult = 100000;

	if (!indio_dev->info->write_raw)
		return -EINVAL;

	/* Let the driver choose micro vs nano precision for this attr. */
	if (indio_dev->info->write_raw_get_fmt) {
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		default:
			return -EINVAL;
		}
	}

	ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
	if (ret)
		return ret;

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}
/*
 * Initialize one device_attribute for a channel: build its sysfs name
 * from direction/type/index/modifier/postfix and hook up the callbacks.
 * The generated name_format may contain %d placeholders filled in with
 * chan->channel / chan->channel2 by the final kasprintf().
 */
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name_format = NULL;
	char *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			/* Shared by everything: postfix alone. */
			name_format = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			/* e.g. "in_<postfix>" */
			name_format = kasprintf(GFP_KERNEL, "%s_%s",
						iio_direction[chan->output],
						full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			/* e.g. "in_voltage-voltage_<postfix>" (no indices) */
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				/*
				 * NOTE(review): WARN_ON() on a string
				 * literal is always-true; WARN(1, "...")
				 * looks intended here - confirm.
				 */
				WARN_ON("Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			/* e.g. "in_voltage0-voltage1_<postfix>" */
			name_format
				= kasprintf(GFP_KERNEL,
					    "%s_%s%d-%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    iio_chan_type_name_spec[chan->type],
					    chan->channel2,
					    full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name_format = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name_format = kasprintf(GFP_KERNEL, "%s_%s",
						iio_direction[chan->output],
						full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			/* e.g. "in_voltage_<postfix>" */
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
			break;
		case IIO_SEPARATE:
			if (chan->indexed)
				/* e.g. "in_voltage0_<postfix>" */
				name_format
					= kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						    iio_direction[chan->output],
						    iio_chan_type_name_spec[chan->type],
						    chan->channel,
						    full_postfix);
			else
				name_format
					= kasprintf(GFP_KERNEL, "%s_%s_%s",
						    iio_direction[chan->output],
						    iio_chan_type_name_spec[chan->type],
						    full_postfix);
			break;
		}
	}
	if (name_format == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	/* Fill any remaining %d placeholders with the channel indices. */
	dev_attr->attr.name = kasprintf(GFP_KERNEL,
					name_format,
					chan->channel,
					chan->channel2);
	if (dev_attr->attr.name == NULL) {
		ret = -ENOMEM;
		goto error_free_name_format;
	}

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}

error_free_name_format:
	kfree(name_format);
error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}
/* Free the name allocated by __iio_device_attr_init(). */
static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}
/*
 * Allocate and initialize one iio_dev_attr for a channel and append it
 * to @attr_list, rejecting attributes whose name is already present.
 * Returns 0 on success, -EBUSY on a duplicate name, or another negative
 * error code.
 */
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	/* Build the sysfs name and hook up the show/store callbacks. */
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	/*
	 * Duplicates are expected for shared attributes (callers filter the
	 * -EBUSY); a duplicate of a separate attribute is a driver bug.
	 */
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
error_ret:
	return ret;
}
/*
 * Create one sysfs attribute per bit set in @infomask (a single long
 * bitmap of iio_chan_info_enum values) for the given channel.
 * Returns the number of attributes created, or a negative error code.
 */
static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	int i, ret, attrcount = 0;

	/*
	 * The mask is one long wide, so its bit width is sizeof(*infomask)*8.
	 * The previous sizeof(infomask) measured the *pointer*, which only
	 * happened to equal sizeof(long) on common LP32/LP64 targets.
	 */
	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     &indio_dev->channel_attr_list);
		/* Shared attrs may already exist from a sibling channel. */
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}
/*
 * Create all sysfs attributes for one channel (separate, shared-by-type,
 * shared-by-dir, shared-by-all, plus driver ext_info attributes).
 * Returns the number of attributes created, or a negative error code.
 */
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	/* Negative channel numbers mark channels without sysfs presence. */
	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	/* Driver-specific extended attributes, if the channel has any. */
	if (chan->ext_info) {
		unsigned int i = 0;
		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
						     chan,
						     ext_info->read ?
						     &iio_read_channel_ext_info : NULL,
						     ext_info->write ?
						     &iio_write_channel_ext_info : NULL,
						     i,
						     ext_info->shared,
						     &indio_dev->dev,
						     &indio_dev->channel_attr_list);
			i++;
			/* Shared ext_info attrs may already exist. */
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}
/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list. Note: if you want to reuse the list after calling
 * this function you have to reinitialize it using INIT_LIST_HEAD().
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, attr_list, l) {
		kfree(entry->dev_attr.attr.name);
		kfree(entry);
	}
}
static ssize_t iio_show_dev_name(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
return sprintf(buf, "%s\n", indio_dev->name);
}
static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
/*
 * Build the device's channel attribute group: count the driver's own
 * attributes plus all per-channel ones, allocate a NULL-terminated
 * attribute array, and register it in the device's groups[].
 */
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			ret = iio_device_add_channel_sysfs(indio_dev,
							   &indio_dev->channels[i]);
			if (ret < 0)
				goto error_clear_attrs;
			/* Returns the number of attributes it created. */
			attrcount += ret;
		}
	/* Account for the "name" attribute added below. */
	if (indio_dev->name)
		attrcount++;

	/* +1 for the NULL terminator sysfs requires. */
	indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
						   sizeof(indio_dev->chan_attr_group.attrs[0]),
						   GFP_KERNEL);
	if (indio_dev->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs)
		memcpy(indio_dev->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(indio_dev->chan_attr_group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &indio_dev->channel_attr_list, l)
		indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;

	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->chan_attr_group;

	return 0;

error_clear_attrs:
	iio_free_chan_devattr_list(&indio_dev->channel_attr_list);

	return ret;
}
/* Undo iio_device_register_sysfs(): free attrs and the group array. */
static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
	kfree(indio_dev->chan_attr_group.attrs);
}
/*
 * Device-model release callback: runs when the last reference to the
 * iio_dev's embedded struct device is dropped. Tears down the remaining
 * subsystem state and frees the iio_dev itself.
 */
static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);

	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);
	/* Drop the device's reference on its buffer, if it has one. */
	iio_buffer_put(indio_dev->buffer);

	/* Return the id allocated in iio_device_alloc(). */
	ida_simple_remove(&iio_ida, indio_dev->id);
	kfree(indio_dev);
}
/* Device type shared by all iio devices; release frees the iio_dev. */
struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};
/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @sizeof_priv: Space to allocate for private structure.
 *
 * Returns the new iio_dev, or NULL on allocation / id failure. Private
 * data is placed after the iio_dev, aligned to IIO_ALIGN.
 **/
struct iio_dev *iio_device_alloc(int sizeof_priv)
{
	size_t alloc_size = sizeof(struct iio_dev);
	struct iio_dev *dev;

	if (sizeof_priv)
		alloc_size = ALIGN(alloc_size, IIO_ALIGN) + sizeof_priv;
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->dev.groups = dev->groups;
	dev->dev.type = &iio_device_type;
	dev->dev.bus = &iio_bus_type;
	device_initialize(&dev->dev);
	dev_set_drvdata(&dev->dev, (void *)dev);
	mutex_init(&dev->mlock);
	mutex_init(&dev->info_exist_lock);
	INIT_LIST_HEAD(&dev->channel_attr_list);

	dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
	if (dev->id < 0) {
		/* cannot use a dev_err as the name isn't available */
		pr_err("failed to get device id\n");
		kfree(dev);
		return NULL;
	}
	dev_set_name(&dev->dev, "iio:device%d", dev->id);
	INIT_LIST_HEAD(&dev->buffer_list);

	return dev;
}
EXPORT_SYMBOL(iio_device_alloc);
/**
 * iio_device_free() - free an iio_dev from a driver
 * @dev: the iio_dev associated with the device
 *
 * Drops the reference taken at allocation; the actual free happens in
 * iio_dev_release() once the last reference is gone. NULL is a no-op.
 **/
void iio_device_free(struct iio_dev *dev)
{
	if (!dev)
		return;

	put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);
/* devres destructor: frees the managed iio_dev on driver detach. */
static void devm_iio_device_release(struct device *dev, void *res)
{
	iio_device_free(*(struct iio_dev **)res);
}
/* devres match: true when the resource holds the given iio_dev. */
static int devm_iio_device_match(struct device *dev, void *res, void *data)
{
	struct iio_dev **this_dev = res;

	/* A devres entry with no device is a bug; warn but don't match. */
	if (!this_dev || !*this_dev) {
		WARN_ON(!this_dev || !*this_dev);
		return 0;
	}

	return *this_dev == data;
}
/**
 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
 * @dev: Device to allocate iio_dev for
 * @sizeof_priv: Space to allocate for private structure.
 *
 * Managed iio_device_alloc. iio_dev allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_dev allocated with this function needs to be freed separately,
 * devm_iio_device_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
{
	struct iio_dev *iio_dev;
	struct iio_dev **ptr;

	ptr = devres_alloc(devm_iio_device_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	iio_dev = iio_device_alloc(sizeof_priv);
	if (!iio_dev) {
		devres_free(ptr);
		return NULL;
	}

	*ptr = iio_dev;
	devres_add(dev, ptr);

	return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
/**
 * devm_iio_device_free - Resource-managed iio_device_free()
 * @dev: Device this iio_dev belongs to
 * @iio_dev: the iio_dev associated with the device
 *
 * Free iio_dev allocated with devm_iio_device_alloc().
 */
void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
{
	/* devres_release both removes the entry and frees the iio_dev;
	 * a non-zero return means the resource was never registered. */
	WARN_ON(devres_release(dev, devm_iio_device_release,
			       devm_iio_device_match, iio_dev));
}
EXPORT_SYMBOL_GPL(devm_iio_device_free);
/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 *
 * Only a single open of the chrdev is allowed at a time; the device is
 * pinned for the lifetime of the open file.
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev *iodev = container_of(inode->i_cdev,
					     struct iio_dev, chrdev);

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &iodev->flags))
		return -EBUSY;

	iio_device_get(iodev);
	filp->private_data = iodev;

	return 0;
}
/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 *
 * Clears the busy bit taken in iio_chrdev_open() and drops the device
 * reference held for the open file.
 **/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev *iodev = container_of(inode->i_cdev,
					     struct iio_dev, chrdev);

	clear_bit(IIO_BUSY_BIT_POS, &iodev->flags);
	iio_device_put(iodev);

	return 0;
}
/* Somewhat of a cross file organization violation - ioctls here are actually
 * event related */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev *indio_dev = filp->private_data;
	int __user *ip = (int __user *)arg;
	int fd;

	/* A NULL info means the device was unregistered while open. */
	if (!indio_dev->info)
		return -ENODEV;

	if (cmd != IIO_GET_EVENT_FD_IOCTL)
		return -EINVAL;

	fd = iio_event_getfd(indio_dev);
	if (copy_to_user(ip, &fd, sizeof(fd)))
		return -EFAULT;

	return 0;
}
/* File operations for the per-device character device. */
static const struct file_operations iio_buffer_fileops = {
	.read = iio_buffer_read_first_n_outer_addr,
	.release = iio_chrdev_release,
	.open = iio_chrdev_open,
	.poll = iio_buffer_poll_addr,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = iio_ioctl,
};

/* All-NULL setup ops for buffered devices that don't provide their own. */
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev: Device structure filled by the device driver
 *
 * Registers debugfs, sysfs, eventset, trigger consumer, chrdev, and the
 * struct device, unwinding in reverse order on any failure.
 **/
int iio_device_register(struct iio_dev *indio_dev)
{
	int ret;

	/* If the calling driver did not initialize of_node, do it here */
	if (!indio_dev->dev.of_node && indio_dev->dev.parent)
		indio_dev->dev.of_node = indio_dev->dev.parent->of_node;

	/* configure elements for the chrdev */
	indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);

	ret = iio_device_register_debugfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register debugfs interfaces\n");
		goto error_ret;
	}
	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_unreg_debugfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_register_trigger_consumer(indio_dev);

	/* Buffered devices without setup_ops get harmless no-op callbacks. */
	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
	    indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
	indio_dev->chrdev.owner = indio_dev->info->driver_module;
	indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
	ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
	if (ret < 0)
		goto error_unreg_eventset;
	ret = device_add(&indio_dev->dev);
	if (ret < 0)
		goto error_cdev_del;

	return 0;

	/* Unwind in reverse order of registration. */
error_cdev_del:
	cdev_del(&indio_dev->chrdev);
error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);
/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev: Device structure representing the device.
 **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
	/* Hold info_exist_lock so in-flight accessors see info go NULL. */
	mutex_lock(&indio_dev->info_exist_lock);

	device_del(&indio_dev->dev);

	if (indio_dev->chrdev.dev)
		cdev_del(&indio_dev->chrdev);
	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	/* Accessors treat a NULL info as "device gone" (see iio_ioctl()). */
	indio_dev->info = NULL;

	/* Wake sleepers so they can notice the device has disappeared. */
	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&indio_dev->info_exist_lock);
}
EXPORT_SYMBOL(iio_device_unregister);
/* devres release callback: @res holds a pointer to the registered iio_dev. */
static void devm_iio_device_unreg(struct device *dev, void *res)
{
	struct iio_dev **indio_dev = res;

	iio_device_unregister(*indio_dev);
}
/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register. The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * If an iio_dev registered with this function needs to be unregistered
 * separately, devm_iio_device_unregister() must be used.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_dev **ptr;
	int ret;

	/* Reserve the devres entry first so registration never has to be
	 * rolled back because of an allocation failure. */
	ptr = devres_alloc(devm_iio_device_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = iio_device_register(indio_dev);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = indio_dev;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_iio_device_register);
/**
 * devm_iio_device_unregister - Resource-managed iio_device_unregister()
 * @dev:	Device this iio_dev belongs to
 * @indio_dev:	the iio_dev associated with the device
 *
 * Unregister iio_dev registered with devm_iio_device_register().
 */
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
{
	/* devres_release() also runs devm_iio_device_unreg(); a non-zero
	 * return means no matching devres entry existed, which is a bug. */
	WARN_ON(devres_release(dev, devm_iio_device_unreg,
			       devm_iio_device_match, indio_dev));
}
EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
subsys_initcall(iio_init);
module_exit(iio_exit);
MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");
/* Industrial I/O event handling
*
* Copyright (c) 2008 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* Based on elements of hwmon and input subsystems.
*/
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events; its
 *			internal lock also protects @det_events and @flags
 * @det_events:		kfifo of detected events, fixed depth of 16
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including the busy flag
 *			(IIO_BUSY_BIT_POS), set while a reader holds the fd
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);
	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
};
/**
 * iio_push_event() - try to add event to the list for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		What event
 * @timestamp:		When the event occurred
 *
 * Takes the waitqueue lock with irqsave, so this is callable from
 * interrupt context. Always returns 0; a full fifo drops the event.
 **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	unsigned long flags;
	int copied;
	spin_lock_irqsave(&ev_int->wait.lock, flags);
	/* Does anyone care? Only queue while a reader holds the event fd
	 * open (IIO_BUSY_BIT_POS is set in iio_event_getfd()). */
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		ev.id = ev_code;
		ev.timestamp = timestamp;
		/* kfifo_put() returns the number of elements stored; 0
		 * means the fifo was full and the event is dropped. */
		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_locked_poll(&ev_int->wait, POLLIN);
	}
	spin_unlock_irqrestore(&ev_int->wait.lock, flags);
	return 0;
}
EXPORT_SYMBOL(iio_push_event);
/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	file to poll on
 * @wait:	poll table
 *
 * Returns POLLIN | POLLRDNORM when an event is queued, 0 otherwise.
 */
static unsigned int iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int events = 0;
	/* Device was unregistered while this fd stayed open (info is
	 * cleared in iio_device_unregister()).
	 * NOTE(review): returning -ENODEV from a function whose return
	 * type is an unsigned poll mask looks dubious; POLLERR may be the
	 * intended signal here - confirm against the VFS poll contract. */
	if (!indio_dev->info)
		return -ENODEV;
	poll_wait(filep, &ev_int->wait, wait);
	/* wait.lock also guards det_events (see iio_push_event()). */
	spin_lock_irq(&ev_int->wait.lock);
	if (!kfifo_is_empty(&ev_int->det_events))
		events = POLLIN | POLLRDNORM;
	spin_unlock_irq(&ev_int->wait.lock);
	return events;
}
/* Blocking read of detected events. Returns the number of bytes copied,
 * -EAGAIN for a non-blocking fd with an empty fifo, -ENODEV once the
 * device has been unregistered, or -EINVAL for an undersized buffer. */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int copied;
	int ret;
	if (!indio_dev->info)
		return -ENODEV;
	/* Userspace must provide room for at least one whole event. */
	if (count < sizeof(struct iio_event_data))
		return -EINVAL;
	spin_lock_irq(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there.
		 * The *_locked_irq variant drops wait.lock while sleeping
		 * and reacquires it before returning. */
		ret = wait_event_interruptible_locked_irq(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
		if (ret)
			goto error_unlock;
		/* Woken by iio_device_unregister() rather than data. */
		if (indio_dev->info == NULL) {
			ret = -ENODEV;
			goto error_unlock;
		}
		/* Single access device so no one else can get the data */
	}
	/* Still under wait.lock, so the fifo cannot be drained under us. */
	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
error_unlock:
	spin_unlock_irq(&ev_int->wait.lock);
	return ret ? ret : copied;
}
/* Release handler for the anonymous event fd created by iio_event_getfd(). */
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	spin_lock_irq(&ev_int->wait.lock);
	/* Allow a subsequent iio_event_getfd() to succeed again. */
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock_irq(&ev_int->wait.lock);
	/* Drop the reference taken in iio_event_getfd(). */
	iio_device_put(indio_dev);
	return 0;
}
/* File operations for the per-device event fd (no write/ioctl support). */
static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.poll = iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
/* Hand userspace an anonymous fd for the device's event stream. Only one
 * reader may hold the fd at a time (enforced via IIO_BUSY_BIT_POS). */
int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;
	/* Device registered no event interface (see
	 * iio_device_register_eventset()). */
	if (ev_int == NULL)
		return -ENODEV;
	spin_lock_irq(&ev_int->wait.lock);
	/* Claim exclusive ownership of the event stream. */
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock_irq(&ev_int->wait.lock);
		return -EBUSY;
	}
	spin_unlock_irq(&ev_int->wait.lock);
	/* Reference dropped in iio_event_chrdev_release() (or below on
	 * failure) keeps the device alive while the fd exists. */
	iio_device_get(indio_dev);
	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
			      indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		/* Undo the busy claim so a later open can succeed. */
		spin_lock_irq(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock_irq(&ev_int->wait.lock);
		iio_device_put(indio_dev);
	}
	return fd;
}
/* Human-readable names used to build event sysfs attribute names of the
 * form <type>_<dir>_<info> (see iio_device_add_event()). Indexed by the
 * corresponding IIO_EV_* enum values. */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
};
/* The attr->address field is packed by iio_device_add_event() as
 * (info << 16) | spec_index: low 16 bits index chan->event_spec[],
 * high 16 bits carry the iio_event_info item. */
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}
static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}
static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}
/* sysfs store for the <type>_<dir>_en attribute: enable/disable an event. */
static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev, this_attr->c,
						  iio_ev_attr_type(this_attr),
						  iio_ev_attr_dir(this_attr),
						  val);
	if (ret < 0)
		return ret;

	return len;
}
/* sysfs show for the <type>_<dir>_en attribute: report enabled state. */
static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;

	ret = indio_dev->info->read_event_config(indio_dev, this_attr->c,
						 iio_ev_attr_type(this_attr),
						 iio_ev_attr_dir(this_attr));
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", ret);
}
/* sysfs show for event value/hysteresis attributes. */
static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, val, val2;

	ret = indio_dev->info->read_event_value(indio_dev, this_attr->c,
						iio_ev_attr_type(this_attr),
						iio_ev_attr_dir(this_attr),
						iio_ev_attr_info(this_attr),
						&val, &val2);
	if (ret < 0)
		return ret;

	/* ret selects the IIO_VAL_* formatting applied to val/val2. */
	return iio_format_value(buf, ret, val, val2);
}
/* sysfs store for event value/hysteresis attributes. */
static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, val, val2;

	/* Value attributes are read-only when the driver has no setter. */
	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev, this_attr->c,
						 iio_ev_attr_type(this_attr),
						 iio_ev_attr_dir(this_attr),
						 iio_ev_attr_info(this_attr),
						 val, val2);
	if (ret < 0)
		return ret;

	return len;
}
/*
 * iio_device_add_event() - register sysfs attributes for one event spec
 * @indio_dev:	device the event belongs to
 * @chan:	channel the event is defined on
 * @spec_index:	index into chan->event_spec[]
 * @type:	event type, indexes iio_ev_type_text[]
 * @dir:	event direction, indexes iio_ev_dir_text[]
 * @shared_by:	sharing level, forwarded to __iio_add_chan_devattr()
 * @mask:	bitmap of iio_event_info items to expose
 *
 * Returns the number of attributes created, or a negative errno.
 */
static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	ssize_t (*store)(struct device *, struct device_attribute *,
		const char *, size_t);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	/*
	 * The third argument of for_each_set_bit() is a count of BITS, not
	 * bytes. Passing sizeof(*mask) alone limited the scan to the low 8
	 * bits and silently dropped any event-info items beyond them.
	 */
	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		/* Attribute name: <type>_<dir>_<info>, e.g. thresh_rising_en */
		postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
				iio_ev_type_text[type], iio_ev_dir_text[dir],
				iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		/* Pack the info item into the high 16 bits of the address,
		 * the spec index into the low 16 (see iio_ev_attr_*()). */
		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			&indio_dev->event_interface->dev_attr_list);
		kfree(postfix);

		/* A shared attribute may already have been created by a
		 * sibling channel; that is not an error. */
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret = 0, i, attrcount = 0;
enum iio_event_direction dir;
enum iio_event_type type;
for (i = 0; i < chan->num_event_specs; i++) {
type = chan->event_spec[i].type;
dir = chan->event_spec[i].dir;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SEPARATE, &chan->event_spec[i].mask_separate);
if (ret < 0)
goto error_ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_TYPE,
&chan->event_spec[i].mask_shared_by_type);
if (ret < 0)
goto error_ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_DIR,
&chan->event_spec[i].mask_shared_by_dir);
if (ret < 0)
goto error_ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_ALL,
&chan->event_spec[i].mask_shared_by_all);
if (ret < 0)
goto error_ret;
attrcount += ret;
}
ret = attrcount;
error_ret:
return ret;
}
/* Walk every channel and create its event sysfs attributes. Returns the
 * total attribute count or the first negative errno encountered. */
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int chan_idx, ret, total = 0;

	/* Dynamically created from the channels array */
	for (chan_idx = 0; chan_idx < indio_dev->num_channels; chan_idx++) {
		ret = iio_device_add_event_sysfs(indio_dev,
					&indio_dev->channels[chan_idx]);
		if (ret < 0)
			return ret;
		total += ret;
	}

	return total;
}
/* True if any channel declares at least one event spec, i.e. the device
 * needs an event interface even without static event_attrs. */
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int i;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].num_event_specs != 0)
			return true;

	return false;
}
/* Initialise the event fifo and the waitqueue blocking readers sleep on.
 * The two initialisations are independent of each other. */
static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	init_waitqueue_head(&ev_int->wait);
	INIT_KFIFO(ev_int->det_events);
}
/* Name of the sysfs directory holding all event attributes. */
static const char *iio_event_group_name = "events";
/* Build the "events" attribute group from the driver's static event_attrs
 * plus per-channel dynamic attributes, and allocate the event interface.
 * Returns 0 (also when the device has no events at all) or a negative
 * errno. */
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;
	/* No static event attrs and no channel event specs: nothing to
	 * register, and event_interface stays NULL (checked by
	 * iio_event_getfd()). */
	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;
	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
	iio_setup_ev_int(indio_dev->event_interface);
	/* Count the driver-supplied static attributes ... */
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/* ... then create and count the per-channel dynamic ones. */
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}
	/* Merge both sets into one NULL-terminated attribute array. */
	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	/* The group is picked up when device_add() registers the device. */
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;
	return 0;
error_free_setup_event_lines:
	/* group.attrs is either unallocated or the allocation itself
	 * failed here, so only the attr list and interface need freeing. */
	iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
	kfree(indio_dev->event_interface);
error_ret:
	return ret;
}
/**
* iio_device_wakeup_eventset - Wakes up the event waitqueue
* @indio_dev: The IIO device
*
* Wakes up the event waitqueue used for poll() and blocking read().
* Should usually be called when the device is unregistered.
*/
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
if (indio_dev->event_interface == NULL)
return;
wake_up(&indio_dev->event_interface->wait);
}
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
if (indio_dev->event_interface == NULL)
return;
iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface->group.attrs);
kfree(indio_dev->event_interface);
}
/* The industrial I/O core, trigger handling functions
*
* Copyright (c) 2008 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>
/* RFC - Question of approach
* Make the common case (single sensor single trigger)
* simple by starting trigger capture from when first sensors
* is added.
*
* Complex simultaneous start requires use of 'hold' functionality
* of the trigger. (not implemented)
*
* Any other suggestions?
*/
static DEFINE_IDA(iio_trigger_ida);
/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);
/**
 * iio_trigger_read_name() - retrieve useful identifying name
 *
 * sysfs "name" attribute of a trigger device.
 **/
static ssize_t iio_trigger_read_name(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);
	return sprintf(buf, "%s\n", trig->name);
}
static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
/* Default attribute group attached to every trigger via iio_trig_type. */
static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);
/* Register a previously allocated trigger with the IIO core: assign an id,
 * add the device and publish it on the global trigger list. */
int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	ret = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		return ret;
	trig_info->id = ret;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%ld",
		     (unsigned long) trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret) {
		ida_simple_remove(&iio_trigger_ida, trig_info->id);
		return ret;
	}

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_register);
/* Reverse of iio_trigger_register(): unpublish, release the id, remove the
 * device. Does not free the trigger - that is iio_trigger_free(). */
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* NOTE(review): consumers may still hold a pointer obtained from
	 * iio_trigger_find_by_name() at this point ("Possible issue" per
	 * the original author) - confirm lifetime expectations. */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
/* Look up a registered trigger whose name matches @name (sysfs_streq, so
 * a trailing newline from sysfs input is tolerated). Returns NULL when no
 * trigger matches. @len is unused but kept for the existing callers. */
static struct iio_trigger *iio_trigger_find_by_name(const char *name,
						    size_t len)
{
	struct iio_trigger *iter, *found = NULL;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list) {
		if (sysfs_streq(iter->name, name)) {
			found = iter;
			break;
		}
	}
	mutex_unlock(&iio_trigger_list_lock);

	return found;
}
/* Fire the trigger: dispatch the subirq of every enabled consumer slot.
 * Called from interrupt context (uses generic_handle_irq()). */
void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
	int i;
	/* use_count != 0 means a previous firing is still being consumed;
	 * this firing is skipped entirely. */
	if (!atomic_read(&trig->use_count)) {
		/* Pre-load one count per slot; disabled slots give theirs
		 * back immediately via iio_trigger_notify_done(). */
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);
/* Generic data-ready irq handler: timestamp now and fire the trigger. */
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	struct iio_trigger *trig = private;

	iio_trigger_poll(trig, iio_get_time_ns());
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
/* Variant of iio_trigger_poll() for threaded/nested context: dispatches
 * via handle_nested_irq() instead of generic_handle_irq(). */
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
	int i;
	/* Skip this firing if the previous one is still being consumed. */
	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
/* A consumer finished with the current firing. When the last one drops
 * use_count to zero, give the driver a chance to re-arm; a non-zero
 * try_reenable() return means an edge was missed, so fire again now. */
void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
		trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig, 0);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
int ret;
mutex_lock(&trig->pool_lock);
ret = bitmap_find_free_region(trig->pool,
CONFIG_IIO_CONSUMERS_PER_TRIGGER,
ilog2(1));
mutex_unlock(&trig->pool_lock);
if (ret >= 0)
ret += trig->subirq_base;
return ret;
}
/* Return a consumer slot obtained from iio_trigger_get_irq() to the pool. */
static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}
/* Complexity in here. With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled. Alternative of not enabling trigger unless
 * the relevant function is in there may be the best option.
 */
/* Worth protecting against double additions? */
/*
 * Attach a poll function to a trigger: reserve a consumer slot, request
 * its threaded irq and, for the first consumer, enable the trigger in the
 * driver. Error paths now release every resource acquired so far:
 * previously a negative iio_trigger_get_irq() result was passed straight
 * to request_threaded_irq(), and a request/set_trigger_state failure
 * leaked the reserved pool slot (and the requested irq).
 */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	bool notinuse
		= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(pf->indio_dev->info->driver_module);

	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		/* No free consumer slot on this trigger. */
		ret = pf->irq;
		goto out_put_module;
	}

	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* First consumer: ask the driver to start the trigger. */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto out_free_irq;
	}

	return 0;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(pf->indio_dev->info->driver_module);
	return ret;
}
/* Detach a poll function from a trigger: for the last consumer, stop the
 * trigger in the driver first, then release the slot, irq and module
 * reference taken in iio_trigger_attach_poll_func(). */
static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	/* Exactly one bit set means pf is the only remaining consumer. */
	bool no_other_users
		= (bitmap_weight(trig->pool,
				 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
		   == 1);
	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			/* NOTE(review): on failure the irq and pool slot
			 * stay attached - the consumer remains usable, but
			 * confirm callers expect this. */
			goto error_ret;
	}
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(pf->indio_dev->info->driver_module);
error_ret:
	return ret;
}
/* Top-half helper for pollfuncs: record the capture timestamp as early as
 * possible, then defer the real work to the threaded bottom half. */
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	pf->timestamp = iio_get_time_ns();
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
/* Allocate and fill a poll function descriptor. The name is built from
 * the printf-style @fmt arguments. Returns NULL on allocation failure.
 * Free with iio_dealloc_pollfunc(). */
struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	struct iio_poll_func *pf;
	va_list vargs;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return NULL;

	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (!pf->name) {
		kfree(pf);
		return NULL;
	}

	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
/* Free a poll function allocated by iio_alloc_pollfunc() (name first,
 * since it was kvasprintf-allocated separately). */
void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried. An empty string is reported when no
 * trigger is set.
 **/
static ssize_t iio_trigger_read_current(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (!indio_dev->trig)
		return 0;

	return sprintf(buf, "%s\n", indio_dev->trig->name);
}
/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name. Writing a name that matches no registered trigger clears the
 * current trigger (iio_trigger_find_by_name() returns NULL).
 **/
static ssize_t iio_trigger_write_current(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;
	/* The trigger cannot be swapped while triggered capture is live. */
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	mutex_unlock(&indio_dev->mlock);
	/* NOTE(review): mlock is dropped before the swap below, so a
	 * concurrent buffer enable could race with the reassignment -
	 * confirm whether callers serialise this externally. */
	trig = iio_trigger_find_by_name(buf, len);
	if (oldtrig == trig)
		return len;
	/* Give both the device and the trigger a veto over the pairing. */
	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			return ret;
	}
	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			return ret;
	}
	/* Swap references: put the old trigger, get the new one. */
	indio_dev->trig = trig;
	if (oldtrig)
		iio_trigger_put(oldtrig);
	if (indio_dev->trig)
		iio_trigger_get(indio_dev->trig);
	return len;
}
static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
iio_trigger_read_current,
iio_trigger_write_current);
static struct attribute *iio_trigger_consumer_attrs[] = {
&dev_attr_current_trigger.attr,
NULL,
};
static const struct attribute_group iio_trigger_consumer_attr_group = {
.name = "trigger",
.attrs = iio_trigger_consumer_attrs,
};
static void iio_trig_release(struct device *device)
{
struct iio_trigger *trig = to_iio_trigger(device);
int i;
if (trig->subirq_base) {
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
irq_modify_status(trig->subirq_base + i,
IRQ_NOAUTOEN,
IRQ_NOREQUEST | IRQ_NOPROBE);
irq_set_chip(trig->subirq_base + i,
NULL);
irq_set_handler(trig->subirq_base + i,
NULL);
}
irq_free_descs(trig->subirq_base,
CONFIG_IIO_CONSUMERS_PER_TRIGGER);
}
kfree(trig->name);
kfree(trig);
}
static struct device_type iio_trig_type = {
.release = iio_trig_release,
.groups = iio_trig_dev_groups,
};
/* irq_chip mask callback: flag this consumer slot disabled so
 * iio_trigger_poll() skips it on the next firing. */
static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger,
						subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}
/* irq_chip unmask callback: flag this consumer slot enabled so
 * iio_trigger_poll() dispatches it on the next firing. */
static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger,
						subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}
/* Core trigger allocator: sets up the device, the per-consumer virtual irq
 * descriptors and the name. Returns NULL on any failure (everything
 * acquired so far is rolled back). */
static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
	struct iio_trigger *trig;
	trig = kzalloc(sizeof *trig, GFP_KERNEL);
	if (trig) {
		int i;
		trig->dev.type = &iio_trig_type;
		trig->dev.bus = &iio_bus_type;
		device_initialize(&trig->dev);
		mutex_init(&trig->pool_lock);
		/* One virtual irq per potential consumer of this trigger. */
		trig->subirq_base
			= irq_alloc_descs(-1, 0,
					  CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					  0);
		if (trig->subirq_base < 0) {
			kfree(trig);
			return NULL;
		}
		trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
		if (trig->name == NULL) {
			irq_free_descs(trig->subirq_base,
				       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
			kfree(trig);
			return NULL;
		}
		trig->subirq_chip.name = trig->name;
		trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
		trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_set_chip(trig->subirq_base + i,
				     &trig->subirq_chip);
			irq_set_handler(trig->subirq_base + i,
					&handle_simple_irq);
			/* Slots start masked (IRQ_NOAUTOEN) and are not
			 * requestable/probe-able from outside the core. */
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOREQUEST | IRQ_NOAUTOEN,
					  IRQ_NOPROBE);
		}
		/* Extra reference on top of device_initialize()'s initial
		 * one; dropped by iio_trigger_free(). NOTE(review): confirm
		 * who drops the initial reference (iio_trig_release() runs
		 * only when the refcount hits zero). */
		get_device(&trig->dev);
	}
	return trig;
}
/* Public printf-style wrapper around viio_trigger_alloc(). */
struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);
/* Drop the reference taken in viio_trigger_alloc(); the trigger is
 * released via iio_trig_release() once the refcount reaches zero.
 * NULL is tolerated. */
void iio_trigger_free(struct iio_trigger *trig)
{
	if (!trig)
		return;

	put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
/* devres release callback: res holds a pointer to the allocated trigger. */
static void devm_iio_trigger_release(struct device *dev, void *res)
{
	struct iio_trigger **trig = res;

	iio_trigger_free(*trig);
}
/* devres match callback for devm_iio_trigger_free(). */
static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
{
	struct iio_trigger **r = res;

	/* A devres entry without a trigger pointer indicates a bug. */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:		Device to allocate iio_trigger for
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_trigger allocated with this function needs to be freed
 * separately, devm_iio_trigger_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
						const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	if (!trig) {
		devres_free(ptr);
		return NULL;
	}

	*ptr = trig;
	devres_add(dev, ptr);
	return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
/**
 * devm_iio_trigger_free - Resource-managed iio_trigger_free()
 * @dev:	Device this iio_dev belongs to
 * @iio_trig:	the iio_trigger associated with the device
 *
 * Free iio_trigger allocated with devm_iio_trigger_alloc().
 */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
	/* Non-zero means no matching devres entry existed - a caller bug. */
	WARN_ON(devres_release(dev, devm_iio_trigger_release,
			       devm_iio_trigger_match, iio_trig));
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
/* Expose the "trigger/current_trigger" sysfs group on a consumer device. */
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference
	 * (taken via iio_trigger_get() in iio_trigger_write_current()). */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}
/* Default buffer postenable: hook the device's pollfunc to its trigger. */
int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	return iio_trigger_attach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);
/* Default buffer predisable: detach the pollfunc from the trigger. */
int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	return iio_trigger_detach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
/*
* Copyright (c) 2012 Analog Devices, Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
.postenable = &iio_triggered_buffer_postenable,
.predisable = &iio_triggered_buffer_predisable,
};
/**
 * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
 * @indio_dev:		IIO device structure
 * @pollfunc_bh:	Function which will be used as pollfunc bottom half
 * @pollfunc_th:	Function which will be used as pollfunc top half
 * @setup_ops:		Buffer setup functions to use for this device.
 *			If NULL the default setup functions for triggered
 *			buffers will be used.
 *
 * This function combines some common tasks which will normally be performed
 * when setting up a triggered buffer. It will allocate the buffer and the
 * pollfunc, as well as register the buffer with the IIO core.
 *
 * Before calling this function the indio_dev structure should already be
 * completely initialized, but not yet registered. In practice this means
 * that this function should be called right before iio_device_register().
 *
 * To free the resources allocated by this function call
 * iio_triggered_buffer_cleanup().
 */
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
	irqreturn_t (*pollfunc_bh)(int irq, void *p),
	irqreturn_t (*pollfunc_th)(int irq, void *p),
	const struct iio_buffer_setup_ops *setup_ops)
{
	struct iio_buffer *buffer;
	int ret;
	buffer = iio_kfifo_allocate(indio_dev);
	if (!buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}
	/* After this, indio_dev->buffer == buffer, which is what the
	 * error path below frees. */
	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
						 pollfunc_th,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_kfifo_free;
	}
	/* Ring buffer functions - here trigger setup related */
	if (setup_ops)
		indio_dev->setup_ops = setup_ops;
	else
		indio_dev->setup_ops = &iio_triggered_buffer_setup_ops;
	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	ret = iio_buffer_register(indio_dev,
				  indio_dev->channels,
				  indio_dev->num_channels);
	if (ret)
		goto error_dealloc_pollfunc;
	return 0;
error_dealloc_pollfunc:
	iio_dealloc_pollfunc(indio_dev->pollfunc);
error_kfifo_free:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_triggered_buffer_setup);
/**
 * iio_triggered_buffer_cleanup() - Free resources allocated by iio_triggered_buffer_setup()
 * @indio_dev: IIO device structure
 *
 * Teardown order mirrors setup in reverse: unregister the buffer first,
 * then free the pollfunc and the kfifo.
 */
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev)
{
	iio_buffer_unregister(indio_dev);
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}
EXPORT_SYMBOL(iio_triggered_buffer_cleanup);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers");
MODULE_LICENSE("GPL");
/* The industrial I/O core in kernel channel mapping
*
* Copyright (c) 2011 Jonathan Cameron
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
/* One registered consumer mapping: ties a provider device to an iio_map. */
struct iio_map_internal {
    struct iio_dev *indio_dev;  /* provider that registered this map */
    struct iio_map *map;        /* consumer name / channel label entry */
    struct list_head l;         /* link in the global iio_map_list */
};

/* Global registry of consumer maps; all access guarded by iio_map_list_lock. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
int i = 0, ret = 0;
struct iio_map_internal *mapi;
if (maps == NULL)
return 0;
mutex_lock(&iio_map_list_lock);
while (maps[i].consumer_dev_name != NULL) {
mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
if (mapi == NULL) {
ret = -ENOMEM;
goto error_ret;
}
mapi->map = &maps[i];
mapi->indio_dev = indio_dev;
list_add(&mapi->l, &iio_map_list);
i++;
}
error_ret:
mutex_unlock(&iio_map_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
/*
* Remove all map entries associated with the given iio device
*/
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
int ret = -ENODEV;
struct iio_map_internal *mapi;
struct list_head *pos, *tmp;
mutex_lock(&iio_map_list_lock);
list_for_each_safe(pos, tmp, &iio_map_list) {
mapi = list_entry(pos, struct iio_map_internal, l);
if (indio_dev == mapi->indio_dev) {
list_del(&mapi->l);
kfree(mapi);
ret = 0;
}
}
mutex_unlock(&iio_map_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
int i;
const struct iio_chan_spec *chan = NULL;
for (i = 0; i < indio_dev->num_channels; i++)
if (indio_dev->channels[i].datasheet_name &&
strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
chan = &indio_dev->channels[i];
break;
}
return chan;
}
#ifdef CONFIG_OF
/* bus_find_device() match callback: @data is the OF node we are looking for. */
static int iio_dev_node_match(struct device *dev, void *data)
{
    return dev->of_node == data && dev->type == &iio_device_type;
}

/*
 * Resolve entry @index of @np's "io-channels" phandle list into @channel.
 *
 * Returns 0 on success, -EPROBE_DEFER when the provider device has not been
 * registered on the IIO bus yet, or a negative errno on parse/validation
 * failure.  On success the device reference taken by bus_find_device() is
 * kept and owned by @channel.
 */
static int __of_iio_channel_get(struct iio_channel *channel,
                                struct device_node *np, int index)
{
    struct device *idev;
    struct iio_dev *indio_dev;
    int err;
    struct of_phandle_args iiospec;

    err = of_parse_phandle_with_args(np, "io-channels",
                                     "#io-channel-cells",
                                     index, &iiospec);
    if (err)
        return err;

    idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
                           iio_dev_node_match);
    of_node_put(iiospec.np);
    if (idev == NULL)
        return -EPROBE_DEFER;

    indio_dev = dev_to_iio_dev(idev);
    channel->indio_dev = indio_dev;
    /* Specifier cell 0 (if present) selects the channel index; default 0. */
    index = iiospec.args_count ? iiospec.args[0] : 0;
    if (index >= indio_dev->num_channels) {
        err = -EINVAL;
        goto err_put;
    }
    channel->channel = &indio_dev->channels[index];

    return 0;

err_put:
    iio_device_put(indio_dev);
    return err;
}
/*
 * Allocate an iio_channel and resolve "io-channels" entry @index of @np
 * into it.  Returns the channel or an ERR_PTR on failure.
 */
static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
    struct iio_channel *channel;
    int err;

    if (index < 0)
        return ERR_PTR(-EINVAL);

    channel = kzalloc(sizeof(*channel), GFP_KERNEL);
    if (channel == NULL)
        return ERR_PTR(-ENOMEM);

    err = __of_iio_channel_get(channel, np, index);
    if (err) {
        kfree(channel);
        return ERR_PTR(err);
    }

    return channel;
}
/*
 * Look up a (possibly named) IIO channel starting at @np, walking up the
 * device tree through parents that carry "io-channel-ranges".  Returns the
 * channel, an ERR_PTR(-EPROBE_DEFER) if the provider is not ready, or NULL
 * when no mapping exists.
 */
static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                                                      const char *name)
{
    struct iio_channel *chan = NULL;

    /* Walk up the tree of devices looking for a matching iio channel */
    while (np) {
        int index = 0;

        /*
         * For named iio channels, first look up the name in the
         * "io-channel-names" property. If it cannot be found, the
         * index will be an error code, and of_iio_channel_get()
         * will fail.
         */
        if (name)
            index = of_property_match_string(np, "io-channel-names",
                                             name);
        chan = of_iio_channel_get(np, index);
        if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
            break;
        else if (name && index >= 0) {
            /* Name resolved but the mapping itself is broken: hard error. */
            pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
                   np->full_name, name ? name : "", index);
            return NULL;
        }

        /*
         * No matching IIO channel found on this node.
         * If the parent node has a "io-channel-ranges" property,
         * then we can try one of its channels.
         */
        np = np->parent;
        if (np && !of_get_property(np, "io-channel-ranges", NULL))
            return NULL;
    }

    return chan;
}
/*
 * Resolve every "io-channels" entry of @dev's OF node.  Returns a
 * NULL-terminated array of channels, NULL when the node declares no
 * channels (caller then falls back to the map table), or an ERR_PTR.
 */
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
    struct iio_channel *chans;
    int i, mapind, nummaps = 0;
    int ret;

    /* Count the phandle entries by probing until parsing fails. */
    do {
        ret = of_parse_phandle_with_args(dev->of_node,
                                         "io-channels",
                                         "#io-channel-cells",
                                         nummaps, NULL);
        if (ret < 0)
            break;
    } while (++nummaps);

    if (nummaps == 0) /* no error, return NULL to search map table */
        return NULL;

    /* NULL terminated array to save passing size */
    chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
    if (chans == NULL)
        return ERR_PTR(-ENOMEM);

    /* Search for OF matches */
    for (mapind = 0; mapind < nummaps; mapind++) {
        ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
                                   mapind);
        if (ret)
            goto error_free_chans;
    }

    return chans;

error_free_chans:
    /* Only entries [0, mapind) hold a provider reference. */
    for (i = 0; i < mapind; i++)
        iio_device_put(chans[i].indio_dev);
    kfree(chans);
    return ERR_PTR(ret);
}

#else /* CONFIG_OF */

/* Without OF support there are never device-tree channel mappings. */
static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
    return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
    return NULL;
}

#endif /* CONFIG_OF */
/*
 * Look up one channel in the global map table by consumer device name
 * and/or consumer channel name.  Takes a reference on the provider device;
 * returns the channel or an ERR_PTR.
 */
static struct iio_channel *iio_channel_get_sys(const char *name,
                                               const char *channel_name)
{
    struct iio_map_internal *c_i = NULL, *c = NULL;
    struct iio_channel *channel;
    int err;

    if (name == NULL && channel_name == NULL)
        return ERR_PTR(-ENODEV);

    /* first find matching entry the channel map */
    mutex_lock(&iio_map_list_lock);
    list_for_each_entry(c_i, &iio_map_list, l) {
        if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
            (channel_name &&
             strcmp(channel_name, c_i->map->consumer_channel) != 0))
            continue;
        c = c_i;
        /* Pin the provider before dropping the list lock. */
        iio_device_get(c->indio_dev);
        break;
    }
    mutex_unlock(&iio_map_list_lock);
    if (c == NULL)
        return ERR_PTR(-ENODEV);

    channel = kzalloc(sizeof(*channel), GFP_KERNEL);
    if (channel == NULL) {
        err = -ENOMEM;
        goto error_no_mem;
    }

    channel->indio_dev = c->indio_dev;

    /* Resolve the provider-side channel label when the map names one. */
    if (c->map->adc_channel_label) {
        channel->channel =
            iio_chan_spec_from_name(channel->indio_dev,
                                    c->map->adc_channel_label);

        if (channel->channel == NULL) {
            err = -EINVAL;
            goto error_no_chan;
        }
    }

    return channel;

error_no_chan:
    kfree(channel);
error_no_mem:
    iio_device_put(c->indio_dev);
    return ERR_PTR(err);
}
/*
 * Consumer entry point: get a single IIO channel for @dev, trying the
 * device tree first and falling back to the registered map table.
 */
struct iio_channel *iio_channel_get(struct device *dev,
                                    const char *channel_name)
{
    const char *name = dev ? dev_name(dev) : NULL;
    struct iio_channel *channel;

    if (dev) {
        channel = of_iio_channel_get_by_name(dev->of_node,
                                             channel_name);
        /* NULL means "no OF mapping" - fall through to the map table. */
        if (channel != NULL)
            return channel;
    }

    return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

/* Release a channel obtained via iio_channel_get(). */
void iio_channel_release(struct iio_channel *channel)
{
    iio_device_put(channel->indio_dev);
    kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
/*
 * iio_channel_get_all() - get all IIO channels mapped to a consumer device
 * @dev: consumer device
 *
 * Returns a NULL-terminated array of channels (device tree mappings first,
 * otherwise the global map table) or an ERR_PTR.  A reference is held on
 * each provider device; release with iio_channel_release_all().
 */
struct iio_channel *iio_channel_get_all(struct device *dev)
{
    const char *name;
    struct iio_channel *chans;
    struct iio_map_internal *c = NULL;
    int nummaps = 0;
    int mapind = 0;
    int i, ret;

    if (dev == NULL)
        return ERR_PTR(-EINVAL);

    /* Device-tree mappings take precedence; NULL means "none declared". */
    chans = of_iio_channel_get_all(dev);
    if (chans)
        return chans;

    name = dev_name(dev);

    mutex_lock(&iio_map_list_lock);
    /* first count the matching maps */
    list_for_each_entry(c, &iio_map_list, l)
        if (name && strcmp(name, c->map->consumer_dev_name) != 0)
            continue;
        else
            nummaps++;

    if (nummaps == 0) {
        ret = -ENODEV;
        goto error_ret;
    }

    /*
     * NULL terminated array to save passing size; kcalloc (rather than a
     * hand-multiplied kzalloc) is overflow-safe, matching the OF path.
     */
    chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
    if (chans == NULL) {
        ret = -ENOMEM;
        goto error_ret;
    }

    /* for each map fill in the chans element */
    list_for_each_entry(c, &iio_map_list, l) {
        if (name && strcmp(name, c->map->consumer_dev_name) != 0)
            continue;
        chans[mapind].indio_dev = c->indio_dev;
        chans[mapind].data = c->map->consumer_data;
        chans[mapind].channel =
            iio_chan_spec_from_name(chans[mapind].indio_dev,
                                    c->map->adc_channel_label);
        if (chans[mapind].channel == NULL) {
            ret = -EINVAL;
            goto error_free_chans;
        }
        iio_device_get(chans[mapind].indio_dev);
        mapind++;
    }
    if (mapind == 0) {
        ret = -ENODEV;
        goto error_free_chans;
    }
    mutex_unlock(&iio_map_list_lock);

    return chans;

error_free_chans:
    /*
     * Only entries [0, mapind) took a reference via iio_device_get();
     * the entry whose channel lookup failed had indio_dev assigned but
     * no reference taken.  The original loop ran to nummaps and thus
     * dropped one reference too many on that provider.
     */
    for (i = 0; i < mapind; i++)
        iio_device_put(chans[i].indio_dev);
    kfree(chans);
error_ret:
    mutex_unlock(&iio_map_list_lock);

    return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
/* Release an array obtained from iio_channel_get_all(). */
void iio_channel_release_all(struct iio_channel *channels)
{
    struct iio_channel *chan;

    /* The array is terminated by a zeroed entry (indio_dev == NULL). */
    for (chan = &channels[0]; chan->indio_dev; chan++)
        iio_device_put(chan->indio_dev);

    kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
/*
 * Thin wrapper around the provider's read_raw() callback; tolerates a NULL
 * @val2 by pointing it at a scratch int.  Caller must hold info_exist_lock.
 */
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
                            enum iio_chan_info_enum info)
{
    int unused;

    if (val2 == NULL)
        val2 = &unused;

    return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
                                           val, val2, info);
}

/*
 * Read the raw (unscaled) value of @chan into @val.  Returns the provider's
 * IIO_VAL_* code on success or -ENODEV if the driver has gone away.
 */
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
    int ret;

    /* info_exist_lock guards against the provider driver unregistering. */
    mutex_lock(&chan->indio_dev->info_exist_lock);
    if (chan->indio_dev->info == NULL) {
        ret = -ENODEV;
        goto err_unlock;
    }

    ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
    mutex_unlock(&chan->indio_dev->info_exist_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
/*
 * Convert @raw to a processed value: processed = (raw + offset) * channel
 * scale * @scale, where @scale is the consumer's unit multiplier (e.g. 1000
 * to get milli-units).  Caller must hold info_exist_lock.
 *
 * Fix: the IIO_VAL_INT case previously ignored @scale entirely, so
 * consumers requesting e.g. milli-units got base units from providers that
 * report an integer scale (matches the upstream fix "iio: inkern: apply
 * consumer scale on IIO_VAL_INT cases").
 */
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
    int raw, int *processed, unsigned int scale)
{
    int scale_type, scale_val, scale_val2, offset;
    s64 raw64 = raw;
    int ret;

    /* Offset is optional; ignore the error if the channel has none. */
    ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
    if (ret >= 0)
        raw64 += offset;

    scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
                                  IIO_CHAN_INFO_SCALE);
    if (scale_type < 0)
        return scale_type;

    switch (scale_type) {
    case IIO_VAL_INT:
        *processed = raw64 * scale_val * scale;
        break;
    case IIO_VAL_INT_PLUS_MICRO:
        /*
         * NOTE(review): the integer part below is not multiplied by the
         * consumer 'scale' while the fractional part is — kept as-is to
         * match the original behavior, but worth confirming against
         * current mainline inkern.c.
         */
        if (scale_val2 < 0)
            *processed = -raw64 * scale_val;
        else
            *processed = raw64 * scale_val;
        *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                              1000000LL);
        break;
    case IIO_VAL_INT_PLUS_NANO:
        if (scale_val2 < 0)
            *processed = -raw64 * scale_val;
        else
            *processed = raw64 * scale_val;
        *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                              1000000000LL);
        break;
    case IIO_VAL_FRACTIONAL:
        *processed = div_s64(raw64 * (s64)scale_val * scale,
                             scale_val2);
        break;
    case IIO_VAL_FRACTIONAL_LOG2:
        *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}
/*
 * Public, locked wrapper for iio_convert_raw_to_processed_unlocked().
 * Returns 0 on success or -ENODEV if the provider driver has gone away.
 */
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
    int *processed, unsigned int scale)
{
    int ret;

    mutex_lock(&chan->indio_dev->info_exist_lock);
    if (chan->indio_dev->info == NULL) {
        ret = -ENODEV;
        goto err_unlock;
    }

    ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
                                                scale);
err_unlock:
    mutex_unlock(&chan->indio_dev->info_exist_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

/*
 * Read a processed (offset- and scale-applied) value from @chan.  Uses the
 * provider's PROCESSED info when available, otherwise reads RAW and
 * converts with a unit multiplier of 1.
 */
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
    int ret;

    mutex_lock(&chan->indio_dev->info_exist_lock);
    if (chan->indio_dev->info == NULL) {
        ret = -ENODEV;
        goto err_unlock;
    }

    if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
        ret = iio_channel_read(chan, val, NULL,
                               IIO_CHAN_INFO_PROCESSED);
    } else {
        ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
        if (ret < 0)
            goto err_unlock;
        ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
    }

err_unlock:
    mutex_unlock(&chan->indio_dev->info_exist_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

/*
 * Read the channel's scale into @val/@val2; returns the IIO_VAL_* encoding
 * of the pair, or -ENODEV if the provider driver has gone away.
 */
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
    int ret;

    mutex_lock(&chan->indio_dev->info_exist_lock);
    if (chan->indio_dev->info == NULL) {
        ret = -ENODEV;
        goto err_unlock;
    }

    ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
    mutex_unlock(&chan->indio_dev->info_exist_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

/* Report the channel's type (voltage, temperature, ...) into @type. */
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
    int ret = 0;
    /* Need to verify underlying driver has not gone away */

    mutex_lock(&chan->indio_dev->info_exist_lock);
    if (chan->indio_dev->info == NULL) {
        ret = -ENODEV;
        goto err_unlock;
    }

    *type = chan->channel->type;
err_unlock:
    mutex_unlock(&chan->indio_dev->info_exist_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/sched.h>
#include <linux/poll.h>
/* A kfifo-backed iio_buffer; recovered from the embedded buffer member. */
struct iio_kfifo {
    struct iio_buffer buffer;
    struct kfifo kf;            /* backing FIFO of fixed-size records */
    struct mutex user_lock;     /* serialises reads vs. re-allocation */
    int update_needed;          /* re-alloc kf on next request_update */
};

#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)

/* (Re)allocate the backing kfifo: @length records of @bytes_per_datum. */
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
                                int bytes_per_datum, int length)
{
    if ((length == 0) || (bytes_per_datum == 0))
        return -EINVAL;

    return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
                         bytes_per_datum, GFP_KERNEL);
}
/*
 * Apply any pending length/bytes-per-datum change: re-allocate the kfifo if
 * parameters changed since the last update, otherwise just drain it.
 */
static int iio_request_update_kfifo(struct iio_buffer *r)
{
    int ret = 0;
    struct iio_kfifo *buf = iio_to_kfifo(r);

    mutex_lock(&buf->user_lock);
    if (buf->update_needed) {
        kfifo_free(&buf->kf);
        ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
                                   buf->buffer.length);
        buf->update_needed = false;
    } else {
        /* Parameters unchanged: discard stale data only. */
        kfifo_reset_out(&buf->kf);
    }
    mutex_unlock(&buf->user_lock);

    return ret;
}

/* Accessor for the configured buffer length (in records). */
static int iio_get_length_kfifo(struct iio_buffer *r)
{
    return r->length;
}
/* Standard sysfs attributes exposed under the "buffer" group. */
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

static struct attribute *iio_kfifo_attributes[] = {
    &dev_attr_length.attr,
    &dev_attr_enable.attr,
    NULL,
};

static struct attribute_group iio_kfifo_attribute_group = {
    .attrs = iio_kfifo_attributes,
    .name = "buffer",
};

/* Accessor for the record size in bytes. */
static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
{
    return r->bytes_per_datum;
}

/* Flag the FIFO for re-allocation on the next request_update. */
static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
    struct iio_kfifo *kf = iio_to_kfifo(r);
    kf->update_needed = true;
    return 0;
}

/* Change the record size; takes effect at the next request_update. */
static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
{
    if (r->bytes_per_datum != bpd) {
        r->bytes_per_datum = bpd;
        iio_mark_update_needed_kfifo(r);
    }
    return 0;
}

/* Change the buffer length; takes effect at the next request_update. */
static int iio_set_length_kfifo(struct iio_buffer *r, int length)
{
    /* Avoid an invalid state */
    if (length < 2)
        length = 2;
    if (r->length != length) {
        r->length = length;
        iio_mark_update_needed_kfifo(r);
    }
    return 0;
}
/*
 * Push one record into the FIFO and wake any poll()ing reader.
 * Returns -EBUSY when the FIFO is full.
 */
static int iio_store_to_kfifo(struct iio_buffer *r,
                              const void *data)
{
    int ret;
    struct iio_kfifo *kf = iio_to_kfifo(r);

    ret = kfifo_in(&kf->kf, data, 1);
    if (ret != 1)
        return -EBUSY;

    wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);

    return 0;
}

/*
 * Copy up to @n bytes out of the FIFO to userspace.  Returns the number of
 * bytes copied, -EINVAL when the FIFO is unallocated or @n is smaller than
 * one record, or a copy error.
 */
static int iio_read_first_n_kfifo(struct iio_buffer *r,
                                  size_t n, char __user *buf)
{
    int ret, copied;
    struct iio_kfifo *kf = iio_to_kfifo(r);

    if (mutex_lock_interruptible(&kf->user_lock))
        return -ERESTARTSYS;

    if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
        ret = -EINVAL;
    else
        ret = kfifo_to_user(&kf->kf, buf, n, &copied);

    mutex_unlock(&kf->user_lock);
    if (ret < 0)
        return ret;

    return copied;
}

/* poll() support: true when at least one record is queued. */
static bool iio_kfifo_buf_data_available(struct iio_buffer *r)
{
    struct iio_kfifo *kf = iio_to_kfifo(r);
    bool empty;

    mutex_lock(&kf->user_lock);
    empty = kfifo_is_empty(&kf->kf);
    mutex_unlock(&kf->user_lock);

    return !empty;
}
/* Final release callback: frees the FIFO storage and the wrapper itself. */
static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
{
    struct iio_kfifo *kf = iio_to_kfifo(buffer);

    mutex_destroy(&kf->user_lock);
    kfifo_free(&kf->kf);
    kfree(kf);
}

/* iio_buffer ops vtable wiring the kfifo implementation into the core. */
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
    .store_to = &iio_store_to_kfifo,
    .read_first_n = &iio_read_first_n_kfifo,
    .data_available = iio_kfifo_buf_data_available,
    .request_update = &iio_request_update_kfifo,
    .get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
    .set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
    .get_length = &iio_get_length_kfifo,
    .set_length = &iio_set_length_kfifo,
    .release = &iio_kfifo_buffer_release,
};

/*
 * Allocate a kfifo-backed iio_buffer for @indio_dev.  The FIFO storage
 * itself is allocated lazily by request_update (update_needed = true).
 * Returns NULL on allocation failure.
 */
struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
{
    struct iio_kfifo *kf;

    kf = kzalloc(sizeof *kf, GFP_KERNEL);
    if (!kf)
        return NULL;
    kf->update_needed = true;
    iio_buffer_init(&kf->buffer);
    kf->buffer.attrs = &iio_kfifo_attribute_group;
    kf->buffer.access = &kfifo_access_funcs;
    kf->buffer.length = 2;  /* minimal valid length; resized via sysfs */
    mutex_init(&kf->user_lock);
    return &kf->buffer;
}
EXPORT_SYMBOL(iio_kfifo_allocate);

/* Drop the buffer reference; release() above runs when it hits zero. */
void iio_kfifo_free(struct iio_buffer *r)
{
    iio_buffer_put(r);
}
EXPORT_SYMBOL(iio_kfifo_free);
MODULE_LICENSE("GPL");
MODULE_LICENSE("GPL");
// Legato kernel-module definition fragment: mangOH module built for the
// "green dv4" board variant.  The same source list is compiled for each
// board; the 'board' parameter selects the mapping at module load time.
cflags:
{
    // Needed for lsm6ds3 platform data type definition
    -I${MANGOH_ROOT}/linux_kernel_modules/lsm6ds3
}
sources:
{
    mangoh_module.c
    board_green_dv4.c
    board_red_dv2.c
    board_red_dv3.c
}
params:
{
    board = "green dv4"
}
// Same module definition, built for the "red dv2" board variant.
cflags:
{
    // Needed for lsm6ds3 platform data type definition
    -I${MANGOH_ROOT}/linux_kernel_modules/lsm6ds3
}
sources:
{
    mangoh_module.c
    board_green_dv4.c
    board_red_dv2.c
    board_red_dv3.c
}
params:
{
    board = "red dv2"
}
// Same module definition, built for the "red dv3" board variant.
cflags:
{
    // Needed for lsm6ds3 platform data type definition
    -I${MANGOH_ROOT}/linux_kernel_modules/lsm6ds3
}
sources:
{
    mangoh_module.c
    board_green_dv4.c
    board_red_dv2.c
    board_red_dv3.c
}
params:
{
    board = "red dv3"
}
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c/pca954x.h>
#include <linux/i2c/sx150x.h>
#include <linux/gpio.h>
#include "mangoh.h"
#include "lsm6ds3_platform_data.h"
/*
 *-----------------------------------------------------------------------------
 * Type definitions
 *-----------------------------------------------------------------------------
 */
/* I2C clients instantiated for the Green DV4; filled in by green_dv4_map(). */
struct green_dv4_platform_data {
    struct i2c_client* i2c_switch;      /* PCA9548 8-port switch */
    struct i2c_client* gpio_expander1;  /* SX1509 expanders (see #if 0 in map) */
    struct i2c_client* gpio_expander2;
    struct i2c_client* gpio_expander3;
    struct i2c_client* accelerometer;   /* LSM6DS3 */
};

/* Platform device plus its platform data in a single allocation. */
struct green_dv4_device
{
    struct platform_device pdev;
    struct green_dv4_platform_data pdata;
};

/*
 *-----------------------------------------------------------------------------
 * Function declarations
 *-----------------------------------------------------------------------------
 */
static int green_dv4_map(struct platform_device *pdev);
static int green_dv4_unmap(struct platform_device* pdev);
static void green_dv4_release_device(struct device* dev);

/*
 *-----------------------------------------------------------------------------
 * Constants
 *-----------------------------------------------------------------------------
 */
/* The I2C switch's downstream buses appear as adapters BASE+port. */
#define GREEN_DV4_I2C_SW_BASE_ADAPTER_ID    (1)
#define GREEN_DV4_I2C_SW_PORT_IOT0          (0)
#define GREEN_DV4_I2C_SW_PORT_IOT1          (1)
#define GREEN_DV4_I2C_SW_PORT_IOT2          (2)
#define GREEN_DV4_I2C_SW_PORT_USB_HUB       (3)
#define GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER1 (4)
#define GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER2 (5)
#define GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER3 (6)
#define GREEN_DV4_I2C_SW_PORT_BATTERY_CHARGER (7)

/*
 *-----------------------------------------------------------------------------
 * Variables
 *-----------------------------------------------------------------------------
 */
/* Board descriptor consumed by the generic mangoh module. */
const struct mangoh_descriptor green_dv4_descriptor = {
    .type = "mangoh green dv4",
    .map = green_dv4_map,
    .unmap = green_dv4_unmap,
};
/* One entry per switch port; deselect_on_exit releases the mux after use. */
static struct pca954x_platform_mode green_dv4_pca954x_adap_modes[] = {
    {.adap_id=GREEN_DV4_I2C_SW_BASE_ADAPTER_ID + 0, .deselect_on_exit=1, .class=0},
    {.adap_id=GREEN_DV4_I2C_SW_BASE_ADAPTER_ID + 1, .deselect_on_exit=1, .class=0},
    {.adap_id=GREEN_DV4_I2C_SW_BASE_ADAPTER_ID + 2, .deselect_on_exit=1, .class=0},
    {.adap_id=GREEN_DV4_I2C_SW_BASE_ADAPTER_ID + 3, .deselect_on_exit=1, .class=0},
    {.adap_id=GREEN_DV4_I2C_SW_BASE_ADAPTER_ID + 4, .deselect_on_exit=1, .class=0},
    {.adap_id=GREEN_DV4_I2C_SW_BASE_ADAPTER_ID + 5, .deselect_on_exit=1, .class=0},
    {.adap_id=GREEN_DV4_I2C_SW_BASE_ADAPTER_ID + 6, .deselect_on_exit=1, .class=0},
    {.adap_id=GREEN_DV4_I2C_SW_BASE_ADAPTER_ID + 7, .deselect_on_exit=1, .class=0},
};
static struct pca954x_platform_data green_dv4_pca954x_pdata = {
    green_dv4_pca954x_adap_modes,
    ARRAY_SIZE(green_dv4_pca954x_adap_modes),
};
static const struct i2c_board_info green_dv4_pca954x_device_info = {
    I2C_BOARD_INFO("pca9548", 0x71),
    .platform_data = &green_dv4_pca954x_pdata,
};
/*
 * SX1509 expander configs.  irq_base/irq_summary are -1 here; expander 2's
 * real values are filled in at map time (see the #if 0 block in
 * green_dv4_map()).
 */
static struct sx150x_platform_data green_dv4_expander1_platform_data = {
    .gpio_base         = -1,
    .oscio_is_gpo      = false,
    .io_pullup_ena     = 0,
    .io_pulldn_ena     = 0,
    .io_open_drain_ena = 0,
    .io_polarity       = 0,
    .irq_base          = -1,
    .irq_summary       = -1,
};
static const struct i2c_board_info green_dv4_expander1_devinfo = {
    I2C_BOARD_INFO("sx1509q", 0x3e),
    .platform_data = &green_dv4_expander1_platform_data,
};
static struct sx150x_platform_data green_dv4_expander2_platform_data = {
    .gpio_base    = -1,
    .oscio_is_gpo = false,
    /*
     * Enable pull-up resistors for CARD_DETECT_IOT0, CARD_DETECT_IOT2 and
     * CARD_DETECT_IOT1 respectively.
     */
    .io_pullup_ena     = ((1 << 11) | (1 << 12) | (1 << 13)),
    .io_pulldn_ena     = 0,
    .io_open_drain_ena = 0,
    /*
     * The interrupt lines driven by expanders 1 and 3 respectively are
     * driven low when an interrupt occurs, so invert the polarity
     */
    .io_polarity = ((1 << 0) | (1 << 14)),
    .irq_base    = -1,
    .irq_summary = -1,
};
static const struct i2c_board_info green_dv4_expander2_devinfo = {
    I2C_BOARD_INFO("sx1509q", 0x3f),
    .platform_data = &green_dv4_expander2_platform_data,
};
static struct sx150x_platform_data green_dv4_expander3_platform_data = {
    .gpio_base         = -1,
    .oscio_is_gpo      = false,
    .io_pullup_ena     = 0,
    .io_pulldn_ena     = 0,
    .io_open_drain_ena = 0,
    .io_polarity       = 0,
    .irq_base          = -1,
    .irq_summary       = -1,
};
static const struct i2c_board_info green_dv4_expander3_devinfo = {
    I2C_BOARD_INFO("sx1509q", 0x70),
    .platform_data = &green_dv4_expander3_platform_data,
};
/* LSM6DS3 accelerometer; data-ready signalled on interrupt pin 1. */
static struct lsm6ds3_platform_data green_dv4_accelerometer_platform_data = {
    .drdy_int_pin = 1,
};
static struct i2c_board_info green_dv4_accelerometer_devinfo = {
    I2C_BOARD_INFO("lsm6ds3", 0x6A),
    .platform_data = &green_dv4_accelerometer_platform_data,
};
static const char platform_device_name[] = "mangoh green dv4";
/*
*-----------------------------------------------------------------------------
* Public function definitions
*-----------------------------------------------------------------------------
*/
int green_dv4_create_device(struct platform_device** d)
{
struct green_dv4_device *dv4 = kzalloc(sizeof(struct green_dv4_device), GFP_KERNEL);
if (!dv4)
return -ENOMEM;
dv4->pdev.name = platform_device_name;
dv4->pdev.id = -1;
dv4->pdev.dev.platform_data = &dv4->pdata;
dv4->pdev.dev.release = green_dv4_release_device;
*d = &dv4->pdev;
return 0;
}
/*
*-----------------------------------------------------------------------------
* Static function definitions
*-----------------------------------------------------------------------------
*/
/* Return the GPIO number of an SX150x expander's first pin. */
static int get_sx150x_gpio_base(struct i2c_client *client)
{
    /*
     * This is kind of a hack. It depends on the fact that we know
     * that the clientdata of the gpio expander is a struct
     * sx150x_chip (type is private to the sx150x driver) and
     * knowing that the first element of that struct is a struct
     * gpio_chip.
     */
    struct gpio_chip *expander = i2c_get_clientdata(client);
    return expander->base;
}
/*
 * Instantiate the Green DV4's I2C devices (switch, accelerometer; the GPIO
 * expanders are currently disabled via #if 0).  Returns 0 or -ENODEV.
 * NOTE(review): on failure, devices created earlier in this function are
 * not unwound here - cleanup relies on green_dv4_unmap().
 */
static int green_dv4_map(struct platform_device *pdev)
{
    struct green_dv4_platform_data* pdata = dev_get_platdata(&pdev->dev);

    /* Get the main i2c adapter */
    struct i2c_adapter* adapter = i2c_get_adapter(0);
    if (!adapter) {
        dev_err(&pdev->dev, "Failed to get I2C adapter 0.\n");
        return -ENODEV;
    }

    /* Map the I2C switch */
    dev_dbg(&pdev->dev, "mapping i2c switch\n");
    pdata->i2c_switch = i2c_new_device(adapter, &green_dv4_pca954x_device_info);
    if (!pdata->i2c_switch) {
        dev_err(
            &pdev->dev,
            "Failed to register %s\n",
            green_dv4_pca954x_device_info.type);
        return -ENODEV;
    }

#if 0
    /*
     * Map SX1509 GPIO expander 2. Expander 2 has to be mapped first because
     * the interrupt pins of expanders 1 and 3 are connected to I/Os of
     * expander 2.
     */
    dev_dbg(&pdev->dev, "mapping gpio expander 2\n");
    /*
     * GPIOEXP_INT2 goes to "GPIO2" on the inner ring in the green dv4
     * schematic. The WP85 schematic maps this to GPIO 59.
     */
    green_dv4_expander2_platform_data.irq_summary = gpio_to_irq(59);
    /*
     * Currently we just hard code an IRQ base that was tested to have a
     * contiguous set of 16 available IRQs. TODO: Is there a better way to
     * find a contiguous set of IRQs?
     */
    green_dv4_expander2_platform_data.irq_base = 347;
    adapter = i2c_get_adapter(
        GREEN_DV4_I2C_SW_BASE_ADAPTER_ID +
        GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER2);
    if (!adapter) {
        dev_err(&pdev->dev, "No I2C bus %d.\n", GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER2);
        return -ENODEV;
    }
    pdata->gpio_expander2 = i2c_new_device(adapter, &green_dv4_expander2_devinfo);
    if (!pdata->gpio_expander2) {
        dev_err(&pdev->dev, "GPIO expander 2 is missing\n");
        return -ENODEV;
    }

    /* Map SX1509 GPIO expander 1 */
    dev_dbg(&pdev->dev, "mapping gpio expander 1\n");
    /*
     * GPIOEXP_INT1 goes to I/O0 on GPIO expander 2 in the green dv4
     * schematic.
     */
    green_dv4_expander1_platform_data.irq_summary =
        gpio_to_irq(get_sx150x_gpio_base(pdata->gpio_expander2) + 0);
    /*
     * Currently we just hard code an IRQ base that was tested to have a
     * contiguous set of 16 available IRQs. TODO: Is there a better way to
     * find a contiguous set of IRQs?
     */
    green_dv4_expander1_platform_data.irq_base = 154;
    adapter = i2c_get_adapter(
        GREEN_DV4_I2C_SW_BASE_ADAPTER_ID +
        GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER1);
    if (!adapter) {
        dev_err(&pdev->dev, "No I2C bus %d.\n", GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER1);
        return -ENODEV;
    }
    pdata->gpio_expander1 = i2c_new_device(adapter, &green_dv4_expander1_devinfo);
    if (!pdata->gpio_expander1) {
        dev_err(&pdev->dev, "GPIO expander 1 is missing\n");
        return -ENODEV;
    }

    /* Map SX1509 GPIO expander 3 */
    dev_dbg(&pdev->dev, "mapping gpio expander 3\n");
    /*
     * GPIOEXP_INT3 goes to I/O14 on GPIO expander 2 in the green dv4
     * schematic.
     */
    green_dv4_expander3_platform_data.irq_summary =
        gpio_to_irq(get_sx150x_gpio_base(pdata->gpio_expander2) + 14);
    /*
     * Currently we just hard code an IRQ base that was tested to have a
     * contiguous set of 16 available IRQs. TODO: Is there a better way to
     * find a contiguous set of IRQs?
     */
    green_dv4_expander3_platform_data.irq_base = 168;
    adapter = i2c_get_adapter(
        GREEN_DV4_I2C_SW_BASE_ADAPTER_ID +
        GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER3);
    if (!adapter) {
        dev_err(&pdev->dev, "No I2C bus %d.\n", GREEN_DV4_I2C_SW_PORT_GPIO_EXPANDER3);
        return -ENODEV;
    }
    pdata->gpio_expander3 = i2c_new_device(adapter, &green_dv4_expander3_devinfo);
    if (!pdata->gpio_expander3) {
        dev_err(&pdev->dev, "GPIO expander 3 is missing\n");
        return -ENODEV;
    }
#endif // GPIO expander not properly supported

    /* Map the I2C LSM6DS3 accelerometer */
    dev_dbg(&pdev->dev, "mapping lsm6ds3 accelerometer\n");
    /* Pin 10 of gpio expander 2 is connected to INT2 of the lsm6ds3 */
    /* Can't use the irq because the GPIO expander isn't controlled by a kernel driver
    green_dv4_accelerometer_devinfo.irq =
        gpio_to_irq(get_sx150x_gpio_base(pdata->gpio_expander2) + 10);
    */
    adapter = i2c_get_adapter(0);
    if (!adapter) {
        dev_err(&pdev->dev, "No I2C bus %d.\n", GREEN_DV4_I2C_SW_BASE_ADAPTER_ID);
        return -ENODEV;
    }
    pdata->accelerometer = i2c_new_device(adapter, &green_dv4_accelerometer_devinfo);
    if (!pdata->accelerometer) {
        dev_err(&pdev->dev, "Accelerometer is missing\n");
        return -ENODEV;
    }

    /*
     * TODO:
     * 3503 USB Hub: 0x08
     *   Looks like there is a driver in the wp85 kernel source at drivers/usb/misc/usb3503.c
     *   I'm not really sure what benefit is achieved through using this driver.
     * Battery Gauge: 0x55
     *   chip is BQ27621YZFR-G1A which is supported by this driver which is
     *   in the mainline linux kernel as
     *   drivers/power/supply/bq27xxx_battery*. Unfortunately, the power
     *   supply API has changed, so using this driver would require
     *   substantial backporting.
     *   http://www.ti.com/tool/bq27xxxsw-linux
     * Buck & Battery Charger: 0x6B
     *   chip is BQ24192IRGER. I haven't been able to find a linux kernel
     *   driver, but this looks like some relevant code:
     *   https://chromium.googlesource.com/chromiumos/platform/ec/+/master/driver/charger/bq24192.c
     */
    return 0;
}
/*
 * Unregister the board's I2C devices and drop the adapter references taken
 * by i2c_get_adapter() in green_dv4_map().
 *
 * Fix: i2c_unregister_device() frees the client, so the client's adapter
 * pointer must be read (and the adapter reference released) using a value
 * captured BEFORE the client is unregistered.  The original code
 * dereferenced each client after unregistering it - a use-after-free.
 */
static int green_dv4_unmap(struct platform_device* pdev)
{
    struct green_dv4_platform_data* pdata = dev_get_platdata(&pdev->dev);
    struct i2c_adapter* adapter;

    adapter = pdata->accelerometer->adapter;
    i2c_unregister_device(pdata->accelerometer);
    i2c_put_adapter(adapter);

#if 0
    adapter = pdata->gpio_expander3->adapter;
    i2c_unregister_device(pdata->gpio_expander3);
    i2c_put_adapter(adapter);

    adapter = pdata->gpio_expander1->adapter;
    i2c_unregister_device(pdata->gpio_expander1);
    i2c_put_adapter(adapter);

    adapter = pdata->gpio_expander2->adapter;
    i2c_unregister_device(pdata->gpio_expander2);
    i2c_put_adapter(adapter);
#endif // GPIO expander not properly supported

    adapter = pdata->i2c_switch->adapter;
    i2c_unregister_device(pdata->i2c_switch);
    i2c_put_adapter(adapter);

    return 0;
}
/* Device release callback: frees the combined allocation made in
 * green_dv4_create_device(). */
static void green_dv4_release_device(struct device* dev)
{
    struct platform_device *pdev;
    struct green_dv4_device *board;

    pdev = container_of(dev, struct platform_device, dev);
    board = container_of(pdev, struct green_dv4_device, pdev);
    kfree(board);
}
#ifndef BOARD_GREEN_DV4_H
#define BOARD_GREEN_DV4_H

#include "mangoh.h"

/* Board descriptor (map/unmap callbacks) for the mangOH Green DV4. */
extern const struct mangoh_descriptor green_dv4_descriptor;

/* Allocate the board's platform device; returns 0 or -ENOMEM. */
int green_dv4_create_device(struct platform_device** d);

#endif // BOARD_GREEN_DV4_H
#define DEBUG
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c/pca954x.h>
#include <linux/i2c/sx150x.h>
#include <linux/gpio.h>
#include "mangoh.h"
#include "lsm6ds3_platform_data.h"
/*
 *-----------------------------------------------------------------------------
 * Type definitions
 *-----------------------------------------------------------------------------
 */
/* I2C clients instantiated for the Red DV2; filled in by red_dv2_map(). */
struct red_dv2_platform_data {
    struct i2c_client* i2c_switch;    /* PCA9546 4-port switch */
    struct i2c_client* gpio_expander; /* SX1509 */
    struct i2c_client* accelerometer; /* LSM6DS3 */
    struct i2c_client* pressure;      /* BMP280 */
};

/* Platform device plus its platform data in a single allocation. */
struct red_dv2_device
{
    struct platform_device pdev;
    struct red_dv2_platform_data pdata;
};

/*
 *-----------------------------------------------------------------------------
 * Function declarations
 *-----------------------------------------------------------------------------
 */
static int red_dv2_map(struct platform_device *pdev);
static int red_dv2_unmap(struct platform_device* pdev);
static void red_dv2_release_device(struct device* dev);

/*
 *-----------------------------------------------------------------------------
 * Constants
 *-----------------------------------------------------------------------------
 */
/* The I2C switch's downstream buses appear as adapters BASE+port. */
#define RED_DV2_I2C_SW_BASE_ADAPTER_ID (1)
#define RED_DV2_I2C_SW_PORT_IOT0 (0)
#define RED_DV2_I2C_SW_PORT_USB_HUB (1)
#define RED_DV2_I2C_SW_PORT_GPIO_EXPANDER (2)
#define RED_DV2_I2C_SW_PORT_EXP (3) /* expansion header */

/*
 *-----------------------------------------------------------------------------
 * Variables
 *-----------------------------------------------------------------------------
 */
/* Board descriptor consumed by the generic mangoh module. */
const struct mangoh_descriptor red_dv2_descriptor = {
    .type = "mangoh red dv2",
    .map = red_dv2_map,
    .unmap = red_dv2_unmap,
};

/* One entry per switch port; deselect_on_exit releases the mux after use. */
static struct pca954x_platform_mode red_dv2_pca954x_adap_modes[] = {
    {.adap_id=RED_DV2_I2C_SW_BASE_ADAPTER_ID + 0, .deselect_on_exit=1, .class=0},
    {.adap_id=RED_DV2_I2C_SW_BASE_ADAPTER_ID + 1, .deselect_on_exit=1, .class=0},
    {.adap_id=RED_DV2_I2C_SW_BASE_ADAPTER_ID + 2, .deselect_on_exit=1, .class=0},
    {.adap_id=RED_DV2_I2C_SW_BASE_ADAPTER_ID + 3, .deselect_on_exit=1, .class=0},
};
static struct pca954x_platform_data red_dv2_pca954x_pdata = {
    red_dv2_pca954x_adap_modes,
    ARRAY_SIZE(red_dv2_pca954x_adap_modes),
};
static const struct i2c_board_info red_dv2_pca954x_device_info = {
    I2C_BOARD_INFO("pca9546", 0x71),
    .platform_data = &red_dv2_pca954x_pdata,
};
/* SX1509 expander; irq_summary/irq_base are filled in by red_dv2_map(). */
static struct sx150x_platform_data red_dv2_expander_platform_data = {
    .gpio_base         = -1,
    .oscio_is_gpo      = false,
    .io_pullup_ena     = 0,
    .io_pulldn_ena     = 0,
    .io_open_drain_ena = 0,
    .io_polarity       = 0,
    .irq_summary       = -1,
    .irq_base          = -1,
};
static const struct i2c_board_info red_dv2_expander_devinfo = {
    I2C_BOARD_INFO("sx1509q", 0x3e),
    .platform_data = &red_dv2_expander_platform_data,
    .irq = 0,
};
/* LSM6DS3 accelerometer; data-ready signalled on interrupt pin 1. */
static struct lsm6ds3_platform_data red_dv2_accelerometer_platform_data = {
    .drdy_int_pin = 1,
};
static struct i2c_board_info red_dv2_accelerometer_devinfo = {
    I2C_BOARD_INFO("lsm6ds3", 0x6A),
    .platform_data = &red_dv2_accelerometer_platform_data,
};
static struct i2c_board_info red_dv2_pressure_devinfo = {
    I2C_BOARD_INFO("bmp280", 0x76),
};
static const char platform_device_name[] = "mangoh red dv2";
/*
*-----------------------------------------------------------------------------
* Public function definitions
*-----------------------------------------------------------------------------
*/
int red_dv2_create_device(struct platform_device** d)
{
struct red_dv2_device *dv2 = kzalloc(sizeof(struct red_dv2_device), GFP_KERNEL);
if (!dv2)
return -ENOMEM;
dv2->pdev.name = platform_device_name;
dv2->pdev.id = -1;
dv2->pdev.dev.platform_data = &dv2->pdata;
dv2->pdev.dev.release = red_dv2_release_device;
*d = &dv2->pdev;
return 0;
}
/*
*-----------------------------------------------------------------------------
* Static function definitions
*-----------------------------------------------------------------------------
*/
/*
 * Return the GPIO number base of the sx150x expander behind @client.
 *
 * Hack: relies on the sx150x driver storing its private struct sx150x_chip
 * as the I2C clientdata, and on struct gpio_chip being that struct's first
 * member, so the clientdata pointer can be reinterpreted directly.
 */
static int get_sx150x_gpio_base(struct i2c_client *client)
{
	struct gpio_chip *chip = i2c_get_clientdata(client);

	return chip->base;
}
/*
 * Register the I2C devices on the mangOH Red DV2 board: the PCA9546 I2C
 * switch (main bus), the SX1509 GPIO expander (behind the switch), the
 * LSM6DS3 accelerometer and the BMP280 pressure sensor (main bus).
 * Invoked through the mangoh_descriptor .map hook from mangoh_probe().
 *
 * Returns 0 on success, -ENODEV if an adapter or device is missing.
 *
 * Fix: the "No I2C bus" error messages previously printed the wrong bus
 * number (the port index, or the base adapter ID, instead of the bus that
 * was actually requested).
 */
static int red_dv2_map(struct platform_device *pdev)
{
	struct red_dv2_platform_data* pdata = dev_get_platdata(&pdev->dev);

	/* Get the main i2c adapter */
	struct i2c_adapter* adapter = i2c_get_adapter(0);
	if (!adapter) {
		dev_err(&pdev->dev, "Failed to get I2C adapter 0.\n");
		return -ENODEV;
	}

	/* Map the I2C switch */
	dev_dbg(&pdev->dev, "mapping i2c switch\n");
	pdata->i2c_switch = i2c_new_device(adapter, &red_dv2_pca954x_device_info);
	if (!pdata->i2c_switch) {
		dev_err(
			&pdev->dev,
			"Failed to register %s\n",
			red_dv2_pca954x_device_info.type);
		return -ENODEV;
	}

	/* Map GPIO expander */
	dev_dbg(&pdev->dev, "mapping gpio expander\n");
	/*
	 * GPIOEXP_INT1 goes to GPIO32 on the CF3 inner ring which maps to
	 * GPIO30 in the WP85.
	 */
	red_dv2_expander_platform_data.irq_summary = gpio_to_irq(30);
	/*
	 * Currently we just hard code an IRQ base that was tested to have a
	 * contiguous set of 16 available IRQs. TODO: Is there a better way to
	 * find a contiguous set of IRQs?
	 */
	red_dv2_expander_platform_data.irq_base = 347;
	adapter = i2c_get_adapter(RED_DV2_I2C_SW_BASE_ADAPTER_ID +
				  RED_DV2_I2C_SW_PORT_GPIO_EXPANDER);
	if (!adapter) {
		/* Report the bus that was requested: base + port. */
		dev_err(&pdev->dev, "No I2C bus %d.\n",
			RED_DV2_I2C_SW_BASE_ADAPTER_ID +
			RED_DV2_I2C_SW_PORT_GPIO_EXPANDER);
		return -ENODEV;
	}
	pdata->gpio_expander = i2c_new_device(adapter, &red_dv2_expander_devinfo);
	if (!pdata->gpio_expander) {
		dev_err(&pdev->dev, "GPIO expander is missing\n");
		return -ENODEV;
	}

	/* Map the I2C LSM6DS3 accelerometer */
	dev_dbg(&pdev->dev, "mapping lsm6ds3 accelerometer\n");
	/* Pin 12 of the gpio expander is connected to INT2 of the lsm6ds3 */
	red_dv2_accelerometer_devinfo.irq =
		gpio_to_irq(get_sx150x_gpio_base(pdata->gpio_expander) + 12);
	adapter = i2c_get_adapter(0);
	if (!adapter) {
		/* Adapter 0 was requested, so report bus 0. */
		dev_err(&pdev->dev, "No I2C bus %d.\n", 0);
		return -ENODEV;
	}
	pdata->accelerometer = i2c_new_device(adapter, &red_dv2_accelerometer_devinfo);
	if (!pdata->accelerometer) {
		dev_err(&pdev->dev, "Accelerometer is missing\n");
		return -ENODEV;
	}

	/* Map the I2C BMP280 pressure sensor */
	dev_dbg(&pdev->dev, "mapping bmp280 pressure sensor\n");
	adapter = i2c_get_adapter(0);
	if (!adapter) {
		/* Adapter 0 was requested, so report bus 0. */
		dev_err(&pdev->dev, "No I2C bus %d.\n", 0);
		return -ENODEV;
	}
	pdata->pressure = i2c_new_device(adapter, &red_dv2_pressure_devinfo);
	if (!pdata->pressure) {
		dev_err(&pdev->dev, "Pressure sensor is missing\n");
		return -ENODEV;
	}

	/*
	 * TODO:
	 * Pressure Sensor: 0x76
	 *     chip is bmp280 by Bosch which has a driver in the mainline kernel
	 * 3503 USB Hub: 0x08
	 *     Looks like there is a driver in the wp85 kernel source at drivers/usb/misc/usb3503.c
	 *     I'm not really sure what benefit is achieved through using this driver.
	 * Battery Gauge: 0x64
	 *     chip is LTC29421 which is made by linear technologies (now Analog
	 *     Devices). I haven't found a linux kernel driver.
	 * Buck & Battery Charger: 0x6B
	 *     chip is BQ24296RGER
	 */
	return 0;
}
/*
 * Undo red_dv2_map(): unregister each I2C client in reverse registration
 * order and drop the adapter reference taken by i2c_get_adapter().
 *
 * Fix: each client's adapter pointer is saved *before* calling
 * i2c_unregister_device(), because unregistering frees the client — the
 * original code dereferenced the freed client (use-after-free) to fetch
 * the adapter for i2c_put_adapter().
 */
static int red_dv2_unmap(struct platform_device* pdev)
{
	struct red_dv2_platform_data* pdata = dev_get_platdata(&pdev->dev);
	struct i2c_adapter* adapter;

	adapter = pdata->pressure->adapter;
	i2c_unregister_device(pdata->pressure);
	i2c_put_adapter(adapter);

	adapter = pdata->accelerometer->adapter;
	i2c_unregister_device(pdata->accelerometer);
	i2c_put_adapter(adapter);

	adapter = pdata->gpio_expander->adapter;
	i2c_unregister_device(pdata->gpio_expander);
	i2c_put_adapter(adapter);

	adapter = pdata->i2c_switch->adapter;
	i2c_unregister_device(pdata->i2c_switch);
	i2c_put_adapter(adapter);

	return 0;
}
/*
 * Device .release hook: free the red_dv2_device allocated by
 * red_dv2_create_device() once the device's refcount drops to zero.
 */
static void red_dv2_release_device(struct device* dev)
{
	struct platform_device *pdev;
	struct red_dv2_device *board;

	pdev = container_of(dev, struct platform_device, dev);
	board = container_of(pdev, struct red_dv2_device, pdev);
	kfree(board);
}
#ifndef BOARD_RED_DV2_H
#define BOARD_RED_DV2_H
#include "mangoh.h"
/* Descriptor holding the map/unmap hooks for the mangOH Red DV2 board. */
extern const struct mangoh_descriptor red_dv2_descriptor;
/* Allocates the Red DV2 platform device into *d; returns 0 or -ENOMEM. */
int red_dv2_create_device(struct platform_device** d);
#endif // BOARD_RED_DV2_H
#define DEBUG
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c/pca954x.h>
#include <linux/i2c/sx150x.h>
#include <linux/gpio.h>
#include "mangoh.h"
#include "lsm6ds3_platform_data.h"
/*
*-----------------------------------------------------------------------------
* Type definitions
*-----------------------------------------------------------------------------
*/
/* I2C clients registered by red_dv3_map(), needed again by red_dv3_unmap().
 * Note: gpio_expander is currently unused (mapping is #if 0'd out below). */
struct red_dv3_platform_data {
struct i2c_client* i2c_switch;
struct i2c_client* gpio_expander;
struct i2c_client* accelerometer;
struct i2c_client* pressure;
};
/* Platform device plus its platform data in one allocation, so the device
 * .release hook can kfree() the whole thing at once. */
struct red_dv3_device
{
struct platform_device pdev;
struct red_dv3_platform_data pdata;
};
/*
 *-----------------------------------------------------------------------------
 * Function declarations
 *-----------------------------------------------------------------------------
 */
static int red_dv3_map(struct platform_device *pdev);
static int red_dv3_unmap(struct platform_device* pdev);
static void red_dv3_release_device(struct device* dev);
/*
 *-----------------------------------------------------------------------------
 * Constants
 *-----------------------------------------------------------------------------
 */
/* First adapter number assigned to the PCA9546 switch's downstream buses. */
#define RED_DV3_I2C_SW_BASE_ADAPTER_ID (1)
/* Downstream port indices on the I2C switch; bus number = base + port. */
#define RED_DV3_I2C_SW_PORT_IOT0 (0)
#define RED_DV3_I2C_SW_PORT_USB_HUB (1)
#define RED_DV3_I2C_SW_PORT_GPIO_EXPANDER (2)
#define RED_DV3_I2C_SW_PORT_EXP (3) /* expansion header */
/*
*-----------------------------------------------------------------------------
* Variables
*-----------------------------------------------------------------------------
*/
/* Board descriptor consumed by mangoh_probe()/mangoh_remove() via the
 * mangoh_ids table in mangoh.c. */
const struct mangoh_descriptor red_dv3_descriptor = {
.type = "mangoh red dv3",
.map = red_dv3_map,
.unmap = red_dv3_unmap,
};
/* One entry per downstream port of the PCA9546 switch; each port becomes
 * its own I2C adapter numbered from the base adapter ID. */
static struct pca954x_platform_mode red_dv3_pca954x_adap_modes[] = {
{.adap_id=RED_DV3_I2C_SW_BASE_ADAPTER_ID + 0, .deselect_on_exit=1, .class=0},
{.adap_id=RED_DV3_I2C_SW_BASE_ADAPTER_ID + 1, .deselect_on_exit=1, .class=0},
{.adap_id=RED_DV3_I2C_SW_BASE_ADAPTER_ID + 2, .deselect_on_exit=1, .class=0},
{.adap_id=RED_DV3_I2C_SW_BASE_ADAPTER_ID + 3, .deselect_on_exit=1, .class=0},
};
static struct pca954x_platform_data red_dv3_pca954x_pdata = {
red_dv3_pca954x_adap_modes,
ARRAY_SIZE(red_dv3_pca954x_adap_modes),
};
/* PCA9546 I2C switch at address 0x71 on the main bus. */
static const struct i2c_board_info red_dv3_pca954x_device_info = {
I2C_BOARD_INFO("pca9546", 0x71),
.platform_data = &red_dv3_pca954x_pdata,
};
/* SX1509 GPIO expander configuration. irq_summary/irq_base start as -1;
 * the code that would fill them in is currently #if 0'd in red_dv3_map(). */
static struct sx150x_platform_data red_dv3_expander_platform_data = {
.gpio_base = -1,
.oscio_is_gpo = false,
.io_pullup_ena = 0,
.io_pulldn_ena = 0,
.io_open_drain_ena = 0,
.io_polarity = 0,
.irq_summary = -1,
.irq_base = -1,
};
static const struct i2c_board_info red_dv3_expander_devinfo = {
I2C_BOARD_INFO("sx1509q", 0x3e),
.platform_data = &red_dv3_expander_platform_data,
.irq = 0,
};
/* BMI160 accelerometer at 0x68 on the main bus (DV3 replaces the DV2's
 * LSM6DS3). */
static struct i2c_board_info red_dv3_accelerometer_devinfo = {
I2C_BOARD_INFO("bmi160", 0x68),
};
/* BMP280 pressure sensor at 0x76 on the main bus. */
static struct i2c_board_info red_dv3_pressure_devinfo = {
I2C_BOARD_INFO("bmp280", 0x76),
};
/* Must match the .name entry in mangoh_ids for this board. */
static const char platform_device_name[] = "mangoh red dv3";
/*
*-----------------------------------------------------------------------------
* Public function definitions
*-----------------------------------------------------------------------------
*/
int red_dv3_create_device(struct platform_device** d)
{
struct red_dv3_device *dv3 = kzalloc(sizeof(struct red_dv3_device), GFP_KERNEL);
if (!dv3)
return -ENOMEM;
dv3->pdev.name = platform_device_name;
dv3->pdev.id = -1;
dv3->pdev.dev.platform_data = &dv3->pdata;
dv3->pdev.dev.release = red_dv3_release_device;
*d = &dv3->pdev;
return 0;
}
/*
*-----------------------------------------------------------------------------
* Static function definitions
*-----------------------------------------------------------------------------
*/
/*
 * Register the I2C devices on the mangOH Red DV3 board: the PCA9546 I2C
 * switch, the BMI160 accelerometer and the BMP280 pressure sensor (all
 * reached via the main bus; the SX1509 GPIO expander section is disabled).
 * Invoked through the mangoh_descriptor .map hook from mangoh_probe().
 * Returns 0 on success, -ENODEV if an adapter or device is missing.
 * NOTE(review): devices registered before a failure are not unregistered on
 * the error paths — presumably cleanup is left to module unload; confirm.
 */
static int red_dv3_map(struct platform_device *pdev)
{
struct red_dv3_platform_data* pdata = dev_get_platdata(&pdev->dev);
/* Get the main i2c adapter */
struct i2c_adapter* adapter = i2c_get_adapter(0);
if (!adapter) {
dev_err(&pdev->dev, "Failed to get I2C adapter 0.\n");
return -ENODEV;
}
/* Map the I2C switch */
dev_dbg(&pdev->dev, "mapping i2c switch\n");
pdata->i2c_switch = i2c_new_device(adapter, &red_dv3_pca954x_device_info);
if (!pdata->i2c_switch) {
dev_err(
&pdev->dev,
"Failed to register %s\n",
red_dv3_pca954x_device_info.type);
return -ENODEV;
}
/* Disabled: GPIO expander registration (see #endif note below). */
#if 0
/* Map GPIO expander */
dev_dbg(&pdev->dev, "mapping gpio expander\n");
/*
 * GPIOEXP_INT1 goes to GPIO32 on the CF3 inner ring which maps to
 * GPIO30 in the WP85.
 */
red_dv3_expander_platform_data.irq_summary = gpio_to_irq(30);
/*
 * Currently we just hard code an IRQ base that was tested to have a
 * contiguous set of 16 available IRQs. TODO: Is there a better way to
 * find a contiguous set of IRQs?
 */
red_dv3_expander_platform_data.irq_base = 347;
adapter = i2c_get_adapter(RED_DV3_I2C_SW_BASE_ADAPTER_ID +
RED_DV3_I2C_SW_PORT_GPIO_EXPANDER);
if (!adapter) {
dev_err(&pdev->dev, "No I2C bus %d.\n",
RED_DV3_I2C_SW_BASE_ADAPTER_ID +
RED_DV3_I2C_SW_PORT_GPIO_EXPANDER);
return -ENODEV;
}
pdata->gpio_expander = i2c_new_device(adapter, &red_dv3_expander_devinfo);
if (!pdata->gpio_expander) {
dev_err(&pdev->dev, "GPIO expander is missing\n");
return -ENODEV;
}
#endif // GPIO expander not properly supported
/* Map the I2C BMI160 accelerometer */
dev_dbg(&pdev->dev, "mapping bmi160 accelerometer\n");
/*
 * Pins 11 and 12 of the gpio expander are connected to bmi160's INT1
 * and INT2 pins respectively. It does not appear that the bmi160 driver
 * makes use of these interrupt pins.
 */
adapter = i2c_get_adapter(0);
if (!adapter) {
dev_err(&pdev->dev, "No I2C bus %d.\n", 0);
return -ENODEV;
}
pdata->accelerometer = i2c_new_device(adapter, &red_dv3_accelerometer_devinfo);
if (!pdata->accelerometer) {
dev_err(&pdev->dev, "Accelerometer is missing\n");
return -ENODEV;
}
/* Map the I2C BMP280 pressure sensor */
dev_dbg(&pdev->dev, "mapping bmp280 pressure sensor\n");
adapter = i2c_get_adapter(0);
if (!adapter) {
dev_err(&pdev->dev, "No I2C bus %d.\n", 0);
return -ENODEV;
}
pdata->pressure = i2c_new_device(adapter, &red_dv3_pressure_devinfo);
if (!pdata->pressure) {
dev_err(&pdev->dev, "Pressure sensor is missing\n");
return -ENODEV;
}
/*
 * TODO:
 * 3503 USB Hub: 0x08
 * Looks like there is a driver in the wp85 kernel source at drivers/usb/misc/usb3503.c
 * I'm not really sure what benefit is achieved through using this driver.
 * Battery Gauge: 0x64
 * chip is LTC29421 which is made by linear technologies (now Analog
 * Devices). I haven't found a linux kernel driver.
 * Buck & Battery Charger: 0x6B
 * chip is BQ24296RGER
 */
return 0;
}
/*
 * Undo red_dv3_map(): unregister each I2C client in reverse registration
 * order and drop the adapter reference taken by i2c_get_adapter(). The GPIO
 * expander section stays disabled to mirror red_dv3_map().
 *
 * Fix: each client's adapter pointer is saved *before* calling
 * i2c_unregister_device(), because unregistering frees the client — the
 * original code dereferenced the freed client (use-after-free) to fetch
 * the adapter for i2c_put_adapter().
 */
static int red_dv3_unmap(struct platform_device* pdev)
{
	struct red_dv3_platform_data* pdata = dev_get_platdata(&pdev->dev);
	struct i2c_adapter* adapter;

	adapter = pdata->pressure->adapter;
	i2c_unregister_device(pdata->pressure);
	i2c_put_adapter(adapter);

	adapter = pdata->accelerometer->adapter;
	i2c_unregister_device(pdata->accelerometer);
	i2c_put_adapter(adapter);

#if 0
	adapter = pdata->gpio_expander->adapter;
	i2c_unregister_device(pdata->gpio_expander);
	i2c_put_adapter(adapter);
#endif // GPIO expander not properly supported

	adapter = pdata->i2c_switch->adapter;
	i2c_unregister_device(pdata->i2c_switch);
	i2c_put_adapter(adapter);

	return 0;
}
/*
 * Device .release hook: free the red_dv3_device allocated by
 * red_dv3_create_device() once the device's refcount drops to zero.
 */
static void red_dv3_release_device(struct device* dev)
{
	struct platform_device *pdev;
	struct red_dv3_device *board;

	pdev = container_of(dev, struct platform_device, dev);
	board = container_of(pdev, struct red_dv3_device, pdev);
	kfree(board);
}
#ifndef BOARD_RED_DV3_H
#define BOARD_RED_DV3_H
#include "mangoh.h"
/* Descriptor holding the map/unmap hooks for the mangOH Red DV3 board. */
extern const struct mangoh_descriptor red_dv3_descriptor;
/* Allocates the Red DV3 platform device into *d; returns 0 or -ENOMEM. */
int red_dv3_create_device(struct platform_device** d);
#endif // BOARD_RED_DV3_H
#ifndef MANGOH_H
#define MANGOH_H
/* NOTE(review): declared here but no definition or caller is visible in
 * this file — confirm it is still used before relying on it. */
int mangoh_find_contiguous_irqs(unsigned int num_required, unsigned long irq_flags);
/* Per-board hooks: map registers the board's I2C devices, unmap tears them
 * down. Stored in the platform_device_id .driver_data of mangoh_ids. */
struct mangoh_descriptor {
char* type; // TODO: why is this needed?
int (*map)(struct platform_device* pdev);
int (*unmap)(struct platform_device* pdev);
};
#endif // MANGOH_H
#define DEBUG
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include "mangoh.h"
#include "board_green_dv4.h"
#include "board_red_dv2.h"
#include "board_red_dv3.h"
/*
*-----------------------------------------------------------------------------
* Type definitions
*-----------------------------------------------------------------------------
*/
/*
*-----------------------------------------------------------------------------
* Function declarations
*-----------------------------------------------------------------------------
*/
/* Platform-driver callbacks, defined below and referenced by mangoh_driver. */
static int mangoh_probe(struct platform_device* pdev);
static int mangoh_remove(struct platform_device* pdev);
/*
*-----------------------------------------------------------------------------
* Variables
*-----------------------------------------------------------------------------
*/
/* Module parameter definitions */
/* Selects which board's devices to register; one of "green dv4",
 * "red dv2" or "red dv3" (see mangoh_init()). */
static char *board = "green dv4";
module_param(board, charp, S_IRUGO);
/* Fix: the description must name the actual parameter ("board"); the
 * original said "revision", which does not exist. */
MODULE_PARM_DESC(board, "mangOH hardware board model and revision");
/* The single board device created by mangoh_init(); NULL until then. */
static struct platform_device* mangoh_device;
/* Matches the platform device name to the board descriptor whose map/unmap
 * hooks mangoh_probe()/mangoh_remove() invoke. */
static const struct platform_device_id mangoh_ids[] = {
{.name="mangoh green dv4", .driver_data=(kernel_ulong_t)&green_dv4_descriptor},
{.name="mangoh red dv2", .driver_data=(kernel_ulong_t)&red_dv2_descriptor},
{.name="mangoh red dv3", .driver_data=(kernel_ulong_t)&red_dv3_descriptor},
{},
};
MODULE_DEVICE_TABLE(platform, mangoh_ids);
static struct platform_driver mangoh_driver = {
.probe = mangoh_probe,
.remove = mangoh_remove,
.driver = {
.name = "mangoh",
.owner = THIS_MODULE,
.bus = &platform_bus_type,
},
.id_table = mangoh_ids,
};
/*
*-----------------------------------------------------------------------------
* Function definitions
*-----------------------------------------------------------------------------
*/
/*
 * Module init: create the platform device selected by the "board" module
 * parameter, then register the mangoh platform driver and the device.
 * Returns 0 on success, -ENODEV for an unknown board or a device
 * registration failure, or the error from device creation / driver
 * registration.
 *
 * Fixes over the original:
 *  - the return value of platform_driver_register() is now checked
 *    instead of being silently ignored;
 *  - the three identical per-board error branches are collapsed into one.
 */
static int __init mangoh_init(void)
{
	int rc;

	printk(KERN_DEBUG "Called %s\n", __func__);

	if (strcmp(board, "green dv4") == 0) {
		rc = green_dv4_create_device(&mangoh_device);
	} else if (strcmp(board, "red dv2") == 0) {
		rc = red_dv2_create_device(&mangoh_device);
	} else if (strcmp(board, "red dv3") == 0) {
		rc = red_dv3_create_device(&mangoh_device);
	} else {
		pr_err(
			"%s: unsupported board '%s'.\n",
			__func__,
			board);
		return -ENODEV;
	}

	if (rc != 0) {
		pr_err(
			"%s: Couldn't create device for '%s'.\n",
			__func__,
			board);
		return rc;
	}
	printk(KERN_DEBUG "mangoh: created device for board '%s'\n", board);

	rc = platform_driver_register(&mangoh_driver);
	if (rc != 0) {
		pr_err("%s: Couldn't register platform driver.\n", __func__);
		return rc;
	}
	printk(KERN_DEBUG "mangoh: registered platform driver\n");

	if (platform_device_register(mangoh_device)) {
		platform_driver_unregister(&mangoh_driver);
		return -ENODEV;
	}
	printk(KERN_DEBUG "mangoh: registered platform device\n");

	return 0;
}
/*
 * Module exit: unregister the board device (when one was created) and the
 * platform driver registered by mangoh_init().
 */
static void __exit mangoh_exit(void)
{
	printk(KERN_DEBUG "Called %s\n", __func__);

	if (mangoh_device)
		platform_device_unregister(mangoh_device);
	platform_driver_unregister(&mangoh_driver);
}
static int mangoh_probe(struct platform_device* pdev)
{
struct mangoh_descriptor* desc =
(struct mangoh_descriptor*)
platform_get_device_id(pdev)->driver_data;
platform_set_drvdata(pdev, desc);
return desc->map(pdev);
}
/*
 * Driver .remove: run the board descriptor's unmap hook (stored as driver
 * data by mangoh_probe()) and log whether teardown succeeded.
 */
static int mangoh_remove(struct platform_device* pdev)
{
	struct mangoh_descriptor* desc = platform_get_drvdata(pdev);
	int rc = desc->unmap(pdev);

	dev_info(&pdev->dev, rc ? "Removal failed.\n" : "Removed.\n");
	return rc;
}
module_init(mangoh_init);
module_exit(mangoh_exit);
MODULE_ALIAS(PLATFORM_MODULE_PREFIX "mangoh");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sierra Wireless");
/* Fix: the driver supports mangOH Green and Red boards (see mangoh_ids),
 * not just Green as the original description claimed. */
MODULE_DESCRIPTION("Add devices on mangOH hardware boards");
MODULE_VERSION("1.0");
//--------------------------------------------------------------------------------------------------
// mangOH Green system definition that extends the wifi sdef from Legato,
// including essential modem and positioning services plus available mangOH apps.
//
// Copyright (C) Sierra Wireless Inc. Use of this work is subject to license.
//--------------------------------------------------------------------------------------------------
buildVars:
{
LEGATO_WIFI_ROOT = $LEGATO_ROOT/modules/WiFi
LEGATO_WIFI_PA = ti
}
#include "$LEGATO_ROOT/modules/WiFi/wifi.sdef"
apps:
{
// Platform services.
$LEGATO_ROOT/apps/platformServices/airVantage/avcService
$LEGATO_ROOT/apps/platformServices/audioService
$LEGATO_ROOT/apps/platformServices/cellNetService
$LEGATO_ROOT/apps/platformServices/dataConnectionService
$LEGATO_ROOT/apps/platformServices/fwupdateService
$LEGATO_ROOT/apps/platformServices/modemService
$LEGATO_ROOT/apps/platformServices/positioningService
$LEGATO_ROOT/apps/platformServices/powerMgr
$LEGATO_ROOT/apps/platformServices/secStore
$LEGATO_ROOT/apps/platformServices/smsInboxService
$LEGATO_ROOT/apps/platformServices/voiceCallService
$LEGATO_ROOT/apps/platformServices/gpioService
$LEGATO_ROOT/apps/platformServices/atClient
$LEGATO_ROOT/apps/platformServices/atServer
$LEGATO_ROOT/apps/platformServices/spiService
$LEGATO_ROOT/apps/platformServices/devMode
$LEGATO_WIFI_ROOT/service/wifiService
$LEGATO_WIFI_ROOT/apps/sample/wifiClientTest/wifiClientTest
$LEGATO_WIFI_ROOT/apps/sample/wifiApTest/wifiApTest
$LEGATO_WIFI_ROOT/apps/sample/wifiWebAp/wifiWebAp
$LEGATO_WIFI_ROOT/apps/tools/wifi/wifi
$MANGOH_ROOT/apps/GpioExpander/gpioExpanderService/gpioExpanderServiceGreen
$MANGOH_ROOT/apps/MuxControl/muxCtrlService/muxCtrlService
$MANGOH_ROOT/apps/MuxControl/tools/muxCtrlTools
$MANGOH_ROOT/apps/MqttClient/mqttClient
$MANGOH_ROOT/apps/DataRouter/dataRouter
$MANGOH_ROOT/apps/DataRouter/drTool/drTool
$MANGOH_ROOT/apps/ArduinoBridge/arduinoBridge
// Data Router is disabled because the published version doesn't build against Legato 17.05
// $MANGOH_ROOT/apps/DataRouter/dataRouter
// $MANGOH_ROOT/apps/DataRouter/drTool/drTool
// $MANGOH_ROOT/apps/ArduinoBridge/arduinoBridge
$MANGOH_ROOT/apps/SocialService/socialService
// Command-line tools.
$LEGATO_ROOT/apps/tools/tools
$MANGOH_ROOT/apps/Heartbeat/heartbeatGreen
$LEGATO_ROOT/apps/tools/devMode
}
commands:
{
cm = tools:/bin/cm
fwupdate = tools:/bin/fwupdate
secstore = tools:/bin/secstore
pmtool = tools:/bin/pmtool
gnss = tools:/bin/gnss
uartMode = tools:/bin/uartMode
wifi = wifi:/bin/wifi
mux = muxCtrlTools:/bin/mux
dr = drTool:/bin/dr
// dr = drTool:/bin/dr
twitter = socialService:/bin/twitter
}
bindings:
{
    <root>.le_fwupdate -> fwupdateService.le_fwupdate
}
kernelModules:
{
    $MANGOH_ROOT/linux_kernel_modules/mangoh/9-mangoh_green_dv4
}
interfaceSearch:
{
interfaces/modemServices
interfaces/positioning
$MANGOH_ROOT/linux_kernel_modules/lsm6ds3/0-lsm6ds3
$MANGOH_ROOT/linux_kernel_modules/lsm6ds3/1-lsm6ds3-i2c
}
//--------------------------------------------------------------------------------------------------
// mangOH Red system definition that extends the wifi sdef from Legato,
// including essential modem and positioning services plus available mangOH apps.
//
// Copyright (C) Sierra Wireless Inc. Use of this work is subject to license.
//--------------------------------------------------------------------------------------------------
#include "$LEGATO_ROOT/legatoTargetConfig.sinc"
#include "$LEGATO_ROOT/apps/platformServices/defaultAirVantage.sinc"
#include "$LEGATO_ROOT/modules/WiFi/wifi.sdef"
apps:
{
// Platform services.
// $LEGATO_ROOT/apps/platformServices/airVantage/avcService
$LEGATO_ROOT/apps/platformServices/audioService
$LEGATO_ROOT/apps/platformServices/cellNetService
$LEGATO_ROOT/apps/platformServices/dataConnectionService
$LEGATO_ROOT/apps/platformServices/fwupdateService
$LEGATO_ROOT/apps/platformServices/modemService
$LEGATO_ROOT/apps/platformServices/positioningService
$LEGATO_ROOT/apps/platformServices/powerMgr
$LEGATO_ROOT/apps/platformServices/secStore
$LEGATO_ROOT/apps/platformServices/smsInboxService
$LEGATO_ROOT/apps/platformServices/voiceCallService
$LEGATO_ROOT/apps/platformServices/gpioService
// $LEGATO_ROOT/apps/platformServices/atClient
// $LEGATO_ROOT/apps/platformServices/atServer
$LEGATO_ROOT/apps/platformServices/spiService
$LEGATO_ROOT/apps/tools/devMode
$MANGOH_ROOT/apps/GpioExpander/gpioExpanderService/gpioExpanderServiceRed
// TODO: MuxControl was written specifically for mangOH Green. Perhaps mangOH Red needs a
// similar facility to manage uart routing.
// $MANGOH_ROOT/apps/MuxControl/muxCtrlService/muxCtrlService
// $MANGOH_ROOT/apps/MuxControl/tools/muxCtrlTools
$MANGOH_ROOT/apps/MqttClient/mqttClient
// $MANGOH_ROOT/apps/DataRouter/dataRouter
// $MANGOH_ROOT/apps/DataRouter/drTool/drTool
// Data Router is disabled because the published version doesn't build against Legato 17.05
// $MANGOH_ROOT/apps/DataRouter/dataRouter
// $MANGOH_ROOT/apps/DataRouter/drTool/drTool
$MANGOH_ROOT/apps/SocialService/socialService
// Command-line tools.
$LEGATO_ROOT/apps/tools/tools
    $MANGOH_ROOT/apps/RedSensorToCloud/redSensorToCloud
    $MANGOH_ROOT/apps/Heartbeat/heartbeatRed
}
kernelModules:
{
$MANGOH_ROOT/linux_kernel_modules/cp2130/spi-cp2130.mdef
$MANGOH_ROOT/linux_kernel_modules/spisvc/spisvc.mdef
$MANGOH_ROOT/linux_kernel_modules/mt7697wifi/mt7697wifi_core.mdef
$MANGOH_ROOT/linux_kernel_modules/mt7697q/mt7697q.mdef
$LEGATO_ROOT/apps/tools/devMode
}
buildVars:
{
    LEGATO_KERNELROOT = /home/david/kernel
}
commands:
{
    cm = tools:/bin/cm
    fwupdate = tools:/bin/fwupdate
    secstore = tools:/bin/secstore
    pmtool = tools:/bin/pmtool
    gnss = tools:/bin/gnss
    uartMode = tools:/bin/uartMode
    // mux = muxCtrlTools:/bin/mux
    // dr = drTool:/bin/dr
    twitter = socialService:/bin/twitter
}
kernelModules:
{
    $MANGOH_ROOT/linux_kernel_modules/mangoh/9-mangoh_red_dv3
    // $MANGOH_ROOT/linux_kernel_modules/mangoh/9-mangoh_red_dv2
    $MANGOH_ROOT/linux_kernel_modules/cp2130/0-cp2130
    // Required for bmp280 & bmi160
    $MANGOH_ROOT/linux_kernel_modules/iio/0-iio
    $MANGOH_ROOT/linux_kernel_modules/iio/1-iio-kfifo-buf
    // Required for bmi160
    $MANGOH_ROOT/linux_kernel_modules/iio/2-iio-triggered-buffer
    $MANGOH_ROOT/linux_kernel_modules/bmp280/2-bmp280
    $MANGOH_ROOT/linux_kernel_modules/bmp280/3-bmp280-i2c
}
bindings:
{
<root>.le_fwupdate -> fwupdateService.le_fwupdate
}
// Only on mangOH Red DV2
// $MANGOH_ROOT/linux_kernel_modules/lsm6ds3/0-lsm6ds3
// $MANGOH_ROOT/linux_kernel_modules/lsm6ds3/1-lsm6ds3-i2c
interfaceSearch:
{
interfaces/modemServices
interfaces/positioning
// Only on mangOH Red DV3
$MANGOH_ROOT/linux_kernel_modules/bmi160/3-bmi160
$MANGOH_ROOT/linux_kernel_modules/bmi160/4-bmi160-i2c
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment