summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlisher Alikhodjaev <alisher@google.com>2021-03-03 17:33:03 -0800
committerAlisher Alikhodjaev <alisher@google.com>2021-03-03 17:33:03 -0800
commit0ba27cc9f8a6b2e6ecba2e9c5fb4cf57c46d8b0a (patch)
tree78a822689c95a8836d65230e95340cdd0e0cacc3
parent35fe13746880ea5fae3a29a9892fc50b330fedde (diff)
downloadnfc-0ba27cc9f8a6b2e6ecba2e9c5fb4cf57c46d8b0a.tar.gz
nfc: add NFC, eSE/ESIM drivers
The drivers for ese devices were delivered by STMicroelectronics as is. The nfc drivers also maintained by ST and were copied from android-gs-pixel-5.10 branch SHA1 8c6e2b93e4dc78af6428d4e3338f47cd6bd4ac14 Bug: 168350321 Signed-off-by: Arach Mohammed-Brahim <arach.mohammed.brahim@st.com> Signed-off-by: Alisher Alikhodjaev <alisher@google.com> Change-Id: I558f51de5ea5786c502c4bc4bd811e390e65c59f
-rw-r--r--ese/Makefile7
-rw-r--r--ese/st33spi.c1116
-rw-r--r--ese/st54spi.c1138
-rw-r--r--st21nfc.c1206
-rw-r--r--st21nfc.h25
5 files changed, 3492 insertions, 0 deletions
diff --git a/ese/Makefile b/ese/Makefile
new file mode 100644
index 0000000..ad53668
--- /dev/null
+++ b/ese/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for eSE devices
+#
+
+obj-$(CONFIG_ESE_ST54) += st54spi.o
+obj-$(CONFIG_ESE_ST33) += st33spi.o
diff --git a/ese/st33spi.c b/ese/st33spi.c
new file mode 100644
index 0000000..f2c057f
--- /dev/null
+++ b/ese/st33spi.c
@@ -0,0 +1,1116 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple synchronous userspace interface to SPI devices
+ *
+ * Copyright (C) 2006 SWAPP
+ * Andrea Paterniani <a.paterniani@swapp-eng.it>
+ * Copyright (C) 2007 David Brownell (simplification, cleanup)
+ *
+ */
+/*
+ * Modified by ST Microelectronics.
+ * <arach.mohammed.brahim@st.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spidev.h>
+
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <linux/uaccess.h>
+
+#undef ST33NFC_QCOM
+
+#ifdef ST33NFC_QCOM
+#include <linux/spi/spi-geni-qcom.h>
+#endif /* ST33NFC_QCOM */
+
+#ifndef GKI_MODULE
+#define GKI_MODULE 1
+#endif
+
+#include "../st21nfc.h"
+
+/*
+ * This supports access to SPI devices using normal userspace I/O calls.
+ * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
+ * and often mask message boundaries, full SPI support requires full duplex
+ * transfers. There are several kinds of internal message boundaries to
+ * handle chipselect management and other protocol options.
+ *
+ * SPI has a character major number assigned. We allocate minor numbers
+ * dynamically using a bitmask. You must use hotplug tools, such as udev
+ * (or mdev with busybox) to create and destroy the /dev/st33spi device
+ * nodes, since there is no fixed association of minor numbers with any
+ * particular SPI bus or device.
+ */
+static int st33spi_major;
+#define N_SPI_MINORS 2 /* ... up to 256 */
+
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
+
+#define ST33SPI_IOC_RD_POWER _IOR(SPI_IOC_MAGIC, 99, __u32)
+#define ST33SPI_IOC_WR_POWER _IOW(SPI_IOC_MAGIC, 99, __u32)
+
+/* Bit masks for spi_device.mode management. Note that incorrect
+ * settings for some settings can cause *lots* of trouble for other
+ * devices on a shared bus:
+ *
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
+ */
+#define SPI_MODE_MASK \
+ (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_3WIRE | \
+ SPI_LOOP | SPI_NO_CS | SPI_READY | SPI_TX_DUAL | SPI_TX_QUAD | \
+ SPI_RX_DUAL | SPI_RX_QUAD)
+
/*
 * Per-device driver state; one instance per bound SPI device / minor,
 * looked up from the global device_list on open().
 */
struct st33spi_data {
	dev_t devt;			/* char-dev number backing /dev/st33spi */
	spinlock_t spi_lock;		/* guards @spi/@spi_reset against remove() */
	struct spi_device *spi;		/* NULL once the device is unbound */
	struct spi_device *spi_reset;
	struct list_head device_entry;	/* link in global device_list */

	/* TX/RX buffers are NULL unless this device is open (users > 0) */
	struct mutex buf_lock;		/* serializes buffer use and spi_setup() */
	unsigned int users;		/* open file handles (driver enforces max 1) */
	u8 *tx_buffer;
	u8 *rx_buffer;
	u32 speed_hz;			/* current transfer speed in Hz */

	/* GPIO for SE_POWER_REQ / SE_nRESET */
	struct gpio_desc *gpiod_se_reset;

	int power_gpio_mode;		/* POWER_MODE_NONE or POWER_MODE_ST33 */
	int power_gpio;
	int nfcc_needs_poweron;		/* NFC controller power request flag */
	int sehal_needs_poweron;	/* SE HAL power request flag */
	int se_is_poweron;		/* 1 while the SE is considered powered */
};
+
+#define POWER_MODE_NONE -1
+#define POWER_MODE_ST33 2
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_lock);
+
+static unsigned int bufsiz = 4096;
+module_param(bufsiz, uint, 0444);
+MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
+
+#define VERBOSE 0
+
+#define DRIVER_VERSION "2.2.0"
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t st33spi_sync(struct st33spi_data *st33spi,
+ struct spi_message *message)
+{
+ int status;
+ struct spi_device *spi;
+
+ spin_lock_irq(&st33spi->spi_lock);
+ spi = st33spi->spi;
+ spin_unlock_irq(&st33spi->spi_lock);
+
+ if (spi == NULL)
+ status = -ESHUTDOWN;
+ else
+ status = spi_sync(spi, message);
+
+ if (status == 0)
+ status = message->actual_length;
+
+ return status;
+}
+
+static inline ssize_t st33spi_sync_write(struct st33spi_data *st33spi,
+ size_t len)
+{
+ struct spi_transfer t = {
+ .tx_buf = st33spi->tx_buffer,
+ .len = len,
+ .speed_hz = st33spi->speed_hz,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return st33spi_sync(st33spi, &m);
+}
+
+static inline ssize_t st33spi_sync_read(struct st33spi_data *st33spi,
+ size_t len)
+{
+ struct spi_transfer t = {
+ .rx_buf = st33spi->rx_buffer,
+ .len = len,
+ .speed_hz = st33spi->speed_hz,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return st33spi_sync(st33spi, &m);
+}
+
+/*-------------------------------------------------------------------------*/
+
/*
 * Read-only message with current device setup.
 *
 * Clocks @count bytes into the driver RX bounce buffer, then copies them
 * to userspace. Returns the number of bytes delivered to @buf or a
 * negative errno.
 */
static ssize_t st33spi_read(struct file *filp, char __user *buf, size_t count,
			    loff_t *f_pos)
{
	struct st33spi_data *st33spi;
	ssize_t status = 0;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	st33spi = filp->private_data;

	dev_dbg(&st33spi->spi->dev, "st33spi Read: %zu bytes\n", count);

	mutex_lock(&st33spi->buf_lock);
	status = st33spi_sync_read(st33spi, count);
	if (status > 0) {
		unsigned long missing;

		/* Partial copy: report only the bytes that reached userspace;
		 * -EFAULT only if nothing could be copied. */
		missing = copy_to_user(buf, st33spi->rx_buffer, status);
		if (missing == status)
			status = -EFAULT;
		else
			status = status - missing;
	}
	mutex_unlock(&st33spi->buf_lock);

	dev_dbg(&st33spi->spi->dev, "st33spi Read: status: %zd\n", status);

	return status;
}
+
+/* Write-only message with current device setup */
+static ssize_t st33spi_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct st33spi_data *st33spi;
+ ssize_t status = 0;
+ unsigned long missing;
+
+ /* chipselect only toggles at start or end of operation */
+ if (count > bufsiz)
+ return -EMSGSIZE;
+
+ st33spi = filp->private_data;
+
+ dev_dbg(&st33spi->spi->dev, "st33spi Write: %zu bytes\n", count);
+
+ mutex_lock(&st33spi->buf_lock);
+ missing = copy_from_user(st33spi->tx_buffer, buf, count);
+ if (missing == 0)
+ status = st33spi_sync_write(st33spi, count);
+ else
+ status = -EFAULT;
+ mutex_unlock(&st33spi->buf_lock);
+
+ dev_dbg(&st33spi->spi->dev, "st33spi Write: status: %zd\n", status);
+
+ return status;
+}
+
/*
 * Execute a segmented/full-duplex SPI_IOC_MESSAGE request.
 *
 * Builds one spi_message from @n_xfers user descriptors, staging TX data
 * into (and RX data out of) the driver bounce buffers, then runs it
 * synchronously. Returns the summed transfer length on success or a
 * negative errno. Caller must hold st33spi->buf_lock.
 */
static int st33spi_message(struct st33spi_data *st33spi,
			   struct spi_ioc_transfer *u_xfers,
			   unsigned int n_xfers)
{
	struct spi_message msg;
	struct spi_transfer *k_xfers;
	struct spi_transfer *k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned int n, total, tx_total, rx_total;
	u8 *tx_buf, *rx_buf;
	int status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL)
		return -ENOMEM;

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	tx_buf = st33spi->tx_buffer;
	rx_buf = st33spi->rx_buffer;
	total = 0;
	tx_total = 0;
	rx_total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n;
	     n--, k_tmp++, u_tmp++) {
		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		/* Since the function returns the total length of transfers
		 * on success, restrict the total to positive int values to
		 * avoid the return value looking like an error. Also check
		 * each transfer length to avoid arithmetic overflow.
		 */
		if (total > INT_MAX || k_tmp->len > INT_MAX) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			/* this transfer needs space in RX bounce buffer */
			rx_total += k_tmp->len;
			if (rx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->rx_buf = rx_buf;
			/* validate the user range now; the data itself is
			 * copied back only after the transfer completes */
			if (!access_ok((u8 __user *)(uintptr_t)u_tmp->rx_buf,
				       u_tmp->len))
				goto done;
			rx_buf += k_tmp->len;
		}
		if (u_tmp->tx_buf) {
			/* this transfer needs space in TX bounce buffer */
			tx_total += k_tmp->len;
			if (tx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->tx_buf = tx_buf;
			if (copy_from_user(
				    tx_buf,
				    (const u8 __user *)(uintptr_t)u_tmp->tx_buf,
				    u_tmp->len))
				goto done;
			tx_buf += k_tmp->len;
		}

		/* Per-transfer protocol options, defaulting speed to the
		 * device's current setting when the user passed 0. */
		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay_usecs = u_tmp->delay_usecs;
		k_tmp->speed_hz = u_tmp->speed_hz;
		if (!k_tmp->speed_hz)
			k_tmp->speed_hz = st33spi->speed_hz;
#if VERBOSE
		dev_dbg(&st33spi->spi->dev,
			" xfer len %u %s%s%s%dbits %u usec %uHz\n", u_tmp->len,
			u_tmp->rx_buf ? "rx " : "", u_tmp->tx_buf ? "tx " : "",
			u_tmp->cs_change ? "cs " : "",
			u_tmp->bits_per_word ?: st33spi->spi->bits_per_word,
			u_tmp->delay_usecs,
			u_tmp->speed_hz ?: st33spi->spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = st33spi_sync(st33spi, &msg);
	if (status < 0)
		goto done;

	/* copy any rx data out of bounce buffer */
	rx_buf = st33spi->rx_buffer;
	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (__copy_to_user((u8 __user *)(uintptr_t)u_tmp->rx_buf,
					   rx_buf, u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
			rx_buf += u_tmp->len;
		}
	}
	status = total;

done:
	kfree(k_xfers);
	return status;
}
+
+static struct spi_ioc_transfer *
+st33spi_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+ unsigned int *n_ioc)
+{
+ struct spi_ioc_transfer *ioc;
+ u32 tmp;
+
+ /* Check type, command number and direction */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC ||
+ _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) ||
+ _IOC_DIR(cmd) != _IOC_WRITE)
+ return ERR_PTR(-ENOTTY);
+
+ tmp = _IOC_SIZE(cmd);
+ if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+ return ERR_PTR(-EINVAL);
+ *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+ if (*n_ioc == 0)
+ return NULL;
+
+ /* copy into scratch area */
+ ioc = kmalloc(tmp, GFP_KERNEL);
+ if (!ioc)
+ return ERR_PTR(-ENOMEM);
+ if (__copy_from_user(ioc, u_ioc, tmp)) {
+ kfree(ioc);
+ return ERR_PTR(-EFAULT);
+ }
+ return ioc;
+}
+
/*
 * Mark the secure element as powered off. With WITH_SPI_CLK_MNGT defined
 * this also gates the SPI controller clock via a platform helper.
 */
static void st33spi_power_off(struct st33spi_data *st33spi)
{
#ifdef WITH_SPI_CLK_MNGT
	/* no need for the SPI clock to be enabled. */
	dev_dbg(&st33spi->spi->dev,
		"%s : disabling PMU clock of SPI subsystem\n", __func__);
	mt_spi_disable_master_clk(st33spi->spi);
#endif /* WITH_SPI_CLK_MNGT */

	st33spi->se_is_poweron = 0;
}
+
/*
 * Power the secure element on. In ST33 power mode this pulses the
 * SE_nRESET line (high ~5 ms, then low) and waits for the SE to come up.
 */
static void st33spi_power_on(struct st33spi_data *st33spi)
{
#ifdef WITH_SPI_CLK_MNGT
	/* the SPI clock needs to be enabled. */
	dev_dbg(&st33spi->spi->dev,
		"%s : enabling PMU clock of SPI subsystem\n", __func__);
	mt_spi_enable_master_clk(st33spi->spi);
#endif /* WITH_SPI_CLK_MNGT */

	if (st33spi->power_gpio_mode == POWER_MODE_ST33) {
		/* Just a pulse on SPI_nRESET */
		gpiod_set_value_cansleep(st33spi->gpiod_se_reset, 1);
		usleep_range(5000, 5500);
		gpiod_set_value_cansleep(st33spi->gpiod_se_reset, 0);
		dev_info(&st33spi->spi->dev, "%s : st33 set nReset to Low\n",
			 __func__);
		/* Give the SE time to boot before the first transfer. */
		usleep_range(3000, 4000);
	}
	st33spi->se_is_poweron = 1;
}
+
+static void st33spi_power_set(struct st33spi_data *st33spi, int val)
+{
+ if (!st33spi)
+ return;
+
+ dev_dbg(&st33spi->spi->dev, "st33spi sehal pwr_req: %d\n", val);
+
+ if (val) {
+ st33spi->sehal_needs_poweron = 1;
+ st33spi_power_on(st33spi);
+ } else {
+ st33spi->sehal_needs_poweron = 0;
+ if ((st33spi->se_is_poweron == 1) &&
+ (st33spi->nfcc_needs_poweron == 0))
+ /* we don t need power anymore */
+ st33spi_power_off(st33spi);
+ }
+}
+
/* Report the current level of the SE reset/power GPIO line. */
static int st33spi_power_get(struct st33spi_data *st33spi)
{
	return gpiod_get_value_cansleep(st33spi->gpiod_se_reset);
}
+
+static long st33spi_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+ int retval = 0;
+ struct st33spi_data *st33spi;
+ struct spi_device *spi;
+ u32 tmp;
+ unsigned int n_ioc;
+ struct spi_ioc_transfer *ioc;
+
+ /* Check type and command number */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
+ return -ENOTTY;
+
+ /* Check access direction once here; don't repeat below.
+ * IOC_DIR is from the user perspective, while access_ok is
+ * from the kernel perspective; so they look reversed.
+ */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+ if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+ if (err)
+ return -EFAULT;
+
+ /* guard against device removal before, or while,
+ * we issue this ioctl.
+ */
+ st33spi = filp->private_data;
+ spin_lock_irq(&st33spi->spi_lock);
+ spi = spi_dev_get(st33spi->spi);
+ spin_unlock_irq(&st33spi->spi_lock);
+
+ dev_dbg(&st33spi->spi->dev, "st33spi ioctl cmd %d\n", cmd);
+
+ if (spi == NULL)
+ return -ESHUTDOWN;
+
+ /* use the buffer lock here for triple duty:
+ * - prevent I/O (from us) so calling spi_setup() is safe;
+ * - prevent concurrent SPI_IOC_WR_* from morphing
+ * data fields while SPI_IOC_RD_* reads them;
+ * - SPI_IOC_MESSAGE needs the buffer locked "normally".
+ */
+ mutex_lock(&st33spi->buf_lock);
+
+ switch (cmd) {
+ /* read requests */
+ case SPI_IOC_RD_MODE:
+ retval = __put_user(spi->mode & SPI_MODE_MASK,
+ (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_MODE32:
+ retval = __put_user(spi->mode & SPI_MODE_MASK,
+ (__u32 __user *)arg);
+ break;
+ case SPI_IOC_RD_LSB_FIRST:
+ retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
+ (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_BITS_PER_WORD:
+ retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_MAX_SPEED_HZ:
+ retval = __put_user(st33spi->speed_hz, (__u32 __user *)arg);
+ break;
+ case ST33SPI_IOC_RD_POWER:
+ dev_dbg(&st33spi->spi->dev, "st33spi ST33SPI_IOC_RD_POWER\n");
+ retval = __put_user(st33spi_power_get(st33spi),
+ (__u32 __user *)arg);
+ break;
+
+ /* write requests */
+ case SPI_IOC_WR_MODE:
+ case SPI_IOC_WR_MODE32:
+ if (cmd == SPI_IOC_WR_MODE)
+ retval = __get_user(tmp, (u8 __user *)arg);
+ else
+ retval = __get_user(tmp, (u32 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->mode;
+
+ if (tmp & ~SPI_MODE_MASK) {
+ retval = -EINVAL;
+ break;
+ }
+
+ tmp |= spi->mode & ~SPI_MODE_MASK;
+ spi->mode = (u16)tmp;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->mode = save;
+ else
+ dev_dbg(&spi->dev, "spi mode %x\n", tmp);
+ }
+ break;
+ case SPI_IOC_WR_LSB_FIRST:
+ retval = __get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->mode;
+
+ if (tmp)
+ spi->mode |= SPI_LSB_FIRST;
+ else
+ spi->mode &= ~SPI_LSB_FIRST;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->mode = save;
+ else
+ dev_dbg(&spi->dev, "%csb first\n",
+ tmp ? 'l' : 'm');
+ }
+ break;
+ case SPI_IOC_WR_BITS_PER_WORD:
+ retval = __get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ u8 save = spi->bits_per_word;
+
+ spi->bits_per_word = tmp;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->bits_per_word = save;
+ else
+ dev_dbg(&spi->dev, "%d bits per word\n", tmp);
+ }
+ break;
+ case SPI_IOC_WR_MAX_SPEED_HZ:
+ retval = __get_user(tmp, (__u32 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->max_speed_hz;
+
+ spi->max_speed_hz = tmp;
+ retval = spi_setup(spi);
+ if (retval >= 0)
+ st33spi->speed_hz = tmp;
+ else
+ dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
+ spi->max_speed_hz = save;
+ }
+ break;
+ case ST33SPI_IOC_WR_POWER:
+ retval = __get_user(tmp, (__u32 __user *)arg);
+ dev_dbg(&st33spi->spi->dev,
+ "st33spi ST33SPI_IOC_WR_POWER %d\n", retval);
+ if (retval == 0) {
+ st33spi_power_set(st33spi, tmp ? 1 : 0);
+ dev_dbg(&st33spi->spi->dev, "SE_POWER_REQ set: %d\n",
+ tmp);
+ }
+ break;
+ default:
+ /* segmented and/or full-duplex I/O request */
+ /* Check message and copy into scratch area */
+ ioc = st33spi_get_ioc_message(
+ cmd, (struct spi_ioc_transfer __user *)arg, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
+ break;
+ }
+ if (!ioc)
+ break; /* n_ioc is also 0 */
+
+ /* translate to spi_message, execute */
+ retval = st33spi_message(st33spi, ioc, n_ioc);
+ kfree(ioc);
+ break;
+ }
+
+ mutex_unlock(&st33spi->buf_lock);
+ spi_dev_put(spi);
+
+ dev_dbg(&st33spi->spi->dev, "st33spi ioctl retval %d\n", retval);
+
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long st33spi_compat_ioc_message(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct spi_ioc_transfer __user *u_ioc;
+ int retval = 0;
+ struct st33spi_data *st33spi;
+ struct spi_device *spi;
+ unsigned int n_ioc, n;
+ struct spi_ioc_transfer *ioc;
+
+ u_ioc = (struct spi_ioc_transfer __user *)compat_ptr(arg);
+ if (!access_ok(u_ioc, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ /* guard against device removal before, or while,
+ * we issue this ioctl.
+ */
+ st33spi = filp->private_data;
+ spin_lock_irq(&st33spi->spi_lock);
+ spi = spi_dev_get(st33spi->spi);
+ spin_unlock_irq(&st33spi->spi_lock);
+
+ dev_dbg(&st33spi->spi->dev, "st33spi compat_ioctl cmd %d\n", cmd);
+
+ if (spi == NULL)
+ return -ESHUTDOWN;
+
+ /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
+ mutex_lock(&st33spi->buf_lock);
+
+ /* Check message and copy into scratch area */
+ ioc = st33spi_get_ioc_message(cmd, u_ioc, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
+ goto done;
+ }
+ if (!ioc)
+ goto done; /* n_ioc is also 0 */
+
+ /* Convert buffer pointers */
+ for (n = 0; n < n_ioc; n++) {
+ ioc[n].rx_buf = (uintptr_t)compat_ptr(ioc[n].rx_buf);
+ ioc[n].tx_buf = (uintptr_t)compat_ptr(ioc[n].tx_buf);
+ }
+
+ /* translate to spi_message, execute */
+ retval = st33spi_message(st33spi, ioc, n_ioc);
+ kfree(ioc);
+
+done:
+ mutex_unlock(&st33spi->buf_lock);
+ spi_dev_put(spi);
+ dev_dbg(&st33spi->spi->dev, "st33spi compat_ioctl retval %d\n", retval);
+ return retval;
+}
+
+static long st33spi_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC &&
+ _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0)) &&
+ _IOC_DIR(cmd) == _IOC_WRITE)
+ return st33spi_compat_ioc_message(filp, cmd, arg);
+
+ return st33spi_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define st33spi_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
+static int st33spi_open(struct inode *inode, struct file *filp)
+{
+ struct st33spi_data *st33spi;
+ int status = -ENXIO;
+
+ mutex_lock(&device_list_lock);
+
+ list_for_each_entry (st33spi, &device_list, device_entry) {
+ if (st33spi->devt == inode->i_rdev) {
+ status = 0;
+ break;
+ }
+ }
+
+ if (status) {
+ dev_dbg(&st33spi->spi->dev, "st33spi: nothing for minor %d\n",
+ iminor(inode));
+ goto err_find_dev;
+ }
+
+ /* Authorize only 1 process to open the device. */
+ if (st33spi->users > 0) {
+ dev_err(&st33spi->spi->dev, "already open\n");
+ mutex_unlock(&device_list_lock);
+ return -EBUSY;
+ }
+
+ dev_dbg(&st33spi->spi->dev, "st33spi: open\n");
+
+ if (!st33spi->tx_buffer) {
+ st33spi->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!st33spi->tx_buffer) {
+ status = -ENOMEM;
+ goto err_find_dev;
+ }
+ }
+
+ if (!st33spi->rx_buffer) {
+ st33spi->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!st33spi->rx_buffer) {
+ status = -ENOMEM;
+ goto err_alloc_rx_buf;
+ }
+ }
+
+ st33spi->users++;
+ filp->private_data = st33spi;
+ nonseekable_open(inode, filp);
+
+ mutex_unlock(&device_list_lock);
+
+ dev_dbg(&st33spi->spi->dev, "st33spi: open - force power on\n");
+ st33spi_power_set(st33spi, 1);
+ return 0;
+
+err_alloc_rx_buf:
+ kfree(st33spi->tx_buffer);
+ st33spi->tx_buffer = NULL;
+err_find_dev:
+ mutex_unlock(&device_list_lock);
+ return status;
+}
+
/*
 * release(): drop the SE HAL power request, free the bounce buffers on
 * last close, and free the driver data if the SPI device was already
 * unbound while the fd was open.
 */
static int st33spi_release(struct inode *inode, struct file *filp)
{
	struct st33spi_data *st33spi;

	mutex_lock(&device_list_lock);
	st33spi = filp->private_data;
	filp->private_data = NULL;

	dev_dbg(&st33spi->spi->dev, "st33spi: release\n");

	/* last close? */
	st33spi->users--;
	if (!st33spi->users) {
		int dofree;

		dev_dbg(&st33spi->spi->dev,
			"st33spi: release - may allow power off\n");
		st33spi_power_set(st33spi, 0);

		kfree(st33spi->tx_buffer);
		st33spi->tx_buffer = NULL;

		kfree(st33spi->rx_buffer);
		st33spi->rx_buffer = NULL;

		spin_lock_irq(&st33spi->spi_lock);
		if (st33spi->spi)
			st33spi->speed_hz = st33spi->spi->max_speed_hz;

		/* ... after we unbound from the underlying device? */
		dofree = ((st33spi->spi == NULL) &&
			  (st33spi->spi_reset == NULL));
		spin_unlock_irq(&st33spi->spi_lock);

		/* remove() already ran: we are the last owner of the data. */
		if (dofree)
			kfree(st33spi);
	}
	mutex_unlock(&device_list_lock);

	return 0;
}
+
/* Character-device operations backing /dev/st33spi. */
static const struct file_operations st33spi_fops = {
	.owner = THIS_MODULE,
	/*
	 * REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage. It'll simplify things
	 * too, except for the locking.
	 */
	.write = st33spi_write,
	.read = st33spi_read,
	.unlocked_ioctl = st33spi_ioctl,
	.compat_ioctl = st33spi_compat_ioctl,
	.open = st33spi_open,
	.release = st33spi_release,
	.llseek = no_llseek,
};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The main reason to have this class is to make mdev/udev create the
+ * /dev/st33spi character device nodes exposing our userspace API.
+ * It also simplifies memory management.
+ */
+
+static struct class *st33spi_class;
+
/* Device-tree match table: binds this driver to "st,st33spi" nodes. */
static const struct of_device_id st33spi_dt_ids[] = {
	{ .compatible = "st,st33spi" },
	{},
};
MODULE_DEVICE_TABLE(of, st33spi_dt_ids);
+
+#ifdef CONFIG_ACPI
+
+/* Placeholder SPI devices not to be used in production systems */
+#define ST33SPI_ACPI_PLACEHOLDER 1
+
static const struct acpi_device_id st33spi_acpi_ids[] = {
	/*
	 * The ACPI SPT000* devices are only meant for development and
	 * testing. Systems used in production should have a proper ACPI
	 * description of the connected peripheral and they should also
	 * use a proper driver instead of poking directly to the SPI bus
	 */
	{ "SPT0001", ST33SPI_ACPI_PLACEHOLDER },
	{ "SPT0002", ST33SPI_ACPI_PLACEHOLDER },
	{ "SPT0003", ST33SPI_ACPI_PLACEHOLDER },
	{},
};
MODULE_DEVICE_TABLE(acpi, st33spi_acpi_ids);

/*
 * Warn if the device was instantiated from an ACPI entry that is not one
 * of the placeholder IDs above; nothing else is configured from ACPI.
 */
static void st33spi_probe_acpi(struct spi_device *spi)
{
	const struct acpi_device_id *id;

	if (!has_acpi_companion(&spi->dev))
		return;

	id = acpi_match_device(st33spi_acpi_ids, &spi->dev);
	if (WARN_ON(!id))
		return;
}
+#else
+static inline void st33spi_probe_acpi(struct spi_device *spi)
+{
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static int st33spi_parse_dt(struct device *dev, struct st33spi_data *pdata)
+{
+ int r = 0;
+ struct device_node *np = dev->of_node;
+ const char *power_mode;
+
+#ifndef GKI_MODULE
+ np = of_find_compatible_node(NULL, NULL, "st,st33spi");
+#endif
+
+ if (!np) {
+ return r;
+ }
+
+ /* Read power mode. */
+ power_mode = of_get_property(np, "power_mode", NULL);
+ if (!power_mode) {
+ dev_info(dev, "%s: Default power mode: ST33\n",
+ __FILE__);
+ pdata->power_gpio_mode = POWER_MODE_ST33;
+ } else if (!strcmp(power_mode, "ST33")) {
+ dev_info(dev, "%s: Power mode: ST33\n", __FILE__);
+ pdata->power_gpio_mode = POWER_MODE_ST33;
+ } else if (!strcmp(power_mode, "none")) {
+ dev_info(dev, "%s: Power mode: none\n", __FILE__);
+ pdata->power_gpio_mode = POWER_MODE_NONE;
+ } else {
+ dev_err(dev, "%s: Power mode unknown: %s\n", __FILE__,
+ power_mode);
+ return -EFAULT;
+ }
+
+ /* Get the Gpio */
+ if (pdata->power_gpio_mode == POWER_MODE_ST33) {
+ pdata->gpiod_se_reset =
+ devm_gpiod_get(dev, "esereset", GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->gpiod_se_reset)) {
+ dev_err(dev,
+ "%s : Unable to request esereset %d\n",
+ __func__,
+ IS_ERR(pdata->gpiod_se_reset));
+ return -ENODEV;
+ }
+ } else {
+ dev_err(dev, "%s: ST54H mode not supported", __FILE__);
+ }
+ return r;
+}
+
+static int st33spi_probe(struct spi_device *spi)
+{
+ struct st33spi_data *st33spi;
+ int status;
+ unsigned long minor;
+#ifdef ST33NFC_QCOM
+ struct device *dev = &spi->dev;
+ struct spi_geni_qcom_ctrl_data *spi_param;
+#endif /* ST33NFC_QCOM */
+
+#ifdef GKI_MODULE
+ /* Claim our 256 reserved device numbers. Then register a class
+ * that will key udev/mdev to add/remove /dev nodes. Last, register
+ * the driver which manages those device numbers.
+ */
+ BUILD_BUG_ON(N_SPI_MINORS > 256);
+ st33spi_major =
+ __register_chrdev(0, 0, N_SPI_MINORS, "spi", &st33spi_fops);
+ dev_info(&spi->dev, "Loading st33spi driver, major: %d\n",
+ st33spi_major);
+
+ st33spi_class = class_create(THIS_MODULE, "st33spi");
+ if (IS_ERR(st33spi_class)) {
+ unregister_chrdev(st33spi_major, "st33spi");
+ return PTR_ERR(st33spi_class);
+ }
+#endif
+
+ /*
+ * st33spi should never be referenced in DT without a specific
+ * compatible string, it is a Linux implementation thing
+ * rather than a description of the hardware.
+ */
+
+ st33spi_probe_acpi(spi);
+
+ /* Allocate driver data */
+ st33spi = kzalloc(sizeof(*st33spi), GFP_KERNEL);
+ if (!st33spi)
+ return -ENOMEM;
+
+ /* Initialize the driver data */
+ st33spi->spi = spi;
+ spin_lock_init(&st33spi->spi_lock);
+ mutex_init(&st33spi->buf_lock);
+
+ INIT_LIST_HEAD(&st33spi->device_entry);
+
+ /* If we can allocate a minor number, hook up this device.
+ * Reusing minors is fine so long as udev or mdev is working.
+ */
+ mutex_lock(&device_list_lock);
+ minor = find_first_zero_bit(minors, N_SPI_MINORS);
+ if (minor < N_SPI_MINORS) {
+ struct device *dev;
+
+ st33spi->devt = MKDEV(st33spi_major, minor);
+ dev = device_create(st33spi_class, &spi->dev, st33spi->devt,
+ st33spi, "st33spi");
+ status = PTR_ERR_OR_ZERO(dev);
+ } else {
+ dev_dbg(&spi->dev, "%s : no minor number available!\n",
+ __FILE__);
+ status = -ENODEV;
+ }
+ if (status == 0) {
+ set_bit(minor, minors);
+ list_add(&st33spi->device_entry, &device_list);
+ }
+ mutex_unlock(&device_list_lock);
+
+ st33spi->speed_hz = spi->max_speed_hz;
+ dev_dbg(&spi->dev, "%s : st33spi->speed_hz=%d\n", __FILE__,
+ st33spi->speed_hz);
+
+ /* set timings for ST33 */
+#ifdef ST33NFC_QCOM
+ spi_param = devm_kzalloc(dev, sizeof(spi_param), GFP_KERNEL);
+ if (spi_param == NULL)
+ return -ENOMEM;
+
+ /* Initialize the driver data */
+ spi_param->spi_cs_clk_delay = 90;
+ spi->controller_data = spi_param;
+
+#else
+ dev_err(&spi->dev, "%s : TSU_NSS configuration be implemented!\n",
+ __func__);
+ /*
+ * platform-specific method to configure the delay between NSS
+ * selection and the start of data transfer (clk).
+ * If no specific method required, you can comment above line.
+ */
+#endif
+ spi->bits_per_word = 8;
+
+ if (status == 0) {
+ spi_set_drvdata(spi, st33spi);
+ (void)st33spi_parse_dt(&spi->dev, st33spi);
+ } else {
+ kfree(st33spi);
+ }
+
+ return status;
+}
+
/*
 * remove(): detach the SPI device from open fds (they will see
 * -ESHUTDOWN), tear down the /dev node, and free the driver data if no
 * fd is open (otherwise release() frees it later).
 */
static int st33spi_remove(struct spi_device *spi)
{
	struct st33spi_data *st33spi = spi_get_drvdata(spi);

	/* make sure ops on existing fds can abort cleanly */
	spin_lock_irq(&st33spi->spi_lock);
	st33spi->spi = NULL;
	st33spi->spi_reset = NULL;
	spin_unlock_irq(&st33spi->spi_lock);

	/* prevent new opens */
	mutex_lock(&device_list_lock);
	list_del(&st33spi->device_entry);
	device_destroy(st33spi_class, st33spi->devt);
	clear_bit(MINOR(st33spi->devt), minors);
	if (st33spi->users == 0) {
		kfree(st33spi);
#ifdef GKI_MODULE
		/* NOTE(review): class/chrdev are only unregistered when no
		 * fd is open at remove time; the release() path frees
		 * st33spi but never tears these down — verify for the
		 * remove-while-open case.
		 */
		class_destroy(st33spi_class);
		unregister_chrdev(st33spi_major, "st33spi");
#endif
	}
	mutex_unlock(&device_list_lock);

	return 0;
}
+
/* SPI driver definition; matches via DT ("st,st33spi") or ACPI IDs. */
static struct spi_driver st33spi_spi_driver = {
	.driver = {
		.name = "st33spi",
		.of_match_table = of_match_ptr(st33spi_dt_ids),
		.acpi_match_table = ACPI_PTR(st33spi_acpi_ids),
	},
	.probe = st33spi_probe,
	.remove = st33spi_remove,

	/* NOTE: suspend/resume methods are not necessary here.
	 * We don't do anything except pass the requests to/from
	 * the underlying controller. The refrigerator handles
	 * most issues; the controller driver handles the rest.
	 */
};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef GKI_MODULE
+module_spi_driver(st33spi_spi_driver);
+#else
+static int __init st33spi_init(void)
+{
+ int status;
+
+ pr_info("Loading st33spi driver\n");
+
+ /* Claim our 256 reserved device numbers. Then register a class
+ * that will key udev/mdev to add/remove /dev nodes. Last, register
+ * the driver which manages those device numbers.
+ */
+ BUILD_BUG_ON(N_SPI_MINORS > 256);
+ st33spi_major =
+ __register_chrdev(0, 0, N_SPI_MINORS, "spi", &st33spi_fops);
+ pr_info("Loading st33spi driver, major: %d\n", st33spi_major);
+
+ st33spi_class = class_create(THIS_MODULE, "st33spi");
+ if (IS_ERR(st33spi_class)) {
+ unregister_chrdev(st33spi_major,
+ st33spi_spi_driver.driver.name);
+ return PTR_ERR(st33spi_class);
+ }
+
+ status = spi_register_driver(&st33spi_spi_driver);
+ if (status < 0) {
+ class_destroy(st33spi_class);
+ unregister_chrdev(st33spi_major,
+ st33spi_spi_driver.driver.name);
+ }
+ pr_info("Loading st33spi driver: %d\n", status);
+ return status;
+}
+module_init(st33spi_init);
+
/* Module unload: unregister the SPI driver, class and char-dev region. */
static void __exit st33spi_exit(void)
{
	spi_unregister_driver(&st33spi_spi_driver);
	class_destroy(st33spi_class);
	unregister_chrdev(st33spi_major, st33spi_spi_driver.driver.name);
}
module_exit(st33spi_exit);
+#endif
+
+MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
+MODULE_DESCRIPTION("User mode SPI device interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:st33spi");
diff --git a/ese/st54spi.c b/ese/st54spi.c
new file mode 100644
index 0000000..b50cc3e
--- /dev/null
+++ b/ese/st54spi.c
@@ -0,0 +1,1138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple synchronous userspace interface to SPI devices
+ *
+ * Copyright (C) 2006 SWAPP
+ * Andrea Paterniani <a.paterniani@swapp-eng.it>
+ * Copyright (C) 2007 David Brownell (simplification, cleanup)
+ *
+ */
+/*
+ * Modified by ST Microelectronics.
+ * <arach.mohammed.brahim@st.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spidev.h>
+
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <linux/uaccess.h>
+
+#define ST54NFC_QCOM 1
+#undef ST54NFC_QCOM
+
+#ifdef ST54NFC_QCOM
+#include <linux/spi/spi-geni-qcom.h>
+#endif /* ST54NFC_QCOM */
+
+#ifndef GKI_MODULE
+#define GKI_MODULE 1
+#endif
+
+#include "../st21nfc.h"
+
+/*
+ * This supports access to SPI devices using normal userspace I/O calls.
+ * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
+ * and often mask message boundaries, full SPI support requires full duplex
+ * transfers. There are several kinds of internal message boundaries to
+ * handle chipselect management and other protocol options.
+ *
+ * SPI has a character major number assigned. We allocate minor numbers
+ * dynamically using a bitmask. You must use hotplug tools, such as udev
+ * (or mdev with busybox) to create and destroy the /dev/st54spi device
+ * nodes, since there is no fixed association of minor numbers with any
+ * particular SPI bus or device.
+ */
+static int st54spi_major;
+#define N_SPI_MINORS 2 /* ... up to 256 */
+
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
+
+#define ST54SPI_IOC_RD_POWER _IOR(SPI_IOC_MAGIC, 99, __u32)
+#define ST54SPI_IOC_WR_POWER _IOW(SPI_IOC_MAGIC, 99, __u32)
+
+/* Bit masks for spi_device.mode management. Note that incorrect
+ * settings for some settings can cause *lots* of trouble for other
+ * devices on a shared bus:
+ *
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
+ */
+#define SPI_MODE_MASK \
+ (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_3WIRE | \
+ SPI_LOOP | SPI_NO_CS | SPI_READY | SPI_TX_DUAL | SPI_TX_QUAD | \
+ SPI_RX_DUAL | SPI_RX_QUAD)
+
+/* Per-instance state for one /dev/st54spi character device. */
+struct st54spi_data {
+	dev_t devt;		/* char device number for this instance */
+	spinlock_t spi_lock;	/* protects spi/spi_reset against unbind */
+	struct spi_device *spi;
+	struct spi_device *spi_reset;
+	struct list_head device_entry;	/* link in global device_list */
+
+	/* TX/RX buffers are NULL unless this device is open (users > 0) */
+	struct mutex buf_lock;
+	unsigned int users;	/* open count; at most 1 enforced in open() */
+	u8 *tx_buffer;
+	u8 *rx_buffer;
+	u32 speed_hz;		/* current transfer speed used by I/O paths */
+
+	/* GPIO for SE_POWER_REQ / SE_nRESET */
+	struct gpio_desc *gpiod_se_reset;
+
+	int power_gpio_mode;	/* one of the POWER_MODE_* values below */
+	int power_gpio;
+	int nfcc_needs_poweron;	/* NFC controller requested SE power */
+	int sehal_needs_poweron;	/* userspace SE HAL requested power */
+	int se_is_poweron;	/* last power state this driver applied */
+};
+
+#define POWER_MODE_NONE -1
+#define POWER_MODE_ST54H 0
+#define POWER_MODE_ST54J 1
+#define POWER_MODE_ST54J_COMBO 2
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_lock);
+
+static unsigned int bufsiz = 4096;
+module_param(bufsiz, uint, 0444);
+MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
+
+#define VERBOSE 0
+
+#define DRIVER_VERSION "2.2.0"
+
+/*-------------------------------------------------------------------------*/
+
+/* Execute a SPI message synchronously against the (possibly unbound)
+ * underlying device.  Returns the number of bytes actually transferred,
+ * or a negative errno (-ESHUTDOWN once the spi device has gone away).
+ */
+static ssize_t st54spi_sync(struct st54spi_data *st54spi,
+			    struct spi_message *message)
+{
+	struct spi_device *spi;
+	ssize_t status;
+
+	/* Snapshot the device pointer under the lock: remove() may clear
+	 * it while a file descriptor is still open.
+	 */
+	spin_lock_irq(&st54spi->spi_lock);
+	spi = st54spi->spi;
+	spin_unlock_irq(&st54spi->spi_lock);
+
+	if (!spi)
+		return -ESHUTDOWN;
+
+	status = spi_sync(spi, message);
+	if (status)
+		return status;
+
+	return message->actual_length;
+}
+
+/* Push `len` bytes from the TX bounce buffer as one write-only transfer. */
+static inline ssize_t st54spi_sync_write(struct st54spi_data *st54spi,
+					 size_t len)
+{
+	struct spi_message m;
+	struct spi_transfer t = { 0 };
+
+	t.tx_buf = st54spi->tx_buffer;
+	t.len = len;
+	t.speed_hz = st54spi->speed_hz;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	return st54spi_sync(st54spi, &m);
+}
+
+/* Pull `len` bytes into the RX bounce buffer as one read-only transfer. */
+static inline ssize_t st54spi_sync_read(struct st54spi_data *st54spi,
+					size_t len)
+{
+	struct spi_message m;
+	struct spi_transfer t = { 0 };
+
+	t.rx_buf = st54spi->rx_buffer;
+	t.len = len;
+	t.speed_hz = st54spi->speed_hz;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	return st54spi_sync(st54spi, &m);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Read-only message with current device setup.
+ * Performs one SPI read of up to bufsiz bytes into the bounce buffer and
+ * copies the result to userspace.  Returns bytes delivered or -errno.
+ */
+static ssize_t st54spi_read(struct file *filp, char __user *buf, size_t count,
+			    loff_t *f_pos)
+{
+	struct st54spi_data *st54spi;
+	ssize_t status = 0;
+
+	/* chipselect only toggles at start or end of operation */
+	if (count > bufsiz)
+		return -EMSGSIZE;
+
+	st54spi = filp->private_data;
+
+	dev_dbg(&st54spi->spi->dev, "st54spi Read: %zu bytes\n", count);
+
+	mutex_lock(&st54spi->buf_lock);
+	status = st54spi_sync_read(st54spi, count);
+	if (status > 0) {
+		unsigned long missing;
+
+		/* Partial copy-out: report only what reached userspace;
+		 * -EFAULT when nothing at all could be copied.
+		 */
+		missing = copy_to_user(buf, st54spi->rx_buffer, status);
+		if (missing == status)
+			status = -EFAULT;
+		else
+			status = status - missing;
+	}
+	mutex_unlock(&st54spi->buf_lock);
+
+	dev_dbg(&st54spi->spi->dev, "st54spi Read: status: %zd\n", status);
+
+	return status;
+}
+
+/* Write-only message with current device setup: copy user data into the
+ * TX bounce buffer and send it in a single transfer.
+ */
+static ssize_t st54spi_write(struct file *filp, const char __user *buf,
+			     size_t count, loff_t *f_pos)
+{
+	struct st54spi_data *st54spi;
+	ssize_t status;
+
+	/* Reject anything that does not fit the bounce buffer;
+	 * chipselect only toggles at start or end of operation.
+	 */
+	if (count > bufsiz)
+		return -EMSGSIZE;
+
+	st54spi = filp->private_data;
+
+	dev_dbg(&st54spi->spi->dev, "st54spi Write: %zu bytes\n", count);
+
+	mutex_lock(&st54spi->buf_lock);
+	if (copy_from_user(st54spi->tx_buffer, buf, count))
+		status = -EFAULT;
+	else
+		status = st54spi_sync_write(st54spi, count);
+	mutex_unlock(&st54spi->buf_lock);
+
+	dev_dbg(&st54spi->spi->dev, "st54spi Write: status: %zd\n", status);
+
+	return status;
+}
+
+/* Translate an array of user-supplied spi_ioc_transfer descriptors into a
+ * single spi_message backed by the driver's TX/RX bounce buffers, execute
+ * it, and copy any received data back to userspace.
+ * Returns the total transfer length on success or a negative errno.
+ */
+static int st54spi_message(struct st54spi_data *st54spi,
+			   struct spi_ioc_transfer *u_xfers,
+			   unsigned int n_xfers)
+{
+	struct spi_message msg;
+	struct spi_transfer *k_xfers;
+	struct spi_transfer *k_tmp;
+	struct spi_ioc_transfer *u_tmp;
+	unsigned int n, total, tx_total, rx_total;
+	u8 *tx_buf, *rx_buf;
+	int status = -EFAULT;
+
+	spi_message_init(&msg);
+	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
+	if (k_xfers == NULL)
+		return -ENOMEM;
+
+	/* Construct spi_message, copying any tx data to bounce buffer.
+	 * We walk the array of user-provided transfers, using each one
+	 * to initialize a kernel version of the same transfer.
+	 */
+	tx_buf = st54spi->tx_buffer;
+	rx_buf = st54spi->rx_buffer;
+	total = 0;
+	tx_total = 0;
+	rx_total = 0;
+	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n;
+	     n--, k_tmp++, u_tmp++) {
+		k_tmp->len = u_tmp->len;
+
+		total += k_tmp->len;
+		/* Since the function returns the total length of transfers
+		 * on success, restrict the total to positive int values to
+		 * avoid the return value looking like an error. Also check
+		 * each transfer length to avoid arithmetic overflow.
+		 */
+		if (total > INT_MAX || k_tmp->len > INT_MAX) {
+			status = -EMSGSIZE;
+			goto done;
+		}
+
+		if (u_tmp->rx_buf) {
+			/* this transfer needs space in RX bounce buffer */
+			rx_total += k_tmp->len;
+			if (rx_total > bufsiz) {
+				status = -EMSGSIZE;
+				goto done;
+			}
+			k_tmp->rx_buf = rx_buf;
+			/* Validate the user pointer now; data is copied out
+			 * only after the whole message completes.
+			 */
+			if (!access_ok((u8 __user *)(uintptr_t)u_tmp->rx_buf,
+				       u_tmp->len))
+				goto done;
+			rx_buf += k_tmp->len;
+		}
+		if (u_tmp->tx_buf) {
+			/* this transfer needs space in TX bounce buffer */
+			tx_total += k_tmp->len;
+			if (tx_total > bufsiz) {
+				status = -EMSGSIZE;
+				goto done;
+			}
+			k_tmp->tx_buf = tx_buf;
+			if (copy_from_user(
+				    tx_buf,
+				    (const u8 __user *)(uintptr_t)u_tmp->tx_buf,
+				    u_tmp->len))
+				goto done;
+			tx_buf += k_tmp->len;
+		}
+
+		k_tmp->cs_change = !!u_tmp->cs_change;
+		k_tmp->tx_nbits = u_tmp->tx_nbits;
+		k_tmp->rx_nbits = u_tmp->rx_nbits;
+		k_tmp->bits_per_word = u_tmp->bits_per_word;
+		k_tmp->delay_usecs = u_tmp->delay_usecs;
+		k_tmp->speed_hz = u_tmp->speed_hz;
+		/* Per-transfer speed of 0 means "use the device default". */
+		if (!k_tmp->speed_hz)
+			k_tmp->speed_hz = st54spi->speed_hz;
+#if VERBOSE
+		dev_dbg(&st54spi->spi->dev,
+			"  xfer len %u %s%s%s%dbits %u usec %uHz\n", u_tmp->len,
+			u_tmp->rx_buf ? "rx " : "", u_tmp->tx_buf ? "tx " : "",
+			u_tmp->cs_change ? "cs " : "",
+			u_tmp->bits_per_word ?: st54spi->spi->bits_per_word,
+			u_tmp->delay_usecs,
+			u_tmp->speed_hz ?: st54spi->spi->max_speed_hz);
+#endif
+		spi_message_add_tail(k_tmp, &msg);
+	}
+
+	status = st54spi_sync(st54spi, &msg);
+	if (status < 0)
+		goto done;
+
+	/* copy any rx data out of bounce buffer */
+	rx_buf = st54spi->rx_buffer;
+	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
+		if (u_tmp->rx_buf) {
+			if (__copy_to_user((u8 __user *)(uintptr_t)u_tmp->rx_buf,
+					   rx_buf, u_tmp->len)) {
+				status = -EFAULT;
+				goto done;
+			}
+			rx_buf += u_tmp->len;
+		}
+	}
+	status = total;
+
+done:
+	kfree(k_xfers);
+	return status;
+}
+
+/* Validate a SPI_IOC_MESSAGE(n) ioctl command and copy the user-space
+ * transfer array into a freshly allocated kernel buffer.
+ * Returns NULL when n == 0, an ERR_PTR() on failure, or the array
+ * (which the caller must kfree()); *n_ioc receives the element count.
+ */
+static struct spi_ioc_transfer *
+st54spi_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+			unsigned int *n_ioc)
+{
+	struct spi_ioc_transfer *ioc;
+	u32 tmp;
+
+	/* Check type, command number and direction */
+	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC ||
+	    _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) ||
+	    _IOC_DIR(cmd) != _IOC_WRITE)
+		return ERR_PTR(-ENOTTY);
+
+	/* The ioctl size encodes the transfer count; it must be an exact
+	 * multiple of the per-transfer descriptor size.
+	 */
+	tmp = _IOC_SIZE(cmd);
+	if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+		return ERR_PTR(-EINVAL);
+	*n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+	if (*n_ioc == 0)
+		return NULL;
+
+	/* copy into scratch area */
+	ioc = kmalloc(tmp, GFP_KERNEL);
+	if (!ioc)
+		return ERR_PTR(-ENOMEM);
+	if (__copy_from_user(ioc, u_ioc, tmp)) {
+		kfree(ioc);
+		return ERR_PTR(-EFAULT);
+	}
+	return ioc;
+}
+
+/* Remove the SE power request.  In ST54J SE-only mode this drives the
+ * SE_PWR_REQ line low; in the other modes only the bookkeeping flag is
+ * updated here.
+ */
+static void st54spi_power_off(struct st54spi_data *st54spi)
+{
+	if (st54spi->power_gpio_mode == POWER_MODE_ST54J) {
+		/* Just set SE_PWR_REQ to low */
+		gpiod_set_value(st54spi->gpiod_se_reset, 0);
+	}
+
+#ifdef WITH_SPI_CLK_MNGT
+	/* no need for the SPI clock to be enabled. */
+	dev_dbg(&st54spi->spi->dev,
+		"%s : disabling PMU clock of SPI subsystem\n", __func__);
+	mt_spi_disable_master_clk(st54spi->spi);
+#endif /* WITH_SPI_CLK_MNGT */
+
+	st54spi->se_is_poweron = 0;
+}
+
+/* Assert the SE power request.  ST54J SE-only mode holds the reset line
+ * high; ST54J combo mode issues a short pulse on SPI_nRESET instead.
+ * The usleep_range() calls give the SE time to come out of reset.
+ */
+static void st54spi_power_on(struct st54spi_data *st54spi)
+{
+#ifdef WITH_SPI_CLK_MNGT
+	/* the SPI clock needs to be enabled. */
+	dev_dbg(&st54spi->spi->dev,
+		"%s : enabling PMU clock of SPI subsystem\n", __func__);
+	mt_spi_enable_master_clk(st54spi->spi);
+#endif /* WITH_SPI_CLK_MNGT */
+
+	if (st54spi->power_gpio_mode == POWER_MODE_ST54J) {
+		gpiod_set_value(st54spi->gpiod_se_reset, 1);
+		usleep_range(5000, 5500);
+		dev_info(&st54spi->spi->dev, "%s : st54 set nReset to High\n",
+			 __func__);
+	} else if (st54spi->power_gpio_mode == POWER_MODE_ST54J_COMBO) {
+		/* Just a pulse on SPI_nRESET */
+		gpiod_set_value(st54spi->gpiod_se_reset, 1);
+		usleep_range(5000, 5500);
+		gpiod_set_value(st54spi->gpiod_se_reset, 0);
+		dev_info(&st54spi->spi->dev, "%s : st54 set nReset to Low\n",
+			 __func__);
+		usleep_range(3000, 4000);
+	}
+	st54spi->se_is_poweron = 1;
+}
+
+/* Record the SE HAL's power request and apply it.  Power is only removed
+ * when neither the HAL nor the NFC controller still needs the SE.
+ */
+static void st54spi_power_set(struct st54spi_data *st54spi, int val)
+{
+	if (!st54spi)
+		return;
+
+	dev_dbg(&st54spi->spi->dev, "st54spi sehal pwr_req: %d\n", val);
+
+	if (val) {
+		st54spi->sehal_needs_poweron = 1;
+		st54spi_power_on(st54spi);
+	} else {
+		st54spi->sehal_needs_poweron = 0;
+		if ((st54spi->se_is_poweron == 1) &&
+		    (st54spi->nfcc_needs_poweron == 0))
+			/* we don't need power anymore */
+			st54spi_power_off(st54spi);
+	}
+}
+
+/* Report the current raw state of the SE reset/power-request GPIO. */
+static int st54spi_power_get(struct st54spi_data *st54spi)
+{
+	return gpiod_get_value(st54spi->gpiod_se_reset);
+}
+
+/* Handle mode/speed/power ioctls and segmented full-duplex transfers.
+ * Returns 0 or a positive length on success, negative errno otherwise.
+ */
+static long st54spi_ioctl(struct file *filp, unsigned int cmd,
+			  unsigned long arg)
+{
+	int err = 0;
+	int retval = 0;
+	struct st54spi_data *st54spi;
+	struct spi_device *spi;
+	u32 tmp;
+	unsigned int n_ioc;
+	struct spi_ioc_transfer *ioc;
+
+	/* Check type and command number */
+	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
+		return -ENOTTY;
+
+	/* Check access direction once here; don't repeat below.
+	 * IOC_DIR is from the user perspective, while access_ok is
+	 * from the kernel perspective; so they look reversed.
+	 */
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+	if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
+		err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+	if (err)
+		return -EFAULT;
+
+	/* guard against device removal before, or while,
+	 * we issue this ioctl.
+	 */
+	st54spi = filp->private_data;
+	spin_lock_irq(&st54spi->spi_lock);
+	spi = spi_dev_get(st54spi->spi);
+	spin_unlock_irq(&st54spi->spi_lock);
+
+	if (spi == NULL)
+		return -ESHUTDOWN;
+
+	/* Log only after the NULL check: the previous code dereferenced
+	 * st54spi->spi for dev_dbg() before verifying the device was
+	 * still bound, a NULL-pointer dereference after unbind.
+	 */
+	dev_dbg(&spi->dev, "st54spi ioctl cmd %d\n", cmd);
+
+	/* use the buffer lock here for triple duty:
+	 * - prevent I/O (from us) so calling spi_setup() is safe;
+	 * - prevent concurrent SPI_IOC_WR_* from morphing
+	 *   data fields while SPI_IOC_RD_* reads them;
+	 * - SPI_IOC_MESSAGE needs the buffer locked "normally".
+	 */
+	mutex_lock(&st54spi->buf_lock);
+
+	switch (cmd) {
+	/* read requests */
+	case SPI_IOC_RD_MODE:
+		retval = __put_user(spi->mode & SPI_MODE_MASK,
+				    (__u8 __user *)arg);
+		break;
+	case SPI_IOC_RD_MODE32:
+		retval = __put_user(spi->mode & SPI_MODE_MASK,
+				    (__u32 __user *)arg);
+		break;
+	case SPI_IOC_RD_LSB_FIRST:
+		retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
+				    (__u8 __user *)arg);
+		break;
+	case SPI_IOC_RD_BITS_PER_WORD:
+		retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
+		break;
+	case SPI_IOC_RD_MAX_SPEED_HZ:
+		retval = __put_user(st54spi->speed_hz, (__u32 __user *)arg);
+		break;
+	case ST54SPI_IOC_RD_POWER:
+		dev_dbg(&spi->dev, "st54spi ST54SPI_IOC_RD_POWER\n");
+		retval = __put_user(st54spi_power_get(st54spi),
+				    (__u32 __user *)arg);
+		break;
+
+	/* write requests */
+	case SPI_IOC_WR_MODE:
+	case SPI_IOC_WR_MODE32:
+		if (cmd == SPI_IOC_WR_MODE)
+			retval = __get_user(tmp, (u8 __user *)arg);
+		else
+			retval = __get_user(tmp, (u32 __user *)arg);
+		if (retval == 0) {
+			u32 save = spi->mode;
+
+			if (tmp & ~SPI_MODE_MASK) {
+				retval = -EINVAL;
+				break;
+			}
+
+			/* Preserve bits outside the user-settable mask. */
+			tmp |= spi->mode & ~SPI_MODE_MASK;
+			spi->mode = (u16)tmp;
+			retval = spi_setup(spi);
+			if (retval < 0)
+				spi->mode = save;
+			else
+				dev_dbg(&spi->dev, "spi mode %x\n", tmp);
+		}
+		break;
+	case SPI_IOC_WR_LSB_FIRST:
+		retval = __get_user(tmp, (__u8 __user *)arg);
+		if (retval == 0) {
+			u32 save = spi->mode;
+
+			if (tmp)
+				spi->mode |= SPI_LSB_FIRST;
+			else
+				spi->mode &= ~SPI_LSB_FIRST;
+			retval = spi_setup(spi);
+			if (retval < 0)
+				spi->mode = save;
+			else
+				dev_dbg(&spi->dev, "%csb first\n",
+					tmp ? 'l' : 'm');
+		}
+		break;
+	case SPI_IOC_WR_BITS_PER_WORD:
+		retval = __get_user(tmp, (__u8 __user *)arg);
+		if (retval == 0) {
+			u8 save = spi->bits_per_word;
+
+			spi->bits_per_word = tmp;
+			retval = spi_setup(spi);
+			if (retval < 0)
+				spi->bits_per_word = save;
+			else
+				dev_dbg(&spi->dev, "%d bits per word\n", tmp);
+		}
+		break;
+	case SPI_IOC_WR_MAX_SPEED_HZ:
+		retval = __get_user(tmp, (__u32 __user *)arg);
+		if (retval == 0) {
+			u32 save = spi->max_speed_hz;
+
+			/* Probe the speed via spi_setup(), then restore
+			 * max_speed_hz; only speed_hz is kept.
+			 */
+			spi->max_speed_hz = tmp;
+			retval = spi_setup(spi);
+			if (retval >= 0)
+				st54spi->speed_hz = tmp;
+			else
+				dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
+			spi->max_speed_hz = save;
+		}
+		break;
+	case ST54SPI_IOC_WR_POWER:
+		retval = __get_user(tmp, (__u32 __user *)arg);
+		dev_dbg(&spi->dev,
+			"st54spi ST54SPI_IOC_WR_POWER %d\n", retval);
+		if (retval == 0) {
+			st54spi_power_set(st54spi, tmp ? 1 : 0);
+			dev_dbg(&spi->dev, "SE_POWER_REQ set: %d\n",
+				tmp);
+		}
+		break;
+	default:
+		/* segmented and/or full-duplex I/O request */
+		/* Check message and copy into scratch area */
+		ioc = st54spi_get_ioc_message(
+			cmd, (struct spi_ioc_transfer __user *)arg, &n_ioc);
+		if (IS_ERR(ioc)) {
+			retval = PTR_ERR(ioc);
+			break;
+		}
+		if (!ioc)
+			break; /* n_ioc is also 0 */
+
+		/* translate to spi_message, execute */
+		retval = st54spi_message(st54spi, ioc, n_ioc);
+		kfree(ioc);
+		break;
+	}
+
+	mutex_unlock(&st54spi->buf_lock);
+
+	/* Log before dropping our reference so we never touch a possibly
+	 * cleared st54spi->spi pointer.
+	 */
+	dev_dbg(&spi->dev, "st54spi ioctl retval %d\n", retval);
+	spi_dev_put(spi);
+
+	return retval;
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat path for SPI_IOC_MESSAGE(n): fix up user pointers in the
+ * transfer array, then run the common st54spi_message() engine.
+ */
+static long st54spi_compat_ioc_message(struct file *filp, unsigned int cmd,
+				       unsigned long arg)
+{
+	struct spi_ioc_transfer __user *u_ioc;
+	int retval = 0;
+	struct st54spi_data *st54spi;
+	struct spi_device *spi;
+	unsigned int n_ioc, n;
+	struct spi_ioc_transfer *ioc;
+
+	u_ioc = (struct spi_ioc_transfer __user *)compat_ptr(arg);
+	if (!access_ok(u_ioc, _IOC_SIZE(cmd)))
+		return -EFAULT;
+
+	/* guard against device removal before, or while,
+	 * we issue this ioctl.
+	 */
+	st54spi = filp->private_data;
+	spin_lock_irq(&st54spi->spi_lock);
+	spi = spi_dev_get(st54spi->spi);
+	spin_unlock_irq(&st54spi->spi_lock);
+
+	if (spi == NULL)
+		return -ESHUTDOWN;
+
+	/* Log only after the NULL check: the previous code dereferenced
+	 * st54spi->spi for dev_dbg() before verifying the device was
+	 * still bound, a NULL-pointer dereference after unbind.
+	 */
+	dev_dbg(&spi->dev, "st54spi compat_ioctl cmd %d\n", cmd);
+
+	/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
+	mutex_lock(&st54spi->buf_lock);
+
+	/* Check message and copy into scratch area */
+	ioc = st54spi_get_ioc_message(cmd, u_ioc, &n_ioc);
+	if (IS_ERR(ioc)) {
+		retval = PTR_ERR(ioc);
+		goto done;
+	}
+	if (!ioc)
+		goto done; /* n_ioc is also 0 */
+
+	/* Convert buffer pointers */
+	for (n = 0; n < n_ioc; n++) {
+		ioc[n].rx_buf = (uintptr_t)compat_ptr(ioc[n].rx_buf);
+		ioc[n].tx_buf = (uintptr_t)compat_ptr(ioc[n].tx_buf);
+	}
+
+	/* translate to spi_message, execute */
+	retval = st54spi_message(st54spi, ioc, n_ioc);
+	kfree(ioc);
+
+done:
+	mutex_unlock(&st54spi->buf_lock);
+	dev_dbg(&spi->dev, "st54spi compat_ioctl retval %d\n", retval);
+	spi_dev_put(spi);
+	return retval;
+}
+
+/* Compat ioctl entry: route SPI_IOC_MESSAGE(n) through the pointer-fixup
+ * path, everything else through the native handler.
+ */
+static long st54spi_compat_ioctl(struct file *filp, unsigned int cmd,
+				 unsigned long arg)
+{
+	bool is_message = (_IOC_TYPE(cmd) == SPI_IOC_MAGIC) &&
+			  (_IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))) &&
+			  (_IOC_DIR(cmd) == _IOC_WRITE);
+
+	if (is_message)
+		return st54spi_compat_ioc_message(filp, cmd, arg);
+
+	return st54spi_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define st54spi_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
+/* Open /dev/st54spi: locate the device by minor, enforce single-open,
+ * allocate the bounce buffers and force the SE power on.
+ */
+static int st54spi_open(struct inode *inode, struct file *filp)
+{
+	struct st54spi_data *st54spi;
+	int status = -ENXIO;
+
+	mutex_lock(&device_list_lock);
+
+	list_for_each_entry (st54spi, &device_list, device_entry) {
+		if (st54spi->devt == inode->i_rdev) {
+			status = 0;
+			break;
+		}
+	}
+
+	if (status) {
+		/* No match: after an exhausted list_for_each_entry the
+		 * cursor points at the list head container, NOT a valid
+		 * st54spi_data, so it must not be dereferenced for
+		 * logging (the old dev_dbg did exactly that).
+		 */
+		pr_debug("st54spi: nothing for minor %d\n", iminor(inode));
+		goto err_find_dev;
+	}
+
+	/* Authorize only 1 process to open the device. */
+	if (st54spi->users > 0) {
+		dev_err(&st54spi->spi->dev, "already open\n");
+		mutex_unlock(&device_list_lock);
+		return -EBUSY;
+	}
+
+	dev_dbg(&st54spi->spi->dev, "st54spi: open\n");
+
+	if (!st54spi->tx_buffer) {
+		st54spi->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+		if (!st54spi->tx_buffer) {
+			status = -ENOMEM;
+			goto err_find_dev;
+		}
+	}
+
+	if (!st54spi->rx_buffer) {
+		st54spi->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+		if (!st54spi->rx_buffer) {
+			status = -ENOMEM;
+			goto err_alloc_rx_buf;
+		}
+	}
+
+	st54spi->users++;
+	filp->private_data = st54spi;
+	nonseekable_open(inode, filp);
+
+	mutex_unlock(&device_list_lock);
+
+	dev_dbg(&st54spi->spi->dev, "st54spi: open - force power on\n");
+	st54spi_power_set(st54spi, 1);
+	return 0;
+
+err_alloc_rx_buf:
+	kfree(st54spi->tx_buffer);
+	st54spi->tx_buffer = NULL;
+err_find_dev:
+	mutex_unlock(&device_list_lock);
+	return status;
+}
+
+/* Release /dev/st54spi: on last close drop the SE power request, free the
+ * bounce buffers, and free the whole device state if the underlying spi
+ * device was already unbound.
+ */
+static int st54spi_release(struct inode *inode, struct file *filp)
+{
+	struct st54spi_data *st54spi;
+
+	mutex_lock(&device_list_lock);
+	st54spi = filp->private_data;
+	filp->private_data = NULL;
+
+	dev_dbg(&st54spi->spi->dev, "st54spi: release\n");
+
+	/* last close? */
+	st54spi->users--;
+	if (!st54spi->users) {
+		int dofree;
+
+		dev_dbg(&st54spi->spi->dev,
+			"st54spi: release - may allow power off\n");
+		st54spi_power_set(st54spi, 0);
+
+		kfree(st54spi->tx_buffer);
+		st54spi->tx_buffer = NULL;
+
+		kfree(st54spi->rx_buffer);
+		st54spi->rx_buffer = NULL;
+
+		spin_lock_irq(&st54spi->spi_lock);
+		if (st54spi->spi)
+			st54spi->speed_hz = st54spi->spi->max_speed_hz;
+
+		/* ... after we unbound from the underlying device? */
+		dofree = ((st54spi->spi == NULL) &&
+			  (st54spi->spi_reset == NULL));
+		spin_unlock_irq(&st54spi->spi_lock);
+
+		/* remove() skipped the kfree while users > 0; do it now. */
+		if (dofree)
+			kfree(st54spi);
+	}
+	mutex_unlock(&device_list_lock);
+
+	return 0;
+}
+
+/* File operations for the /dev/st54spi character device. */
+static const struct file_operations st54spi_fops = {
+	.owner = THIS_MODULE,
+	/*
+	 * REVISIT switch to aio primitives, so that userspace
+	 * gets more complete API coverage.  It'll simplify things
+	 * too, except for the locking.
+	 */
+	.write = st54spi_write,
+	.read = st54spi_read,
+	.unlocked_ioctl = st54spi_ioctl,
+	.compat_ioctl = st54spi_compat_ioctl,
+	.open = st54spi_open,
+	.release = st54spi_release,
+	.llseek = no_llseek,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The main reason to have this class is to make mdev/udev create the
+ * /dev/st54spi character device nodes exposing our userspace API.
+ * It also simplifies memory management.
+ */
+
+static struct class *st54spi_class;
+
+/* Device-tree match table: bind to nodes with compatible = "st,st54spi". */
+static const struct of_device_id st54spi_dt_ids[] = {
+	{ .compatible = "st,st54spi" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, st54spi_dt_ids);
+
+#ifdef CONFIG_ACPI
+
+/* Placeholder SPI devices not to be used in production systems */
+#define ST54SPI_ACPI_PLACEHOLDER 1
+
+/* ACPI match table; all entries are development placeholders (see below). */
+static const struct acpi_device_id st54spi_acpi_ids[] = {
+	/*
+	 * The ACPI SPT000* devices are only meant for development and
+	 * testing. Systems used in production should have a proper ACPI
+	 * description of the connected peripheral and they should also
+	 * use a proper driver instead of poking directly to the SPI bus
+	 */
+	{ "SPT0001", ST54SPI_ACPI_PLACEHOLDER },
+	{ "SPT0002", ST54SPI_ACPI_PLACEHOLDER },
+	{ "SPT0003", ST54SPI_ACPI_PLACEHOLDER },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, st54spi_acpi_ids);
+
+/* Warn (via WARN_ON) if an ACPI-described device does not match any entry
+ * in st54spi_acpi_ids; no other action is taken.
+ */
+static void st54spi_probe_acpi(struct spi_device *spi)
+{
+	const struct acpi_device_id *id;
+
+	if (!has_acpi_companion(&spi->dev))
+		return;
+
+	id = acpi_match_device(st54spi_acpi_ids, &spi->dev);
+	if (WARN_ON(!id))
+		return;
+}
+#else
+/* No-op stub when ACPI support is not compiled in. */
+static inline void st54spi_probe_acpi(struct spi_device *spi)
+{
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* Parse the optional "power_mode" DT property and claim the SE reset GPIO
+ * for the ST54J modes.  Returns 0 on success or a negative errno.
+ */
+static int st54spi_parse_dt(struct device *dev, struct st54spi_data *pdata)
+{
+	int r = 0;
+	struct device_node *np = dev->of_node;
+	const char *power_mode;
+
+#ifndef GKI_MODULE
+	np = of_find_compatible_node(NULL, NULL, "st,st54spi");
+#endif
+
+	/* No DT node: keep defaults and report success. */
+	if (!np) {
+		return r;
+	}
+
+	/* Read power mode. */
+	power_mode = of_get_property(np, "power_mode", NULL);
+	if (!power_mode) {
+		dev_info(dev, "%s: Default power mode: ST54J Combo\n",
+			 __FILE__);
+		pdata->power_gpio_mode = POWER_MODE_ST54J_COMBO;
+	} else if (!strcmp(power_mode, "ST54Jse")) {
+		dev_info(dev, "%s: Power mode: ST54J SE-only\n",
+			 __FILE__);
+		pdata->power_gpio_mode = POWER_MODE_ST54J;
+	} else if (!strcmp(power_mode, "ST54J")) {
+		dev_info(dev, "%s: Power mode: ST54J Combo\n",
+			 __FILE__);
+		pdata->power_gpio_mode = POWER_MODE_ST54J_COMBO;
+	} else if (!strcmp(power_mode, "ST54H")) {
+		dev_info(dev, "%s: Power mode: ST54H\n", __FILE__);
+		pdata->power_gpio_mode = POWER_MODE_ST54H;
+	} else {
+		dev_err(dev, "%s: Power mode unknown: %s\n", __FILE__,
+			power_mode);
+		/* Invalid DT value: -EINVAL, not -EFAULT (-EFAULT means a
+		 * bad user-space address and was misleading here).
+		 */
+		return -EINVAL;
+	}
+
+	/* Get the Gpio */
+	if ((pdata->power_gpio_mode == POWER_MODE_ST54J_COMBO) ||
+	    (pdata->power_gpio_mode == POWER_MODE_ST54J)) {
+		pdata->gpiod_se_reset =
+			devm_gpiod_get(dev, "esereset", GPIOD_OUT_LOW);
+		if (IS_ERR(pdata->gpiod_se_reset)) {
+			/* Print the actual errno; IS_ERR() is a bool and
+			 * always printed as 1 in the old message.
+			 */
+			dev_err(dev,
+				"%s : Unable to request esereset %ld\n",
+				__func__,
+				PTR_ERR(pdata->gpiod_se_reset));
+			return -ENODEV;
+		}
+	} else {
+		dev_err(dev, "%s: ST54H mode not supported", __FILE__);
+	}
+	return r;
+}
+
+/* Bind to a st54spi SPI device: register the chrdev/class (GKI builds),
+ * allocate the per-device state, create the /dev node and parse DT.
+ */
+static int st54spi_probe(struct spi_device *spi)
+{
+	struct st54spi_data *st54spi;
+	int status;
+	unsigned long minor;
+#ifdef ST54NFC_QCOM
+	struct device *dev = &spi->dev;
+	struct spi_geni_qcom_ctrl_data *spi_param;
+#endif /* ST54NFC_QCOM */
+
+#ifdef GKI_MODULE
+	/* Claim our 256 reserved device numbers. Then register a class
+	 * that will key udev/mdev to add/remove /dev nodes. Last, register
+	 * the driver which manages those device numbers.
+	 */
+	BUILD_BUG_ON(N_SPI_MINORS > 256);
+	st54spi_major =
+		__register_chrdev(0, 0, N_SPI_MINORS, "spi", &st54spi_fops);
+	dev_info(&spi->dev, "Loading st54spi driver, major: %d\n",
+		 st54spi_major);
+
+	st54spi_class = class_create(THIS_MODULE, "st54spi");
+	if (IS_ERR(st54spi_class)) {
+		unregister_chrdev(st54spi_major, "st54spi");
+		return PTR_ERR(st54spi_class);
+	}
+#endif
+
+	/*
+	 * st54spi should never be referenced in DT without a specific
+	 * compatible string, it is a Linux implementation thing
+	 * rather than a description of the hardware.
+	 */
+
+	st54spi_probe_acpi(spi);
+
+	/* Allocate driver data */
+	st54spi = kzalloc(sizeof(*st54spi), GFP_KERNEL);
+	if (!st54spi)
+		return -ENOMEM;
+
+	/* Initialize the driver data */
+	st54spi->spi = spi;
+	spin_lock_init(&st54spi->spi_lock);
+	mutex_init(&st54spi->buf_lock);
+
+	INIT_LIST_HEAD(&st54spi->device_entry);
+
+	/* If we can allocate a minor number, hook up this device.
+	 * Reusing minors is fine so long as udev or mdev is working.
+	 */
+	mutex_lock(&device_list_lock);
+	minor = find_first_zero_bit(minors, N_SPI_MINORS);
+	if (minor < N_SPI_MINORS) {
+		struct device *dev;
+
+		st54spi->devt = MKDEV(st54spi_major, minor);
+		dev = device_create(st54spi_class, &spi->dev, st54spi->devt,
+				    st54spi, "st54spi");
+		status = PTR_ERR_OR_ZERO(dev);
+	} else {
+		dev_dbg(&spi->dev, "%s : no minor number available!\n",
+			__FILE__);
+		status = -ENODEV;
+	}
+	if (status == 0) {
+		set_bit(minor, minors);
+		list_add(&st54spi->device_entry, &device_list);
+	}
+	mutex_unlock(&device_list_lock);
+
+	st54spi->speed_hz = spi->max_speed_hz;
+	dev_dbg(&spi->dev, "%s : st54spi->speed_hz=%d\n", __FILE__,
+		st54spi->speed_hz);
+
+	/* set timings for ST54 */
+#ifdef ST54NFC_QCOM
+	/* Allocate the struct itself, not the size of a pointer as the
+	 * original sizeof(spi_param) did.
+	 */
+	spi_param = devm_kzalloc(dev, sizeof(*spi_param), GFP_KERNEL);
+	/* NOTE(review): returning here leaks st54spi and the /dev node
+	 * created above - confirm and add cleanup if this path is ever
+	 * compiled in (ST54NFC_QCOM is currently #undef'd).
+	 */
+	if (spi_param == NULL)
+		return -ENOMEM;
+
+	/* Initialize the driver data */
+	spi_param->spi_cs_clk_delay = 90;
+	spi->controller_data = spi_param;
+
+#else
+	dev_err(&spi->dev, "%s : TSU_NSS configuration be implemented!\n",
+		__func__);
+	/*
+	 * platform-specific method to configure the delay between NSS
+	 * selection and the start of data transfer (clk).
+	 * If no specific method required, you can comment above line.
+	 */
+#endif
+	spi->bits_per_word = 8;
+
+	if (status == 0) {
+		spi_set_drvdata(spi, st54spi);
+		/* DT parse failures are logged but non-fatal. */
+		(void)st54spi_parse_dt(&spi->dev, st54spi);
+	} else {
+		kfree(st54spi);
+	}
+
+	return status;
+}
+
+/* Unbind: detach the spi device from open file descriptors, remove the
+ * /dev node, and free the state unless a file descriptor is still open
+ * (st54spi_release() frees it in that case).
+ */
+static int st54spi_remove(struct spi_device *spi)
+{
+	struct st54spi_data *st54spi = spi_get_drvdata(spi);
+
+	/* make sure ops on existing fds can abort cleanly */
+	spin_lock_irq(&st54spi->spi_lock);
+	st54spi->spi = NULL;
+	st54spi->spi_reset = NULL;
+	spin_unlock_irq(&st54spi->spi_lock);
+
+	/* prevent new opens */
+	mutex_lock(&device_list_lock);
+	list_del(&st54spi->device_entry);
+	device_destroy(st54spi_class, st54spi->devt);
+	clear_bit(MINOR(st54spi->devt), minors);
+	if (st54spi->users == 0) {
+		kfree(st54spi);
+#ifdef GKI_MODULE
+		/* Class/chrdev were registered in probe() on GKI builds. */
+		class_destroy(st54spi_class);
+		unregister_chrdev(st54spi_major, "st54spi");
+#endif
+	}
+	mutex_unlock(&device_list_lock);
+
+	return 0;
+}
+
+/* SPI driver descriptor: matches via DT or ACPI tables above. */
+static struct spi_driver st54spi_spi_driver = {
+	.driver = {
+			.name = "st54spi",
+			.of_match_table = of_match_ptr(st54spi_dt_ids),
+			.acpi_match_table = ACPI_PTR(st54spi_acpi_ids),
+		},
+	.probe = st54spi_probe,
+	.remove = st54spi_remove,
+
+	/* NOTE: suspend/resume methods are not necessary here.
+	 * We don't do anything except pass the requests to/from
+	 * the underlying controller.  The refrigerator handles
+	 * most issues; the controller driver handles the rest.
+	 */
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef GKI_MODULE
+module_spi_driver(st54spi_spi_driver);
+#else
+static int __init st54spi_init(void)
+{
+	int status;
+
+	pr_info("Loading st54spi driver\n");
+
+	/* Claim our 256 reserved device numbers. Then register a class
+	 * that will key udev/mdev to add/remove /dev nodes. Last, register
+	 * the driver which manages those device numbers.
+	 */
+	BUILD_BUG_ON(N_SPI_MINORS > 256);
+	st54spi_major =
+		__register_chrdev(0, 0, N_SPI_MINORS, "spi", &st54spi_fops);
+	/* __register_chrdev() returns a negative errno on failure; the
+	 * original code ignored it and continued with a bogus major.
+	 */
+	if (st54spi_major < 0) {
+		pr_err("st54spi: chrdev registration failed: %d\n",
+		       st54spi_major);
+		return st54spi_major;
+	}
+	pr_info("Loading st54spi driver, major: %d\n", st54spi_major);
+
+	st54spi_class = class_create(THIS_MODULE, "st54spi");
+	if (IS_ERR(st54spi_class)) {
+		unregister_chrdev(st54spi_major,
+				  st54spi_spi_driver.driver.name);
+		return PTR_ERR(st54spi_class);
+	}
+
+	status = spi_register_driver(&st54spi_spi_driver);
+	if (status < 0) {
+		/* Unwind class and chrdev registration on failure. */
+		class_destroy(st54spi_class);
+		unregister_chrdev(st54spi_major,
+				  st54spi_spi_driver.driver.name);
+	}
+	pr_info("Loading st54spi driver: %d\n", status);
+	return status;
+}
+module_init(st54spi_init);
+
+static void __exit st54spi_exit(void)
+{
+	/* Tear down in reverse order of st54spi_init(): unbind the driver
+	 * first so no new devices appear, then drop the class and release
+	 * the character device region.
+	 */
+	spi_unregister_driver(&st54spi_spi_driver);
+	class_destroy(st54spi_class);
+	unregister_chrdev(st54spi_major, st54spi_spi_driver.driver.name);
+}
+#endif
+
+MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
+MODULE_DESCRIPTION("User mode SPI device interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:st54spi");
diff --git a/st21nfc.c b/st21nfc.c
new file mode 100644
index 0000000..d2632ba
--- /dev/null
+++ b/st21nfc.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 ST Microelectronics S.A.
+ * Copyright (C) 2010 Stollmann E+V GmbH
+ * Copyright (C) 2010 Trusted Logic S.A.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/of_gpio.h>
+#include <linux/workqueue.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <net/nfc/nci.h>
+#include <linux/clk.h>
+#include <soc/google/exynos-pmu-if.h>
+#include "st21nfc.h"
+
+/* Max single transfer; presumably 255-byte NCI payload + 3-byte header,
+ * rounded up — TODO confirm against the controller datasheet.
+ */
+#define MAX_BUFFER_SIZE 260
+#define HEADER_LENGTH 3
+#define IDLE_CHARACTER 0x7e
+#define ST21NFC_POWER_STATE_MAX 3
+/* Wakeup-event grace period in ms granted after an NFCC interrupt. */
+#define WAKEUP_SRC_TIMEOUT (2000)
+
+#define DRIVER_VERSION "2.0.16"
+
+/* Proprietary NCI notifications signalling Reader/Writer activity. */
+#define PROP_PWR_MON_RW_ON_NTF nci_opcode_pack(NCI_GID_PROPRIETARY, 5)
+#define PROP_PWR_MON_RW_OFF_NTF nci_opcode_pack(NCI_GID_PROPRIETARY, 6)
+
+/*The enum is used to index a pw_states array, the values matter here*/
+enum st21nfc_power_state {
+	ST21NFC_IDLE = 0,
+	ST21NFC_ACTIVE = 1,
+	ST21NFC_ACTIVE_RW = 2
+};
+
+/* Human-readable names, indexed by enum st21nfc_power_state. */
+static const char *const st21nfc_power_state_name[] = {
+	"IDLE", "ACTIVE", "ACTIVE_RW"
+};
+
+/* Read-side state machine: alternating NCI header / payload reads. */
+enum st21nfc_read_state {
+	ST21NFC_HEADER,
+	ST21NFC_PAYLOAD
+};
+
+/* Per-power-state accounting; all timestamps are boottime milliseconds. */
+struct nfc_sub_power_stats {
+	uint64_t count;
+	uint64_t duration;
+	uint64_t last_entry;
+	uint64_t last_exit;
+};
+
+struct nfc_sub_power_stats_error {
+	/* error transition header --> payload state machine */
+	uint64_t header_payload;
+	/* error transition from an active state when not in idle state */
+	uint64_t active_not_idle;
+	/* error transition from idle state to idle state */
+	uint64_t idle_to_idle;
+	/* warning transition from active_rw state to idle state */
+	uint64_t active_rw_to_idle;
+	/* error transition from active state to active state */
+	uint64_t active_to_active;
+	/* error transition from idle state to active state with notification */
+	uint64_t idle_to_active_ntf;
+	/* error transition from active_rw state to active_rw state */
+	uint64_t act_rw_to_act_rw;
+	/* error transition from idle state to */
+	/* active_rw state with notification */
+	uint64_t idle_to_active_rw_ntf;
+};
+
+/*
+ * The member 'polarity_mode' defines
+ * how the wakeup pin is configured and handled.
+ * it can take the following values :
+ * IRQF_TRIGGER_RISING
+ * IRQF_TRIGGER_HIGH
+ */
+struct st21nfc_device {
+	wait_queue_head_t read_wq;
+	struct mutex read_mutex;
+	struct mutex pidle_mutex;
+	struct i2c_client *client;
+	struct miscdevice st21nfc_device;
+	/* I2C transfer scratch buffer, protected by read_mutex */
+	uint8_t buffer[MAX_BUFFER_SIZE];
+	bool irq_enabled;
+	bool irq_wake_up;
+	bool irq_is_attached;
+	bool device_open; /* Is device open? */
+	spinlock_t irq_enabled_lock;
+	enum st21nfc_power_state pw_current;
+	enum st21nfc_read_state r_state_current;
+	int irq_pw_stats_idle;
+	/* pidle gpio level sampled at suspend, compared at resume */
+	int p_idle_last;
+	struct nfc_sub_power_stats pw_states[ST21NFC_POWER_STATE_MAX];
+	struct nfc_sub_power_stats_error pw_states_err;
+	struct workqueue_struct *st_p_wq;
+	struct work_struct st_p_work;
+	/*Power state shadow copies for reading*/
+	enum st21nfc_power_state c_pw_current;
+	struct nfc_sub_power_stats c_pw_states[ST21NFC_POWER_STATE_MAX];
+	struct nfc_sub_power_stats_error c_pw_states_err;
+
+	/* CLK control */
+	bool clk_run;
+	struct clk *s_clk;
+	uint8_t pinctrl_en;
+	int irq_clkreq;
+	/* PMU register offset for the clock pad, from DT "pmu_clk_pad" */
+	unsigned int clk_pad;
+
+	/* GPIO for NFCC IRQ pin (input) */
+	struct gpio_desc *gpiod_irq;
+	/* GPIO for NFCC Reset pin (output) */
+	struct gpio_desc *gpiod_reset;
+	/* GPIO for NFCC CLK_REQ pin (input) */
+	struct gpio_desc *gpiod_clkreq;
+	/* GPIO for NFCC CLF_MONITOR_PWR (input) */
+	struct gpio_desc *gpiod_pidle;
+	/* irq_gpio polarity to be used */
+	unsigned int polarity_mode;
+};
+
+/*
+ * Routine to enable clock.
+ * this routine can be extended to select from multiple
+ * sources based on clk_src_name.
+ */
+static int st21nfc_clock_select(struct st21nfc_device *st21nfc_dev)
+{
+	int ret = 0;
+
+	st21nfc_dev->s_clk = clk_get(&st21nfc_dev->client->dev, "nfc_ref_clk");
+
+	/* if NULL we assume external crystal and dont fail */
+	if ((st21nfc_dev->s_clk == NULL) || IS_ERR(st21nfc_dev->s_clk))
+		return 0;
+
+	if (st21nfc_dev->clk_run == false) {
+		ret = clk_prepare_enable(st21nfc_dev->s_clk);
+		/* Fix: propagate the real error code from
+		 * clk_prepare_enable() instead of flattening every
+		 * failure to -EINVAL via the old goto label.
+		 */
+		if (ret)
+			return ret;
+
+		st21nfc_dev->clk_run = true;
+	}
+	return ret;
+}
+
+/*
+ * Routine to disable clocks
+ */
+static int st21nfc_clock_deselect(struct st21nfc_device *st21nfc_dev)
+{
+	struct clk *clk = st21nfc_dev->s_clk;
+
+	/* No clock handle: external crystal assumed, nothing to undo. */
+	if (clk == NULL || IS_ERR(clk))
+		return 0;
+
+	if (!st21nfc_dev->clk_run)
+		return 0;
+
+	clk_disable_unprepare(clk);
+	st21nfc_dev->clk_run = false;
+	return 0;
+}
+
+/* CLK_REQ edge handler: mirror the request line into the PMU clock pad
+ * when pinctrl-based clock control is enabled.
+ */
+static irqreturn_t st21nfc_clkreq_irq_handler(int irq, void *dev_id)
+{
+	struct st21nfc_device *st21nfc_dev = dev_id;
+	int level = gpiod_get_value(st21nfc_dev->gpiod_clkreq);
+
+	if (st21nfc_dev->pinctrl_en)
+		exynos_pmu_update(st21nfc_dev->clk_pad, 1, level ? 1 : 0);
+
+	return IRQ_HANDLED;
+}
+
+/* Atomically mask the NFCC interrupt if currently enabled.
+ * disable_irq_nosync() is used so this is safe to call from the IRQ
+ * handler itself; irq_enabled_lock keeps the flag and the irq line
+ * in sync.
+ */
+static void st21nfc_disable_irq(struct st21nfc_device *st21nfc_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&st21nfc_dev->irq_enabled_lock, flags);
+	if (st21nfc_dev->irq_enabled) {
+		disable_irq_nosync(st21nfc_dev->client->irq);
+		st21nfc_dev->irq_enabled = false;
+	}
+	spin_unlock_irqrestore(&st21nfc_dev->irq_enabled_lock, flags);
+}
+
+/* Atomically unmask the NFCC interrupt if currently disabled;
+ * counterpart of st21nfc_disable_irq().
+ */
+static void st21nfc_enable_irq(struct st21nfc_device *st21nfc_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&st21nfc_dev->irq_enabled_lock, flags);
+	if (!st21nfc_dev->irq_enabled) {
+		st21nfc_dev->irq_enabled = true;
+		enable_irq(st21nfc_dev->client->irq);
+
+	}
+	spin_unlock_irqrestore(&st21nfc_dev->irq_enabled_lock, flags);
+}
+
+/* NFCC data-ready interrupt: hold a wakeup event, mask the irq (it is
+ * re-armed from poll()), and wake any reader blocked on read_wq.
+ */
+static irqreturn_t st21nfc_dev_irq_handler(int irq, void *dev_id)
+{
+	struct st21nfc_device *st21nfc_dev = dev_id;
+
+	if (device_may_wakeup(&st21nfc_dev->client->dev))
+		pm_wakeup_event(&st21nfc_dev->client->dev,
+			WAKEUP_SRC_TIMEOUT);
+	st21nfc_disable_irq(st21nfc_dev);
+
+	/* Wake up waiting readers */
+	wake_up(&st21nfc_dev->read_wq);
+
+	return IRQ_HANDLED;
+}
+
+/* Reconfigure the wakeup-pin interrupt trigger (edge-rising or
+ * level-high). Any previously attached handler is released first, then
+ * the irq is re-requested with the new trigger and left masked until
+ * poll() arms it.
+ *
+ * Returns 0 on success, -ENODEV if the trigger type or the irq request
+ * fails.
+ */
+static int st21nfc_loc_set_polaritymode(struct st21nfc_device *st21nfc_dev,
+					int mode)
+{
+	struct i2c_client *client = st21nfc_dev->client;
+	struct device *dev = &client->dev;
+	unsigned int irq_type;
+	int ret;
+
+	st21nfc_dev->polarity_mode = mode;
+	/* setup irq_flags */
+	switch (mode) {
+	case IRQF_TRIGGER_RISING:
+		irq_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQF_TRIGGER_HIGH:
+		irq_type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	default:
+		/* Unknown modes silently fall back to edge-rising. */
+		irq_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	}
+	if (st21nfc_dev->irq_is_attached) {
+		devm_free_irq(dev, client->irq, st21nfc_dev);
+		st21nfc_dev->irq_is_attached = false;
+	}
+	ret = irq_set_irq_type(client->irq, irq_type);
+	if (ret) {
+		dev_err(dev, "%s : set_irq_type failed\n", __func__);
+		return -ENODEV;
+	}
+	/* request irq. the irq is set whenever the chip has data available
+	 * for reading. it is cleared when all data has been read.
+	 */
+	dev_dbg(dev, "%s : requesting IRQ %d\n", __func__, client->irq);
+	st21nfc_dev->irq_enabled = true;
+
+	ret = devm_request_irq(dev, client->irq, st21nfc_dev_irq_handler,
+				st21nfc_dev->polarity_mode,
+				client->name, st21nfc_dev);
+	if (ret) {
+		dev_err(dev, "%s : devm_request_irq failed\n", __func__);
+		return -ENODEV;
+	}
+	st21nfc_dev->irq_is_attached = true;
+	/* Leave the irq masked until a reader polls for data. */
+	st21nfc_disable_irq(st21nfc_dev);
+
+	return ret;
+}
+
+
+/* Record a power-state transition at current_time_ms (boottime ms).
+ * Same-state transitions are counted as errors (except the very first
+ * IDLE->IDLE before any entry timestamp exists) and do not update the
+ * accounting; otherwise the old state's duration is closed out and the
+ * new state's count/entry timestamp are updated. Serialized by
+ * pidle_mutex against power_stats_show().
+ */
+static void st21nfc_power_stats_switch(
+	struct st21nfc_device *st21nfc_dev, uint64_t current_time_ms,
+	enum st21nfc_power_state old_state, enum st21nfc_power_state new_state,
+	bool is_ntf)
+{
+	mutex_lock(&st21nfc_dev->pidle_mutex);
+
+	if (new_state == old_state) {
+		if ((st21nfc_dev->pw_states[ST21NFC_IDLE].last_entry != 0) ||
+		    (old_state != ST21NFC_IDLE)) {
+			dev_err(&st21nfc_dev->client->dev,
+				"%s Error: Switched from %s to %s!: %llx, ntf=%d\n",
+				__func__, st21nfc_power_state_name[old_state],
+				st21nfc_power_state_name[new_state],
+				current_time_ms, is_ntf);
+			if (new_state == ST21NFC_IDLE)
+				st21nfc_dev->pw_states_err.idle_to_idle++;
+			else if (new_state == ST21NFC_ACTIVE)
+				st21nfc_dev->pw_states_err.active_to_active++;
+			else if (new_state == ST21NFC_ACTIVE_RW)
+				st21nfc_dev->pw_states_err.act_rw_to_act_rw++;
+
+			mutex_unlock(&st21nfc_dev->pidle_mutex);
+			return;
+		}
+	} else if (!is_ntf &&
+		   new_state == ST21NFC_ACTIVE &&
+		   old_state != ST21NFC_IDLE) {
+		st21nfc_dev->pw_states_err.active_not_idle++;
+	} else if (!is_ntf &&
+		   new_state == ST21NFC_IDLE &&
+		   old_state == ST21NFC_ACTIVE_RW) {
+		st21nfc_dev->pw_states_err.active_rw_to_idle++;
+	} else if (is_ntf &&
+		   new_state == ST21NFC_ACTIVE &&
+		   old_state == ST21NFC_IDLE) {
+		st21nfc_dev->pw_states_err.idle_to_active_ntf++;
+	} else if (is_ntf &&
+		   new_state == ST21NFC_ACTIVE_RW &&
+		   old_state == ST21NFC_IDLE) {
+		st21nfc_dev->pw_states_err.idle_to_active_rw_ntf++;
+	}
+
+	/* Suspicious-but-tolerated transitions above were only counted;
+	 * the accounting below runs for every state change.
+	 */
+	dev_dbg(&st21nfc_dev->client->dev,
+		"%s Switching from %s to %s: %llx, ntf=%d\n", __func__,
+		st21nfc_power_state_name[old_state],
+		st21nfc_power_state_name[new_state], current_time_ms, is_ntf);
+	st21nfc_dev->pw_states[old_state].last_exit = current_time_ms;
+	st21nfc_dev->pw_states[old_state].duration +=
+		st21nfc_dev->pw_states[old_state].last_exit -
+		st21nfc_dev->pw_states[old_state].last_entry;
+	st21nfc_dev->pw_states[new_state].count++;
+	st21nfc_dev->pw_current = new_state;
+	st21nfc_dev->pw_states[new_state].last_entry = current_time_ms;
+
+	mutex_unlock(&st21nfc_dev->pidle_mutex);
+}
+
+/* Sample the CLF_MONITOR_PWR gpio and account the matching transition:
+ * high maps to ACTIVE, low maps to IDLE.
+ */
+static void st21nfc_power_stats_idle_signal(struct st21nfc_device *st21nfc_dev)
+{
+	uint64_t now_ms = ktime_to_ms(ktime_get_boottime());
+	enum st21nfc_power_state next_state =
+		gpiod_get_value(st21nfc_dev->gpiod_pidle) ?
+			ST21NFC_ACTIVE : ST21NFC_IDLE;
+
+	st21nfc_power_stats_switch(st21nfc_dev, now_ms,
+				   st21nfc_dev->pw_current, next_state,
+				   false);
+}
+
+/* Workqueue handler: defer gpio sampling and state accounting out of
+ * hard-irq context. Fix: made static — it is only referenced through
+ * INIT_WORK() in this file and should not pollute the kernel's global
+ * namespace.
+ */
+static void st21nfc_pstate_wq(struct work_struct *work)
+{
+	struct st21nfc_device *st21nfc_dev = container_of(work,
+							  struct st21nfc_device,
+							  st_p_work);
+
+	st21nfc_power_stats_idle_signal(st21nfc_dev);
+}
+
+/* Both-edge irq on the pidle gpio: punt to the st_p_wq workqueue since
+ * the accounting path takes a mutex and cannot run in irq context.
+ */
+static irqreturn_t st21nfc_dev_power_stats_handler(int irq, void *dev_id)
+{
+	struct st21nfc_device *st21nfc_dev = dev_id;
+
+	queue_work(st21nfc_dev->st_p_wq, &(st21nfc_dev->st_p_work));
+
+	return IRQ_HANDLED;
+}
+
+/* Inspect an inbound NCI header for proprietary power-monitor
+ * notifications and account the matching ACTIVE/ACTIVE_RW transition.
+ * Only runs when power-state tracking is available (pidle gpio present)
+ * and the read state machine expects a header.
+ */
+static void st21nfc_power_stats_filter(
+	struct st21nfc_device *st21nfc_dev, char *buf, size_t count)
+{
+	uint64_t current_time_ms = ktime_to_ms(ktime_get_boottime());
+	__u16 ntf_opcode = nci_opcode(buf);
+
+	if (IS_ERR(st21nfc_dev->gpiod_pidle))
+		return;
+
+	/* In order to avoid counting active state on PAYLOAD where it would
+	 * match a possible header, power states are filtered only on NCI
+	 * headers.
+	 */
+	if (st21nfc_dev->r_state_current != ST21NFC_HEADER)
+		return;
+
+	if (count != HEADER_LENGTH) {
+		/* Fix: the "%s" conversion was missing its __func__
+		 * argument — undefined behavior in the printk machinery.
+		 */
+		dev_err(&st21nfc_dev->client->dev,
+			"%s Warning: expect previous one was idle data\n",
+			__func__);
+		st21nfc_dev->pw_states_err.header_payload++;
+		return;
+	}
+
+	/* NOTE(review): this only skips packets that are both non-NTF and
+	 * non-proprietary; a logical OR may have been intended — confirm
+	 * against the NCI spec before changing.
+	 */
+	if (nci_mt(buf) != NCI_MT_NTF_PKT
+	    && nci_opcode_gid(ntf_opcode) != NCI_GID_PROPRIETARY)
+		return;
+
+	switch (ntf_opcode) {
+	case PROP_PWR_MON_RW_OFF_NTF:
+		/* RW session ended: controller is plain ACTIVE again. */
+		st21nfc_power_stats_switch(st21nfc_dev, current_time_ms,
+			st21nfc_dev->pw_current, ST21NFC_ACTIVE, true);
+		break;
+	case PROP_PWR_MON_RW_ON_NTF:
+		st21nfc_power_stats_switch(st21nfc_dev, current_time_ms,
+			st21nfc_dev->pw_current, ST21NFC_ACTIVE_RW, true);
+		break;
+	default:
+		return;
+	}
+}
+
+/* Character-device read: pull up to MAX_BUFFER_SIZE bytes from the
+ * controller over I2C. When reading a header, leading 0x7e idle filler
+ * is stripped and the missing bytes re-read so userspace always sees a
+ * complete header; the header's length byte (buffer[2]) then drives the
+ * HEADER/PAYLOAD state machine used by the power-stats filter.
+ *
+ * Returns the byte count copied to userspace, or a negative errno.
+ */
+static ssize_t st21nfc_dev_read(struct file *filp, char __user *buf,
+				size_t count, loff_t *offset)
+{
+	struct st21nfc_device *st21nfc_dev = container_of(filp->private_data,
+							  struct st21nfc_device,
+							  st21nfc_device);
+	int ret, idle = 0;
+
+	if (count == 0)
+		return 0;
+
+	if (count > MAX_BUFFER_SIZE)
+		count = MAX_BUFFER_SIZE;
+
+	dev_dbg(&st21nfc_dev->client->dev, "%s : reading %zu bytes.\n",
+		__func__, count);
+
+	mutex_lock(&st21nfc_dev->read_mutex);
+
+	/* Read data */
+	ret = i2c_master_recv(st21nfc_dev->client, st21nfc_dev->buffer, count);
+	if (ret < 0) {
+		dev_err(&st21nfc_dev->client->dev,
+			"%s: i2c_master_recv returned %d\n", __func__, ret);
+		mutex_unlock(&st21nfc_dev->read_mutex);
+		return ret;
+	}
+	if (st21nfc_dev->r_state_current == ST21NFC_HEADER) {
+		/* Counting idle index */
+		for (idle = 0;
+		     idle < ret && st21nfc_dev->buffer[idle] == IDLE_CHARACTER;
+		     idle++)
+			;
+
+		/* Partial idle prefix: compact the real bytes to the front
+		 * and fetch the remainder of the header from the chip.
+		 */
+		if (idle > 0 && idle < HEADER_LENGTH) {
+			memmove(st21nfc_dev->buffer,
+				st21nfc_dev->buffer + idle, ret - idle);
+			ret = i2c_master_recv(st21nfc_dev->client,
+					      st21nfc_dev->buffer + ret - idle,
+					      idle);
+			if (ret < 0) {
+				dev_err(&st21nfc_dev->client->dev,
+					"%s: i2c_master_recv returned %d\n",
+					__func__, ret);
+				mutex_unlock(&st21nfc_dev->read_mutex);
+				return ret;
+			}
+			ret = count;
+		}
+	}
+	mutex_unlock(&st21nfc_dev->read_mutex);
+
+	if (ret < 0) {
+		dev_err(&st21nfc_dev->client->dev,
+			"%s: i2c_master_recv returned %d\n", __func__, ret);
+		return ret;
+	}
+	if (ret > count) {
+		dev_err(&st21nfc_dev->client->dev,
+			"%s: received too many bytes from i2c (%d)\n", __func__,
+			ret);
+		return -EIO;
+	}
+
+	/* idle >= HEADER_LENGTH means a buffer of pure filler: skip both
+	 * the power-stats filter and the state-machine update.
+	 */
+	if (idle < HEADER_LENGTH) {
+		st21nfc_power_stats_filter(st21nfc_dev, st21nfc_dev->buffer,
+					   ret);
+		/* change state only if a payload is detected, i.e. size > 0*/
+		if ((st21nfc_dev->r_state_current == ST21NFC_HEADER) &&
+		    (st21nfc_dev->buffer[2] > 0)) {
+			st21nfc_dev->r_state_current = ST21NFC_PAYLOAD;
+			dev_dbg(&st21nfc_dev->client->dev,
+				"%s : new state = ST21NFC_PAYLOAD\n", __func__);
+		} else {
+			st21nfc_dev->r_state_current = ST21NFC_HEADER;
+			dev_dbg(&st21nfc_dev->client->dev,
+				"%s : new state = ST21NFC_HEADER\n", __func__);
+		}
+	}
+
+	if (copy_to_user(buf, st21nfc_dev->buffer, ret)) {
+		dev_warn(&st21nfc_dev->client->dev,
+			 "%s : failed to copy to user space\n", __func__);
+		return -EFAULT;
+	}
+
+	return ret;
+}
+
+/* Character-device write: copy up to MAX_BUFFER_SIZE bytes from
+ * userspace and push them to the controller over I2C.
+ *
+ * Returns the number of bytes sent, -EFAULT on copy failure, or -EIO
+ * on a short/failed I2C transfer.
+ */
+static ssize_t st21nfc_dev_write(struct file *filp, const char __user *buf,
+				 size_t count, loff_t *offset)
+{
+	struct st21nfc_device *st21nfc_dev = container_of(filp->private_data,
+				   struct st21nfc_device, st21nfc_device);
+	char *tmp = NULL;
+	int ret = count;
+
+	dev_dbg(&st21nfc_dev->client->dev, "%s: st21nfc_dev ptr %p\n", __func__,
+		st21nfc_dev);
+
+	/* Silently truncate oversized writes to the transfer buffer size. */
+	if (count > MAX_BUFFER_SIZE)
+		count = MAX_BUFFER_SIZE;
+
+	tmp = memdup_user(buf, count);
+	if (IS_ERR(tmp)) {
+		dev_err(&st21nfc_dev->client->dev, "%s : memdup_user failed\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	dev_dbg(&st21nfc_dev->client->dev, "%s : writing %zu bytes.\n",
+		__func__, count);
+	/* Write data */
+	ret = i2c_master_send(st21nfc_dev->client, tmp, count);
+	if (ret != count) {
+		dev_err(&st21nfc_dev->client->dev,
+			"%s : i2c_master_send returned %d\n", __func__, ret);
+		ret = -EIO;
+	}
+	kfree(tmp);
+
+	return ret;
+}
+
+/* open(): enforce a single-client policy on the misc device.
+ * NOTE(review): device_open is tested and set without a lock —
+ * presumably racy under concurrent opens; confirm whether this matters
+ * for the single NFC HAL user.
+ */
+static int st21nfc_dev_open(struct inode *inode, struct file *filp)
+{
+	struct st21nfc_device *st21nfc_dev = container_of(filp->private_data,
+							  struct st21nfc_device,
+							  st21nfc_device);
+
+	if (st21nfc_dev->device_open) {
+		dev_err(&st21nfc_dev->client->dev,
+			"%s : device already opened ret= %d\n", __func__,
+			-EBUSY);
+		return -EBUSY;
+	}
+
+	st21nfc_dev->device_open = true;
+	return 0;
+}
+
+
+/* release(): clear the single-open flag so the device can be reopened. */
+static int st21nfc_release(struct inode *inode, struct file *file)
+{
+	struct st21nfc_device *st21nfc_dev = container_of(file->private_data,
+							  struct st21nfc_device,
+							  st21nfc_device);
+
+	st21nfc_dev->device_open = false;
+	return 0;
+}
+
+/* ioctl dispatcher for the st21nfc misc device:
+ *  - SET_POLARITY_RISING/HIGH: reconfigure the wakeup irq trigger
+ *  - PULSE_RESET: double low-pulse on reset (exits Quick boot mode)
+ *  - GET_WAKEUP: current level of the irq/wakeup gpio (0 or 1)
+ *  - GET_POLARITY: last configured polarity mode
+ *  - RECOVERY: ST21NFCD firmware-recovery pulse sequence on IRQ/reset
+ */
+static long st21nfc_dev_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg)
+{
+	struct st21nfc_device *st21nfc_dev = container_of(filp->private_data,
+							  struct st21nfc_device,
+							  st21nfc_device);
+
+	struct i2c_client *client = st21nfc_dev->client;
+	struct device *dev = &client->dev;
+	int ret = 0;
+
+	switch (cmd) {
+
+	case ST21NFC_SET_POLARITY_RISING:
+		dev_info(dev, " ### ST21NFC_SET_POLARITY_RISING ###");
+		st21nfc_loc_set_polaritymode(st21nfc_dev, IRQF_TRIGGER_RISING);
+		break;
+
+	case ST21NFC_SET_POLARITY_HIGH:
+		dev_info(dev, " ### ST21NFC_SET_POLARITY_HIGH ###");
+		st21nfc_loc_set_polaritymode(st21nfc_dev, IRQF_TRIGGER_HIGH);
+		break;
+
+	case ST21NFC_PULSE_RESET:
+		/* Double pulse is done to exit Quick boot mode.*/
+		if (!IS_ERR(st21nfc_dev->gpiod_reset)) {
+			/* pulse low for 20 millisecs */
+			gpiod_set_value(st21nfc_dev->gpiod_reset, 0);
+			msleep(20);
+			gpiod_set_value(st21nfc_dev->gpiod_reset, 1);
+			usleep_range(10000, 11000);
+			/* pulse low for 20 millisecs */
+			gpiod_set_value(st21nfc_dev->gpiod_reset, 0);
+			msleep(20);
+			gpiod_set_value(st21nfc_dev->gpiod_reset, 1);
+		}
+		/* Reset resynchronizes the read state machine to a header. */
+		st21nfc_dev->r_state_current = ST21NFC_HEADER;
+		break;
+
+	case ST21NFC_GET_WAKEUP:
+		/* deliver state of Wake_up_pin as return value of ioctl */
+		ret = gpiod_get_value(st21nfc_dev->gpiod_irq);
+		/*
+		 * Warning: depending on gpiod_get_value implementation,
+		 * it can returns a value different than 1 in case of high level
+		 */
+		if (ret != 0)
+			ret = 1;
+
+		dev_dbg(&st21nfc_dev->client->dev, "%s get gpio result %d\n",
+			__func__, ret);
+		break;
+	case ST21NFC_GET_POLARITY:
+		ret = st21nfc_dev->polarity_mode;
+		dev_dbg(&st21nfc_dev->client->dev, "%s get polarity %d\n",
+			__func__, ret);
+		break;
+	case ST21NFC_RECOVERY:
+		/* For ST21NFCD usage only */
+		dev_info(dev, "%s Recovery Request\n", __func__);
+		if (!IS_ERR(st21nfc_dev->gpiod_reset)) {
+			/* pulse low for 20 millisecs */
+			gpiod_set_value(st21nfc_dev->gpiod_reset, 0);
+			usleep_range(10000, 11000);
+			if (st21nfc_dev->irq_is_attached) {
+				devm_free_irq(dev, client->irq, st21nfc_dev);
+				st21nfc_dev->irq_is_attached = false;
+			}
+			/* During the reset, force IRQ OUT as */
+			/* DH output instead of input in normal usage */
+			ret = gpiod_direction_output(st21nfc_dev->gpiod_irq, 1);
+			if (ret) {
+				dev_err(&st21nfc_dev->client->dev,
+					"%s : gpiod_direction_output failed\n",
+					__func__);
+				ret = -ENODEV;
+				break;
+			}
+
+			gpiod_set_value(st21nfc_dev->gpiod_irq, 1);
+			usleep_range(10000, 11000);
+			gpiod_set_value(st21nfc_dev->gpiod_reset, 1);
+
+			dev_info(dev, "%s done Pulse Request\n", __func__);
+		}
+		/* Signature pulse train on the IRQ line while the chip
+		 * boots; timings are presumably from the ST recovery
+		 * procedure — TODO confirm against ST documentation.
+		 */
+		msleep(20);
+		gpiod_set_value(st21nfc_dev->gpiod_irq, 0);
+		msleep(20);
+		gpiod_set_value(st21nfc_dev->gpiod_irq, 1);
+		msleep(20);
+		gpiod_set_value(st21nfc_dev->gpiod_irq, 0);
+		msleep(20);
+		dev_info(dev, "%s Recovery procedure finished\n", __func__);
+		/* Restore the IRQ pin as an input for normal operation. */
+		ret = gpiod_direction_input(st21nfc_dev->gpiod_irq);
+		if (ret) {
+			dev_err(&st21nfc_dev->client->dev,
+				"%s : gpiod_direction_input failed\n",
+				__func__);
+			ret = -ENODEV;
+		}
+		break;
+	default:
+		dev_err(&st21nfc_dev->client->dev, "%s bad ioctl %u\n",
+			__func__, cmd);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/* poll(): data is available when the wakeup/irq gpio is high. When it
+ * is low, (re-)arm the NFCC interrupt so the handler can wake read_wq
+ * on the next data-ready edge/level.
+ */
+static unsigned int st21nfc_poll(struct file *file, poll_table *wait)
+{
+	struct st21nfc_device *st21nfc_dev = container_of(file->private_data,
+							  struct st21nfc_device,
+							  st21nfc_device);
+	unsigned int mask = 0;
+	int pinlev = 0;
+
+	/* wait for Wake_up_pin == high */
+	poll_wait(file, &st21nfc_dev->read_wq, wait);
+
+	pinlev = gpiod_get_value(st21nfc_dev->gpiod_irq);
+
+	if (pinlev != 0) {
+		dev_dbg(&st21nfc_dev->client->dev, "%s return ready\n",
+			__func__);
+		mask = POLLIN | POLLRDNORM;	/* signal data avail */
+		st21nfc_disable_irq(st21nfc_dev);
+	} else {
+		/* Wake_up_pin is low. Activate ISR */
+		if (!st21nfc_dev->irq_enabled) {
+			dev_dbg(&st21nfc_dev->client->dev, "%s enable irq\n",
+				__func__);
+			st21nfc_enable_irq(st21nfc_dev);
+		} else {
+			dev_dbg(&st21nfc_dev->client->dev,
+				"%s irq already enabled\n", __func__);
+		}
+	}
+	return mask;
+}
+
+/* File operations for the /dev/st21nfc misc device. The same ioctl
+ * handler serves compat (32-bit) callers: all commands use scalar args.
+ */
+static const struct file_operations st21nfc_dev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read = st21nfc_dev_read,
+	.write = st21nfc_dev_write,
+	.open = st21nfc_dev_open,
+	.poll = st21nfc_poll,
+	.release = st21nfc_release,
+
+	.unlocked_ioctl = st21nfc_dev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = st21nfc_dev_ioctl
+#endif
+};
+
+/* sysfs "i2c_addr" read: report the current slave address in hex. */
+static ssize_t i2c_addr_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (client == NULL)
+		return -ENODEV;
+	return scnprintf(buf, PAGE_SIZE, "0x%.2x\n", client->addr);
+}
+
+/* sysfs "i2c_addr" write: change the slave address (decimal input),
+ * serialized against in-flight transfers by read_mutex.
+ * NOTE(review): no range validation on the new address — a value
+ * outside 7-bit I2C range would be stored as-is; confirm intended.
+ */
+static ssize_t i2c_addr_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct st21nfc_device *data = dev_get_drvdata(dev);
+	long new_addr = 0;
+
+	if (data != NULL && data->client != NULL) {
+		if (!kstrtol(buf, 10, &new_addr)) {
+			mutex_lock(&data->read_mutex);
+			data->client->addr = new_addr;
+			mutex_unlock(&data->read_mutex);
+			return count;
+		}
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* sysfs "version" read: expose the driver version string. */
+static ssize_t version_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%s\n", DRIVER_VERSION);
+}
+
+/* Cumulative time (ms) spent in pstate, based on the shadow copies.
+ * For the state we are currently in, the still-open interval since its
+ * last entry is added on top of the accumulated total.
+ */
+static uint64_t st21nfc_power_duration(struct st21nfc_device *data,
+				       enum st21nfc_power_state pstate,
+				       uint64_t current_time_ms)
+{
+	const struct nfc_sub_power_stats *s = &data->c_pw_states[pstate];
+
+	if (data->c_pw_current == pstate)
+		return s->duration + (current_time_ms - s->last_entry);
+	return s->duration;
+}
+
+/* sysfs "power_stats" read: take a consistent snapshot of the power
+ * accounting under pidle_mutex into the c_pw_* shadow copies, then
+ * format counts, durations and error counters (all hex, boottime ms)
+ * without holding the lock.
+ */
+static ssize_t power_stats_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct st21nfc_device *data = dev_get_drvdata(dev);
+	uint64_t current_time_ms;
+	uint64_t idle_duration;
+	uint64_t active_ce_duration;
+	uint64_t active_rw_duration;
+
+	mutex_lock(&data->pidle_mutex);
+
+	data->c_pw_current = data->pw_current;
+	data->c_pw_states_err = data->pw_states_err;
+	memcpy(data->c_pw_states, data->pw_states,
+	       ST21NFC_POWER_STATE_MAX * sizeof(struct nfc_sub_power_stats));
+
+	mutex_unlock(&data->pidle_mutex);
+
+	current_time_ms = ktime_to_ms(ktime_get_boottime());
+	idle_duration = st21nfc_power_duration(data, ST21NFC_IDLE,
+					       current_time_ms);
+	active_ce_duration = st21nfc_power_duration(data, ST21NFC_ACTIVE,
+						    current_time_ms);
+	active_rw_duration = st21nfc_power_duration(data, ST21NFC_ACTIVE_RW,
+						    current_time_ms);
+
+	return scnprintf(buf, PAGE_SIZE,
+		"NFC subsystem\n"
+		"Idle mode:\n"
+		"\tCumulative count: 0x%llx\n"
+		"\tCumulative duration msec: 0x%llx\n"
+		"\tLast entry timestamp msec: 0x%llx\n"
+		"\tLast exit timestamp msec: 0x%llx\n"
+		"Active mode:\n"
+		"\tCumulative count: 0x%llx\n"
+		"\tCumulative duration msec: 0x%llx\n"
+		"\tLast entry timestamp msec: 0x%llx\n"
+		"\tLast exit timestamp msec: 0x%llx\n"
+		"Active Reader/Writer mode:\n"
+		"\tCumulative count: 0x%llx\n"
+		"\tCumulative duration msec: 0x%llx\n"
+		"\tLast entry timestamp msec: 0x%llx\n"
+		"\tLast exit timestamp msec: 0x%llx\n"
+		"\nError transition header --> payload state machine: 0x%llx\n"
+		"Error transition from an Active state when not in Idle state: 0x%llx\n"
+		"Error transition from Idle state to Idle state: 0x%llx\n"
+		"Warning transition from Active Reader/Writer state to Idle state: 0x%llx\n"
+		"Error transition from Active state to Active state: 0x%llx\n"
+		"Error transition from Idle state to Active state with notification: 0x%llx\n"
+		"Error transition from Active Reader/Writer state to Active Reader/Writer state: 0x%llx\n"
+		"Error transition from Idle state to Active Reader/Writer state with notification: 0x%llx\n"
+		"\nTotal uptime: 0x%llx Cumulative modes time: 0x%llx\n",
+		data->c_pw_states[ST21NFC_IDLE].count,
+		idle_duration,
+		data->c_pw_states[ST21NFC_IDLE].last_entry,
+		data->c_pw_states[ST21NFC_IDLE].last_exit,
+		data->c_pw_states[ST21NFC_ACTIVE].count,
+		active_ce_duration,
+		data->c_pw_states[ST21NFC_ACTIVE].last_entry,
+		data->c_pw_states[ST21NFC_ACTIVE].last_exit,
+		data->c_pw_states[ST21NFC_ACTIVE_RW].count,
+		active_rw_duration,
+		data->c_pw_states[ST21NFC_ACTIVE_RW].last_entry,
+		data->c_pw_states[ST21NFC_ACTIVE_RW].last_exit,
+		data->c_pw_states_err.header_payload,
+		data->c_pw_states_err.active_not_idle,
+		data->c_pw_states_err.idle_to_idle,
+		data->c_pw_states_err.active_rw_to_idle,
+		data->c_pw_states_err.active_to_active,
+		data->c_pw_states_err.idle_to_active_ntf,
+		data->c_pw_states_err.act_rw_to_act_rw,
+		data->c_pw_states_err.idle_to_active_rw_ntf,
+		current_time_ms,
+		idle_duration + active_ce_duration + active_rw_duration);
+}
+
+static DEVICE_ATTR_RW(i2c_addr);
+
+static DEVICE_ATTR_RO(version);
+
+static DEVICE_ATTR_RO(power_stats);
+
+/* Always-present attributes. power_stats is deliberately absent here:
+ * probe() creates it individually, only when the pidle gpio exists.
+ */
+static struct attribute *st21nfc_attrs[] = {
+	&dev_attr_i2c_addr.attr,
+	&dev_attr_version.attr,
+	NULL,
+};
+
+static struct attribute_group st21nfc_attr_grp = {
+	.attrs = st21nfc_attrs,
+};
+
+/* ACPI GPIO mapping: indices into the _CRS GpioIo/GpioInt resources. */
+static const struct acpi_gpio_params irq_gpios = {0, 0, false };
+static const struct acpi_gpio_params reset_gpios = {1, 0, false };
+static const struct acpi_gpio_params pidle_gpios = {2, 0, false};
+static const struct acpi_gpio_params clkreq_gpios = {3, 0, false};
+
+static const struct acpi_gpio_mapping acpi_st21nfc_gpios[] = {
+	{ "irq-gpios", &irq_gpios, 1},
+	{ "reset-gpios", &reset_gpios, 1},
+	{ "pidle-gpios", &pidle_gpios, 1},
+	{ "clkreq-gpios", &clkreq_gpios, 1},
+};
+
+/* Probe: allocate the device state, acquire GPIOs (irq/reset mandatory,
+ * pidle/clkreq optional), set up the power-stats workqueue+irq and the
+ * optional PMU clock pad control, then register the misc device and
+ * sysfs attributes. Error labels unwind in reverse order, falling
+ * through from the latest acquired resource to the earliest.
+ */
+static int st21nfc_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	int ret;
+	struct st21nfc_device *st21nfc_dev;
+	struct device *dev = &client->dev;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(dev, "%s : need I2C_FUNC_I2C\n", __func__);
+		return -ENODEV;
+	}
+
+	st21nfc_dev = devm_kzalloc(dev, sizeof(*st21nfc_dev), GFP_KERNEL);
+	if (st21nfc_dev == NULL)
+		return -ENOMEM;
+
+	/* store for later use */
+	st21nfc_dev->client = client;
+	st21nfc_dev->r_state_current = ST21NFC_HEADER;
+	client->adapter->retries = 1;
+
+	/* Optional ACPI GPIO mapping; failure is not fatal on DT boots. */
+	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+			acpi_st21nfc_gpios);
+	if (ret)
+		dev_dbg(dev, "Unable to add GPIO mapping table\n");
+
+	/* Mandatory GPIOs: NFCC interrupt (input) and reset (output). */
+	st21nfc_dev->gpiod_irq = devm_gpiod_get(dev, "irq", GPIOD_IN);
+	if (IS_ERR(st21nfc_dev->gpiod_irq)) {
+		dev_err(dev, "%s : Unable to request irq-gpios\n", __func__);
+		return -ENODEV;
+	}
+
+	st21nfc_dev->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(st21nfc_dev->gpiod_reset)) {
+		dev_warn(dev, "%s : Unable to request reset-gpios\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Optional: power-state monitor gpio; absence disables stats. */
+	st21nfc_dev->gpiod_pidle = devm_gpiod_get(dev, "pidle", GPIOD_IN);
+	if (IS_ERR(st21nfc_dev->gpiod_pidle)) {
+		ret = 0;
+	} else {
+		/* Prepare a workqueue for st21nfc_dev_power_stats_handler */
+		st21nfc_dev->st_p_wq = create_workqueue("st_pstate_work");
+		if(!st21nfc_dev->st_p_wq)
+			return -ENODEV;
+		mutex_init(&st21nfc_dev->pidle_mutex);
+		INIT_WORK(&(st21nfc_dev->st_p_work), st21nfc_pstate_wq);
+		/* Start the power stat in power mode idle */
+		st21nfc_dev->irq_pw_stats_idle =
+			gpiod_to_irq(st21nfc_dev->gpiod_pidle);
+
+		ret = irq_set_irq_type(st21nfc_dev->irq_pw_stats_idle,
+				       IRQ_TYPE_EDGE_BOTH);
+		if (ret) {
+			dev_err(dev, "%s : set_irq_type failed\n", __func__);
+			goto err_pidle_workqueue;
+		}
+
+		/* This next call requests an interrupt line */
+		ret = devm_request_irq(dev, st21nfc_dev->irq_pw_stats_idle,
+				(irq_handler_t)st21nfc_dev_power_stats_handler,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				/* Interrupt on both edges */
+				"st21nfc_pw_stats_idle_handle",
+				st21nfc_dev);
+		if (ret) {
+			dev_err(dev,
+				"%s : devm_request_irq for power stats idle failed\n",
+				__func__);
+			goto err_pidle_workqueue;
+		}
+
+		ret = sysfs_create_file(&dev->kobj,
+					&dev_attr_power_stats.attr);
+		if (ret) {
+			dev_err(dev,
+				"%s : sysfs_create_file for power stats failed\n",
+				__func__);
+			goto err_pidle_workqueue;
+		}
+	}
+
+	/* Optional: CLK_REQ gpio plus "st,clk_pinctrl" PMU pad control. */
+	st21nfc_dev->gpiod_clkreq = devm_gpiod_get(dev, "clkreq", GPIOD_IN);
+	if (IS_ERR(st21nfc_dev->gpiod_clkreq)) {
+		ret = 0;
+	} else {
+		if (!device_property_read_bool(dev, "st,clk_pinctrl")) {
+			dev_dbg(dev, "[dsc]%s:[OPTIONAL] clk_pinctrl not set\n",
+				__func__);
+			st21nfc_dev->pinctrl_en = 0;
+		} else {
+			dev_dbg(dev, "[dsc]%s:[OPTIONAL] clk_pinctrl set\n",
+				__func__);
+			st21nfc_dev->pinctrl_en = 1;
+
+			/* handle clk_req irq */
+			st21nfc_dev->irq_clkreq =
+				gpiod_to_irq(st21nfc_dev->gpiod_clkreq);
+
+			ret = irq_set_irq_type(st21nfc_dev->irq_clkreq,
+					       IRQ_TYPE_EDGE_BOTH);
+			if (ret) {
+				dev_err(dev, "%s : set_irq_type failed\n",
+					__func__);
+				st21nfc_dev->pinctrl_en = 0;
+			} else {
+				ret = devm_request_irq(dev,
+						st21nfc_dev->irq_clkreq,
+						st21nfc_clkreq_irq_handler,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING,
+						"st21nfc_clkreq_handle",
+						st21nfc_dev);
+				if (ret) {
+					dev_err(dev,
+						"%s : devm_request_irq for clkreq irq failed\n",
+						__func__);
+					st21nfc_dev->pinctrl_en = 0;
+				}
+			}
+		}
+
+		/* Set clk_run when clock pinctrl already enabled */
+		if (st21nfc_dev->pinctrl_en != 0) {
+			if (device_property_read_u32(dev,
+						     "pmu_clk_pad",
+						     &st21nfc_dev->clk_pad)) {
+				dev_err(dev,
+					"%s : PMU_CLKOUT_PAD offset is unset\n",
+					__func__);
+				st21nfc_dev->pinctrl_en = 0;
+			}
+		}
+
+		ret = st21nfc_clock_select(st21nfc_dev);
+		if (ret < 0) {
+			dev_err(dev, "%s : st21nfc_clock_select failed\n",
+				__func__);
+			goto err_sysfs_power_stats;
+		}
+	}
+
+	client->irq = gpiod_to_irq(st21nfc_dev->gpiod_irq);
+
+	/* init mutex and queues */
+	init_waitqueue_head(&st21nfc_dev->read_wq);
+	mutex_init(&st21nfc_dev->read_mutex);
+	spin_lock_init(&st21nfc_dev->irq_enabled_lock);
+	dev_dbg(dev, "%s : debug irq_gpio = %d, client-irq =  %d\n", __func__,
+		desc_to_gpio(st21nfc_dev->gpiod_irq), client->irq);
+	if (!IS_ERR(st21nfc_dev->gpiod_pidle)) {
+		dev_dbg(dev, "%s : pidle_gpio = %d\n", __func__,
+			desc_to_gpio(st21nfc_dev->gpiod_pidle));
+	}
+	if (!IS_ERR(st21nfc_dev->gpiod_clkreq)) {
+		dev_dbg(dev, "%s : clkreq_gpio = %d\n", __func__,
+			desc_to_gpio(st21nfc_dev->gpiod_clkreq));
+	}
+	st21nfc_dev->st21nfc_device.minor = MISC_DYNAMIC_MINOR;
+	st21nfc_dev->st21nfc_device.name = "st21nfc";
+	st21nfc_dev->st21nfc_device.fops = &st21nfc_dev_fops;
+	st21nfc_dev->st21nfc_device.parent = dev;
+
+	i2c_set_clientdata(client, st21nfc_dev);
+	ret = misc_register(&st21nfc_dev->st21nfc_device);
+	if (ret) {
+		dev_err(dev, "%s : misc_register failed\n", __func__);
+		goto err_misc_register;
+	}
+
+	ret = sysfs_create_group(&dev->kobj, &st21nfc_attr_grp);
+	if (ret) {
+		dev_err(dev, "%s : sysfs_create_group failed\n", __func__);
+		goto err_sysfs_create_group_failed;
+	}
+	device_init_wakeup(&client->dev, true);
+	device_set_wakeup_capable(&client->dev, true);
+	st21nfc_dev->irq_wake_up = false;
+
+	return 0;
+
+err_sysfs_create_group_failed:
+	misc_deregister(&st21nfc_dev->st21nfc_device);
+err_misc_register:
+	mutex_destroy(&st21nfc_dev->read_mutex);
+err_sysfs_power_stats:
+	if (!IS_ERR(st21nfc_dev->gpiod_pidle)) {
+		sysfs_remove_file(&client->dev.kobj,
+				  &dev_attr_power_stats.attr);
+	}
+err_pidle_workqueue:
+	if (!IS_ERR(st21nfc_dev->gpiod_pidle)) {
+		mutex_destroy(&st21nfc_dev->pidle_mutex);
+		destroy_workqueue(st21nfc_dev->st_p_wq);
+	}
+	return ret;
+}
+
+/* Remove: undo everything probe() set up. GPIOs and irqs are devm-
+ * managed and released automatically.
+ */
+static int st21nfc_remove(struct i2c_client *client)
+{
+	struct st21nfc_device *st21nfc_dev = i2c_get_clientdata(client);
+
+	st21nfc_clock_deselect(st21nfc_dev);
+	misc_deregister(&st21nfc_dev->st21nfc_device);
+	if (!IS_ERR(st21nfc_dev->gpiod_pidle)) {
+		sysfs_remove_file(&client->dev.kobj,
+				  &dev_attr_power_stats.attr);
+		/* Fix: flush pending power-stats work and release the
+		 * workqueue created in probe(); it was previously leaked.
+		 * Destroy it before the mutex the work path locks.
+		 */
+		destroy_workqueue(st21nfc_dev->st_p_wq);
+		mutex_destroy(&st21nfc_dev->pidle_mutex);
+	}
+	sysfs_remove_group(&client->dev.kobj, &st21nfc_attr_grp);
+	mutex_destroy(&st21nfc_dev->read_mutex);
+	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev));
+
+	return 0;
+}
+
+/* System suspend: arm the NFCC irq as a wake source if enabled, and
+ * remember the pidle gpio level so resume can detect transitions that
+ * happened while interrupts were off.
+ */
+static int st21nfc_suspend(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct st21nfc_device *st21nfc_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && st21nfc_dev->irq_enabled) {
+		if (!enable_irq_wake(client->irq))
+			st21nfc_dev->irq_wake_up = true;
+	}
+
+	if (!IS_ERR(st21nfc_dev->gpiod_pidle)) {
+		st21nfc_dev->p_idle_last =
+			gpiod_get_value(st21nfc_dev->gpiod_pidle);
+	}
+
+	return 0;
+}
+
+/* System resume: disarm irq wake, then re-sample the pidle gpio. If it
+ * changed across suspend, or disagrees with the recorded power state,
+ * queue the power-stats work to resynchronize the accounting.
+ */
+static int st21nfc_resume(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct st21nfc_device *st21nfc_dev = i2c_get_clientdata(client);
+	int pidle;
+
+	if (device_may_wakeup(&client->dev) && st21nfc_dev->irq_wake_up) {
+		if (!disable_irq_wake(client->irq))
+			st21nfc_dev->irq_wake_up = false;
+	}
+
+	if (!IS_ERR(st21nfc_dev->gpiod_pidle)) {
+		pidle = gpiod_get_value(st21nfc_dev->gpiod_pidle);
+		if((st21nfc_dev->p_idle_last != pidle) ||
+		   (st21nfc_dev->pw_current == ST21NFC_IDLE && pidle != 0) ||
+		   (st21nfc_dev->pw_current == ST21NFC_ACTIVE && pidle == 0)) {
+			queue_work(st21nfc_dev->st_p_wq,
+				   &(st21nfc_dev->st_p_work));
+		}
+	}
+	return 0;
+}
+
+
+/* Legacy I2C id table.
+ * NOTE(review): no MODULE_DEVICE_TABLE(i2c, ...) — autoloading relies
+ * on the OF/ACPI tables below; confirm that is intended.
+ */
+static const struct i2c_device_id st21nfc_id[] = {
+	{"st21nfc", 0},
+	{}
+};
+
+static const struct of_device_id st21nfc_of_match[] = {
+	{ .compatible = "st,st21nfc", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, st21nfc_of_match);
+
+static const struct dev_pm_ops st21nfc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(st21nfc_suspend, st21nfc_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id st21nfc_acpi_match[] = {
+	{"SMO2104"},
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, st21nfc_acpi_match);
+#endif
+
+static struct i2c_driver st21nfc_driver = {
+	.id_table = st21nfc_id,
+	.driver = {
+		.name = "st21nfc",
+		/* .owner is set by the i2c core on modern kernels;
+		 * kept here as harmless legacy.
+		 */
+		.owner = THIS_MODULE,
+		.of_match_table = st21nfc_of_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.pm = &st21nfc_pm_ops,
+		.acpi_match_table = ACPI_PTR(st21nfc_acpi_match),
+	},
+	.probe = st21nfc_probe,
+	.remove = st21nfc_remove,
+};
+
+#ifdef GKI_MODULE
+module_i2c_driver(st21nfc_driver);
+#else
+/*
+ * module load/unload record keeping
+ */
+
+/* Non-GKI module entry point: register the I2C driver. */
+static int __init st21nfc_dev_init(void)
+{
+	pr_info("%s: Loading st21nfc driver (version %s)\n",
+		__func__, DRIVER_VERSION);
+	return i2c_add_driver(&st21nfc_driver);
+}
+
+module_init(st21nfc_dev_init);
+
+/* Non-GKI module exit point: unregister the I2C driver. */
+static void __exit st21nfc_dev_exit(void)
+{
+	pr_debug("Unloading st21nfc driver\n");
+	i2c_del_driver(&st21nfc_driver);
+}
+
+module_exit(st21nfc_dev_exit);
+#endif
+
+MODULE_AUTHOR("STMicroelectronics");
+MODULE_DESCRIPTION("NFC ST21NFC driver");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/st21nfc.h b/st21nfc.h
new file mode 100644
index 0000000..6202ff5
--- /dev/null
+++ b/st21nfc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2013 ST Microelectronics S.A.
+ * Copyright (C) 2009 Stollmann E+V GmbH
+ * Copyright (C) 2010 Trusted Logic S.A.
+ *
+ */
+
+#ifndef __ST21NFC_H
+#define __ST21NFC_H
+
+#define ST21NFC_MAGIC	0xEA
+
+#define ST21NFC_NAME "st21nfc"
+/*
+ * ST21NFC power control via ioctl
+ * ST21NFC_GET_WAKEUP : poll gpio-level for Wakeup pin
+ * NOTE(review): all commands are declared _IOR with an unsigned int
+ * payload, but the driver exchanges data only through the ioctl return
+ * value — the size/direction encoding is effectively unused. Kept
+ * as-is: changing it would change the command numbers userspace uses.
+ */
+#define ST21NFC_GET_WAKEUP	      _IOR(ST21NFC_MAGIC, 0x01, unsigned int)
+/* Double low pulse on reset; exits Quick boot mode. */
+#define ST21NFC_PULSE_RESET	      _IOR(ST21NFC_MAGIC, 0x02, unsigned int)
+#define ST21NFC_SET_POLARITY_RISING   _IOR(ST21NFC_MAGIC, 0x03, unsigned int)
+#define ST21NFC_SET_POLARITY_HIGH     _IOR(ST21NFC_MAGIC, 0x05, unsigned int)
+#define ST21NFC_GET_POLARITY	      _IOR(ST21NFC_MAGIC, 0x07, unsigned int)
+/* ST21NFCD-only firmware recovery pulse sequence. */
+#define ST21NFC_RECOVERY              _IOR(ST21NFC_MAGIC, 0x08, unsigned int)
+
+#endif