[PATCH v2 3/5] ACPI / EC: Add event clearing variation support.
From: Lv Zheng
Date: Thu Jun 11 2015 - 01:21:55 EST
We've been suffering from the uncertainty of the SCI_EVT clearing timing.
This patch implements 3 of the 4 possible modes to handle the SCI_EVT
clearing variations. The old behavior is kept as the default in this patch.
Status: QR_EC is re-checked as early as possible, right after checking the
previous SCI_EVT. This always leads to 2 QR_EC transactions per SCI_EVT
indication, and the target may implement an event queue which returns
0x00 to indicate "no outstanding event".
This is proven to conflict with the Windows behavior, but is still kept
(as the default) in this patch to make the EC driver robust against
possible regressions on Samsung platforms.
Query: QR_EC is re-checked after the target has handled the QR_EC query
request command pushed by the host.
Event: QR_EC is re-checked after the target has noticed that the query event
response data has been pulled by the host.
This timing is not determined by any IRQ, so we may need to use a
guard period in this mode, which may explain the existence of the
ec_guard() code in the old EC driver, where the re-check timing is
implemented in a similar way to this mode.
Method: QR_EC is re-checked as late as possible, after completing the _Qxx
evaluation. The target may implement SCI_EVT like a level-triggered
interrupt.
Kernel bugzilla 94411 proves that Windows parallelizes all _Qxx
evaluations. Thus, unless required by further evidence, we needn't
implement this mode, as it conflicts with the _Qxx parallelism
requirement.
Note that, according to the reports, there are platforms that cannot be
handled using the "Status" mode without enabling the
EC_FLAGS_QUERY_HANDSHAKE quirk, but which can be handled with the other
modes according to the tests (kernel bugzilla 97381).
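
Not part of the patch, just to summarize: the difference between the modes
boils down to the point in the QR_EC transaction at which the driver
acknowledges the current event, roughly (condensed from the
ec_transaction_transition() hunk in the diff below, for illustration only):

  /* condensed from ec_transaction_transition(), illustration only */
  if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
          switch (ec_event_clearing) {
          case ACPI_EC_EVT_TIMING_STATUS:
                  /* next QR_EC allowed as soon as this QR_EC is written */
                  if (flag == ACPI_EC_COMMAND_POLL)
                          acpi_ec_complete_query(ec);
                  break;
          case ACPI_EC_EVT_TIMING_QUERY:
                  /* next QR_EC allowed once the event byte has been read */
                  if (flag == ACPI_EC_COMMAND_COMPLETE)
                          acpi_ec_complete_query(ec);
                  break;
          case ACPI_EC_EVT_TIMING_EVENT:
                  /* guard before re-checking SCI_EVT, see acpi_ec_guard_event() */
                  if (flag == ACPI_EC_COMMAND_COMPLETE)
                          set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
                  break;
          }
  }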
The following log entry can be used to confirm the differences between the 3
modes, as it appears at a different position for each mode:
  Command(QR_EC) unblocked
Status: appearing after
  EC_SC(W) = 0x84
Query: appearing after
  EC_DATA(R) = 0xXX
  where XX is the event number used to determine _QXX
Event: appearing after first
  EC_SC(R) = 0xX0 SCI_EVT=x BURST=0 CMD=0 IBF=0 OBF=0
  that is next to the following log entry:
  Command(QR_EC) completed by hardware
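
For testing, the assumed timing can also be switched at runtime via the new
ec_event_clearing parameter. Assuming ec.c is built into the acpi module as
usual, the paths should be something like:

  acpi.ec_event_clearing=event              (kernel command line)
  echo query > /sys/module/acpi/parameters/ec_event_clearing

Also note that the log entries above come from the driver's ec_dbg_*()
macros (pr_debug() based as far as I can tell), so dynamic debug or a debug
build is likely needed to see them.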
Link: https://bugzilla.kernel.org/show_bug.cgi?id=94411
Link: https://bugzilla.kernel.org/show_bug.cgi?id=97381
Link: https://bugzilla.kernel.org/show_bug.cgi?id=98111
Reported-and-tested-by: Gabriele Mazzotta <gabriele.mzt@xxxxxxxxx>
Reported-and-tested-by: Tigran Gabrielyan <tigrangab@xxxxxxxxx>
Reported-and-tested-by: Adrien D <ghbdtn@xxxxxxxxxxxxxxx>
Signed-off-by: Lv Zheng <lv.zheng@xxxxxxxxx>
---
drivers/acpi/ec.c | 137 +++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 132 insertions(+), 5 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 824f3e8..41eb523 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -59,6 +59,38 @@
#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
+/*
+ * The SCI_EVT clearing timing is not defined by the ACPI specification.
+ * This leads to lots of practical timing issues for the host EC driver.
+ * The following variations are defined (from the target EC firmware's
+ * perspective):
+ * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
+ * target can clear SCI_EVT at any time so long as the host can see
+ * the indication by reading the status register (EC_SC). So the
+ * host should re-check SCI_EVT after the first time the SCI_EVT
+ * indication is seen, which is the same time the query request
+ * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
+ * at any later time could indicate another event. Normally such
+ * kind of EC firmware has implemented an event queue and will
+ * return 0x00 to indicate "no outstanding event".
+ * QUERY: After seeing the query request (QR_EC) written to the command
+ * register (EC_CMD) by the host and having prepared the responding
+ * event value in the data register (EC_DATA), the target can safely
+ * clear SCI_EVT because the target can confirm that the current
+ * event is being handled by the host. The host then should check
+ * SCI_EVT right after reading the event response from the data
+ * register (EC_DATA).
+ * EVENT: After seeing the event response read from the data register
+ * (EC_DATA) by the host, the target can clear SCI_EVT. As the
+ * target requires time to notice the change in the data register
+ * (EC_DATA), the host may be required to wait additional guarding
+ * time before checking the SCI_EVT again. Such guarding may not be
+ * necessary if the host is notified via another IRQ.
+ */
+#define ACPI_EC_EVT_TIMING_STATUS 0x00
+#define ACPI_EC_EVT_TIMING_QUERY 0x01
+#define ACPI_EC_EVT_TIMING_EVENT 0x02
+
/* EC commands */
enum ec_command {
ACPI_EC_COMMAND_READ = 0x80,
@@ -76,6 +108,7 @@ enum ec_command {
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
+ EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
* OpReg are installed */
EC_FLAGS_STARTED, /* Driver is started */
@@ -100,6 +133,8 @@ static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
+static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_STATUS;
+
/*
* If the number of false interrupts per one transaction exceeds
* this threshold, will think there is a GPE storm happened and
@@ -400,6 +435,21 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
}
}
+static bool acpi_ec_guard_event(struct acpi_ec *ec)
+{
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
+ ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
+ !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
+ (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
+ return false;
+
+ /*
+ * Postpone the query submission to allow the firmware to proceed;
+ * we shouldn't check SCI_EVT before the firmware reflags it.
+ */
+ return true;
+}
+
static int ec_transaction_polled(struct acpi_ec *ec)
{
unsigned long flags;
@@ -428,8 +478,15 @@ static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long f
{
ec->curr->flags |= flag;
if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
- if (flag == ACPI_EC_COMMAND_POLL)
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
+ flag == ACPI_EC_COMMAND_POLL)
+ acpi_ec_complete_query(ec);
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
+ flag == ACPI_EC_COMMAND_COMPLETE)
acpi_ec_complete_query(ec);
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
+ flag == ACPI_EC_COMMAND_COMPLETE)
+ set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
}
}
@@ -449,6 +506,17 @@ static void advance_transaction(struct acpi_ec *ec)
acpi_ec_clear_gpe(ec);
status = acpi_ec_read_status(ec);
t = ec->curr;
+ /*
+ * Another IRQ or a guarded polling mode advancement has been
+ * detected, so the next QR_EC submission is allowed.
+ */
+ if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
+ test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags)) {
+ clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
+ acpi_ec_complete_query(ec);
+ }
+ }
if (!t)
goto err;
if (t->flags & ACPI_EC_COMMAND_POLL) {
@@ -534,11 +602,13 @@ static int ec_guard(struct acpi_ec *ec)
/*
* Perform wait polling
*
- * The following check is there to keep the old
- * logic - no inter-transaction guarding for the
- * wait polling mode.
+ * For the "event" clearing timing, perform the
+ * guarding before re-checking SCI_EVT. Otherwise,
+ * such guarding is not needed, matching the old
+ * behavior.
*/
- if (!ec_transaction_polled(ec))
+ if (!ec_transaction_polled(ec) &&
+ !acpi_ec_guard_event(ec))
break;
if (wait_event_timeout(ec->wait,
ec_transaction_completed(ec),
@@ -964,6 +1034,24 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
return result;
}
+static void acpi_ec_check_event(struct acpi_ec *ec)
+{
+ unsigned long flags;
+
+ if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
+ if (ec_guard(ec)) {
+ spin_lock_irqsave(&ec->lock, flags);
+ /*
+ * Take care of SCI_EVT only if no one else is
+ * currently handling it.
+ */
+ if (!ec->curr)
+ advance_transaction(ec);
+ spin_unlock_irqrestore(&ec->lock, flags);
+ }
+ }
+}
+
static void acpi_ec_event_handler(struct work_struct *work)
{
unsigned long flags;
@@ -981,6 +1069,8 @@ static void acpi_ec_event_handler(struct work_struct *work)
spin_unlock_irqrestore(&ec->lock, flags);
ec_dbg_evt("Event stopped");
+
+ acpi_ec_check_event(ec);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -1437,6 +1527,43 @@ error:
return -ENODEV;
}
+static int param_set_event_clearing(const char *val, struct kernel_param *kp)
+{
+ int result = 0;
+
+ if (!strncmp(val, "status", sizeof("status") - 1)) {
+ ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
+ pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
+ } else if (!strncmp(val, "query", sizeof("query") - 1)) {
+ ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
+ pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
+ } else if (!strncmp(val, "event", sizeof("event") - 1)) {
+ ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
+ pr_info("Assuming SCI_EVT clearing on event reads\n");
+ } else
+ result = -EINVAL;
+ return result;
+}
+
+static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
+{
+ switch (ec_event_clearing) {
+ case ACPI_EC_EVT_TIMING_STATUS:
+ return sprintf(buffer, "status");
+ case ACPI_EC_EVT_TIMING_QUERY:
+ return sprintf(buffer, "query");
+ case ACPI_EC_EVT_TIMING_EVENT:
+ return sprintf(buffer, "event");
+ default:
+ return sprintf(buffer, "invalid");
+ }
+ return 0;
+}
+
+module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
+ NULL, 0644);
+MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
+
static struct acpi_driver acpi_ec_driver = {
.name = "ec",
.class = ACPI_EC_CLASS,
--
1.7.10