[RFC PATCH] hwspinlock/core: add support for reserved locks

From: Suman Anna
Date: Fri May 09 2014 - 15:26:54 EST


The HwSpinlock core allows a client to request either a specific lock or
any available normal lock. Specific locks are usually reserved at board
init time, while the normal locks are intended to be assigned dynamically
at runtime.
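
For reference, a client on the runtime-assigned path looks roughly like
the sketch below. This is purely illustrative and not part of this patch;
the function name and the 10 ms timeout are made up, and error handling
is trimmed:

#include <linux/errno.h>
#include <linux/hwspinlock.h>

/* illustrative client: grab any unused lock, use it, give it back */
static int demo_use_dynamic_hwlock(void)
{
	struct hwspinlock *hwlock;
	int ret;

	hwlock = hwspin_lock_request();		/* any free normal lock */
	if (!hwlock)
		return -EBUSY;

	ret = hwspin_lock_timeout(hwlock, 10);	/* busy-wait up to 10 ms */
	if (!ret) {
		/* ... access the shared resource ... */
		hwspin_unlock(hwlock);
	}

	return hwspin_lock_free(hwlock);
}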

The HwSpinlock core has been enhanced to:
1. mark certain locks as 'reserved' by parsing the DT blob for any
   locks used by client nodes.
2. restrict the anonymous hwspin_lock_request() API to allocate only
   from non-reserved locks for DT boots.
3. limit these reserved locks to be allocated only using the
   _request_specific() API variants for DT boots (see the client-side
   sketch after this list).
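
A client that has one of its locks marked reserved via DT would then
claim it by id, roughly as in the sketch below. Again, this is
illustrative and not part of this patch; MY_HWLOCK_ID is a made-up
stand-in for whatever index the client's "hwlocks" entry resolves to
through the controller's of_xlate:

#include <linux/hwspinlock.h>

#define MY_HWLOCK_ID	2	/* hypothetical id from this client's "hwlocks" entry */

static struct hwspinlock *demo_get_reserved_hwlock(void)
{
	/*
	 * Reserved locks must be claimed explicitly by id; on DT boots
	 * the anonymous hwspin_lock_request() no longer hands them out.
	 */
	return hwspin_lock_request_specific(MY_HWLOCK_ID);
}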

Signed-off-by: Suman Anna <s-anna@xxxxxx>
---
 drivers/hwspinlock/hwspinlock_core.c | 50 ++++++++++++++++++++++++++++++++++--
 1 file changed, 48 insertions(+), 2 deletions(-)

diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index c2063bc..0c924c9 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -425,6 +425,42 @@ static int hwspinlock_device_add(struct hwspinlock_device *bank)
 	return ret;
 }

+static void hwspin_mark_reserved_locks(struct hwspinlock_device *bank)
+{
+	struct device_node *np = bank->dev->of_node;
+	const char *prop_name = "hwlocks";
+	const char *cells_name = "#hwlock-cells";
+	struct device_node *node = NULL;
+	struct of_phandle_args args;
+	struct hwspinlock *hwlock;
+	int i, id, count, ret;
+
+	for_each_node_with_property(node, prop_name) {
+		count = of_count_phandle_with_args(node, prop_name, cells_name);
+		if (count <= 0)
+			continue;
+
+		for (i = 0; i < count; i++) {
+			ret = of_parse_phandle_with_args(node, prop_name,
+							 cells_name, i, &args);
+			if (ret || np != args.np)
+				continue;
+
+			id = bank->ops->of_xlate(bank, &args);
+			if (id < 0 || id >= bank->num_locks)
+				continue;
+
+			hwlock = &bank->lock[id];
+			if (hwlock->type == HWSPINLOCK_RESERVED) {
+				dev_err(bank->dev, "potential reuse of hwspinlock %d between multiple clients on %s\n",
+					id, np->full_name);
+				continue;
+			}
+			hwlock->type = HWSPINLOCK_RESERVED;
+		}
+	}
+}
+
 /**
  * hwspin_lock_register() - register a new hw spinlock device
  * @bank: the hwspinlock device, which usually provides numerous hw locks
@@ -463,12 +499,16 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
 	if (ret)
 		return ret;
 
+	if (dev->of_node)
+		hwspin_mark_reserved_locks(bank);
+
 	for (i = 0; i < num_locks; i++) {
 		hwlock = &bank->lock[i];
 
 		spin_lock_init(&hwlock->lock);
 		hwlock->bank = bank;
-		hwlock->type = HWSPINLOCK_UNUSED;
+		if (hwlock->type != HWSPINLOCK_RESERVED)
+			hwlock->type = HWSPINLOCK_UNUSED;
 
 		ret = hwspin_lock_register_single(hwlock, base_id + i);
 		if (ret)
@@ -651,7 +691,13 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 	/* sanity check (this shouldn't happen) */
 	WARN_ON(hwlock_to_id(hwlock) != id);
 
-	/* make sure this hwspinlock is unused */
+	if (hwlock->type != HWSPINLOCK_RESERVED) {
+		pr_warn("hwspinlock %u is not a reserved lock\n", id);
+		hwlock = NULL;
+		goto out;
+	}
+
+	/* make sure this hwspinlock is an unused reserved lock */
 	ret = radix_tree_tag_get(&hwspinlock_tree, id, hwlock->type);
 	if (ret == 0) {
 		pr_warn("hwspinlock %u is already in use\n", id);
--
1.9.2

