The main function of the redirected interrupt controller is to manage
the redirected-interrupt table, which consists of redirect entries.
When an MSI interrupt is requested, the driver creates a corresponding
redirect entry that describes the target CPU/vector number and the
operating mode of the interrupt. The redirected interrupt module has
an independent cache, and during interrupt routing it preferentially
uses redirect entries that hit in the cache. The driver invalidates
stale cached entries through a command queue.
Co-developed-by: Liupu Wang <wangliupu@xxxxxxxxxxx>
Signed-off-by: Liupu Wang <wangliupu@xxxxxxxxxxx>
Signed-off-by: Tianyang Zhang <zhangtianyang@xxxxxxxxxxx>
---
arch/loongarch/include/asm/cpu-features.h | 1 +
arch/loongarch/include/asm/cpu.h | 2 +
arch/loongarch/include/asm/loongarch.h | 6 +
arch/loongarch/kernel/cpu-probe.c | 2 +
drivers/irqchip/Makefile | 2 +-
drivers/irqchip/irq-loongarch-avec.c | 21 +-
drivers/irqchip/irq-loongarch-ir.c | 561 ++++++++++++++++++++++
drivers/irqchip/irq-loongson.h | 12 +
include/linux/cpuhotplug.h | 1 +
9 files changed, 594 insertions(+), 14 deletions(-)
create mode 100644 drivers/irqchip/irq-loongarch-ir.c
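
Note for reviewers (not part of the changelog): a minimal sketch of the
invalidation command layout that the queue helpers below assume. The field
names mirror the accesses in this patch (cmd_info, index.type,
index.need_notice, index.index, notice_addr); the bit widths shown here are
placeholders only, and the authoritative definition lives elsewhere in this
series.

struct irde_inv_cmd {
	union {
		u64 cmd_info;
		struct {
			u64 type	: 1;	/* e.g. INVALID_INDEX: drop one cached entry */
			u64 need_notice	: 1;	/* hardware writes notice_addr on completion */
			u64 rsv		: 14;	/* placeholder width */
			u64 index	: 48;	/* redirect-table index to invalidate */
		} index;
	};
	u64 notice_addr;	/* physical address of the completion flag */
};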
+static void invalid_enqueue(struct redirect_queue *rqueue, struct irde_inv_cmd *cmd)
+{
+ struct irde_inv_cmd *inv_addr;
+ u32 tail;
+
+ guard(raw_spinlock_irqsave)(&rqueue->lock);
+
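+ /* Wait for a free slot in the invalidation command queue */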
+ while (invalid_queue_is_full(rqueue->node, &tail))
+ cpu_relax();
+
+ inv_addr = (struct irde_inv_cmd *)(rqueue->base + tail * sizeof(struct irde_inv_cmd));
+ memcpy(inv_addr, cmd, sizeof(struct irde_inv_cmd));
+ tail = (tail + 1) % INVALID_QUEUE_SIZE;
+
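+ /* Commit the command to memory before advertising it via the tail register */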
+ wmb();
+
+ iocsr_write32(tail, LOONGARCH_IOCSR_REDIRECT_CQT);
+}
+
+static void smp_call_invalid_enqueue(void *arg)
+{
+ struct smp_invalid_arg *s_arg = arg;
+
+ invalid_enqueue(s_arg->queue, s_arg->cmd);
+}
+
+static void irde_invalid_entry_node(struct redirect_item *item)
+{
+ struct redirect_queue *rqueue;
+ struct smp_invalid_arg arg;
+ struct irde_inv_cmd cmd;
+ u64 raddr = 0;
+ int node = item->table->node, cpu;
+
+ rqueue = &(irde_descs[node].inv_queue);
+ cmd.cmd_info = 0;
+ cmd.index.type = INVALID_INDEX;
+ cmd.index.need_notice = 1;
+ cmd.index.index = item->index;
+ cmd.notice_addr = (u64)(__pa(&raddr));
+
+ if (cpu_to_node(smp_processor_id()) == node) {
+ invalid_enqueue(rqueue, &cmd);
+ } else {
+ /* Queue the command from an online CPU on the target node */
+ cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
+ arg.queue = rqueue;
+ arg.cmd = &cmd;
+ smp_call_function_single(cpu, smp_call_invalid_enqueue, &arg, 0);
+ }
+
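+ /* The hardware signals completion by writing to the physical notice address */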
+ while (!READ_ONCE(raddr))
+ cpu_relax();
+}
+
+static inline struct avecintc_data *irq_data_get_avec_data(struct irq_data *data)
+{
+ return data->parent_data->chip_data;
+}
+
+static int redirect_table_alloc(struct redirect_item *item, struct redirect_table *ird_table)
+{
+ int index;
+
+ guard(raw_spinlock_irqsave)(&ird_table->lock);
+
+ index = find_first_zero_bit(ird_table->bitmap, IRD_ENTRIES);
+ if (index >= IRD_ENTRIES) {
+ pr_err("No free redirect entry available\n");
+ return -ENOMEM;
+ }
+
+ __set_bit(index, ird_table->bitmap);
+
+ item->index = index;
+ item->entry = &ird_table->table[index];
+ item->table = ird_table;
+
+ return 0;
+}
+
+static int redirect_table_free(struct redirect_item *item)
+{
+ struct redirect_table *ird_table = item->table;
+
+ memset(item->entry, 0, sizeof(struct redirect_entry));
+
+ scoped_guard(raw_spinlock_irqsave, &ird_table->lock)
+ __clear_bit(item->index, ird_table->bitmap);
+
+ /* Make sure the hardware no longer caches the entry before the GPID is freed */
+ irde_invalid_entry_node(item);
+
+ kfree(item->gpid);
+
+ return 0;
+}
+
+static inline void redirect_domain_prepare_entry(struct redirect_item *item,
+ struct avecintc_data *adata)
+{
+ struct redirect_entry *entry = item->entry;
+
+ item->gpid->en = 1;
+ item->gpid->irqnum = adata->vec;
+ item->gpid->dst = adata->cpu;
+
+ entry->lo.valid = 1;
+ entry->lo.gpid = ((long)item->gpid >> GPID_ADDR_SHIFT) & (GPID_ADDR_MASK);
+ entry->lo.vector = 0xff;
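+
+ /* Publish the populated entry before the hardware can observe it */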
+ wmb();
+}
+
+static int redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
+{
+ struct redirect_item *item = data->chip_data;
+ struct avecintc_data *adata;
+ int ret;
+
+ ret = irq_chip_set_affinity_parent(data, dest, force);
+ if (ret == IRQ_SET_MASK_OK_DONE)
+ return IRQ_SET_MASK_OK;
+
+ if (ret) {
+ pr_err("IRDE: set affinity error %d\n", ret);
+ return ret;
+ }
+
+ adata = irq_data_get_avec_data(data);
+
+ redirect_domain_prepare_entry(item, adata);
+
+ irde_invalid_entry_node(item);
+
+ avecintc_sync(adata);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void redirect_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct redirect_item *item;
+
+ item = irq_data_get_irq_chip_data(d);
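+ /* msi_base_addr with bit 2 set and the redirect-table index in bits [19:4] */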
+ msg->address_lo = (msi_base_addr | 1 << 2 | ((item->index & 0xffff) << 4));
+ msg->address_hi = 0x0;
+ msg->data = 0x0;
+}
+
+static inline void redirect_ack_irq(struct irq_data *d)
+{
+}
+
+static inline void redirect_unmask_irq(struct irq_data *d)
+{
+}
+
+static inline void redirect_mask_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip loongarch_redirect_chip = {
+ .name = "REDIRECT",
+ .irq_ack = redirect_ack_irq,
+ .irq_mask = redirect_mask_irq,
+ .irq_unmask = redirect_unmask_irq,
+ .irq_set_affinity = redirect_set_affinity,
+ .irq_compose_msi_msg = redirect_compose_msi_msg,
+};
+
+static void redirect_free_resources(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *irq_data;
+ struct redirect_item *item;
+
+ for (unsigned int i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+ if (irq_data && irq_data->chip_data) {
+ item = irq_data->chip_data;
+ redirect_table_free(item);
+ kfree(item);
+ }
+ }
+}
+
+static int redirect_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)