On Mon, Mar 31 2025 at 14:41, Tianyang Zhang wrote:

	irq_data_update_effective_affinity(data, cpumask_of(cpu));
@@ -242,6 +233,7 @@ static void avecintc_irq_dispatch(struct irq_desc *desc)
d = this_cpu_read(irq_map[vector]);
if (d) {
generic_handle_irq_desc(d);
+
 	} else {

Stray newline.

OK, I got it.

+
+static void invalid_enqueue(struct redirect_queue *rqueue, struct irde_inv_cmd *cmd)
+{
+ struct irde_inv_cmd *inv_addr;
+ u32 tail;
+
+ guard(raw_spinlock_irqsave)(&rqueue->lock);
+
+ while (invalid_queue_is_full(rqueue->node, &tail))
+ cpu_relax();
+
+ inv_addr = (struct irde_inv_cmd *)(rqueue->base + tail * sizeof(struct irde_inv_cmd));
+ memcpy(inv_addr, cmd, sizeof(struct irde_inv_cmd));
+ tail = (tail + 1) % INVALID_QUEUE_SIZE;
+
+	wmb();

Undocumented memory barrier.

OK, I got it.

+ iocsr_write32(tail, LOONGARCH_IOCSR_REDIRECT_CQT);
+}
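
A minimal way to document it could look like this (just a sketch, assuming
the barrier is only meant to order the command write before the CQT update
that the hardware consumes):

	/*
	 * Ensure the queued command is globally visible before the new
	 * tail is posted to LOONGARCH_IOCSR_REDIRECT_CQT, so the hardware
	 * never consumes a slot that is not fully written yet.
	 */
	wmb();

	iocsr_write32(tail, LOONGARCH_IOCSR_REDIRECT_CQT);
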
+
+static void smp_call_invalid_enqueue(void *arg)
+{
+ struct smp_invalid_arg *s_arg = (struct smp_invalid_arg *)arg;
+
+ invalid_enqueue(s_arg->queue, s_arg->cmd);
+}
+
+static void irde_invlid_entry_node(struct redirect_item *item)
+{
+ struct redirect_queue *rqueue;
+ struct smp_invalid_arg arg;
+ struct irde_inv_cmd cmd;
+ volatile u64 raddr = 0;
+ int node = item->table->node, cpu;
+
+ rqueue = &(irde_descs[node].inv_queue);
+ cmd.cmd_info = 0;
+ cmd.index.type = INVALID_INDEX;
+ cmd.index.need_notice = 1;
+ cmd.index.index = item->index;
+ cmd.notice_addr = (u64)(__pa(&raddr));
+
+ if (cpu_to_node(smp_processor_id()) == node)
+ invalid_enqueue(rqueue, &cmd);
+ else {
+ for_each_cpu(cpu, cpumask_of_node(node)) {
+ if (cpu_online(cpu))
+ break;
+ }
+ arg.queue = rqueue;
+ arg.cmd = &cmd;
+ smp_call_function_single(cpu, smp_call_invalid_enqueue, &arg, 0);
+ }
+
+ while (!raddr)
+ cpu_relax();
+
+}

if () lacks brackets.

OK, I got it.
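
Per Documentation/process/coding-style.rst, when one branch of a conditional
needs braces, all branches get them, so this would become (sketch):

	if (cpu_to_node(smp_processor_id()) == node) {
		invalid_enqueue(rqueue, &cmd);
	} else {
		for_each_cpu(cpu, cpumask_of_node(node)) {
			if (cpu_online(cpu))
				break;
		}

		arg.queue = rqueue;
		arg.cmd = &cmd;
		smp_call_function_single(cpu, smp_call_invalid_enqueue, &arg, 0);
	}
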
+static int redirect_table_free(struct redirect_item *item)
+{
+ struct redirect_table *ird_table;
+ struct redirect_entry *entry;
+ unsigned long flags;
+
+ ird_table = item->table;
+
+ entry = item->entry;
+ memset(entry, 0, sizeof(struct redirect_entry));
+
+ raw_spin_lock_irqsave(&ird_table->lock, flags);
+ bitmap_release_region(ird_table->bitmap, item->index, 0);
+	raw_spin_unlock_irqrestore(&ird_table->lock, flags);

	scoped_guard(raw_spinlock_irqsave, &ird_table->lock)
		bitmap_release_region(ird_table->bitmap, item->index, 0);
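
With scoped_guard() the 'flags' local and the explicit unlock go away as
well; the locking part would shrink to roughly this (sketch, with the rest
of the function unchanged):

static int redirect_table_free(struct redirect_item *item)
{
	struct redirect_table *ird_table = item->table;
	struct redirect_entry *entry = item->entry;

	memset(entry, 0, sizeof(struct redirect_entry));

	/* The guard drops the lock when the statement scope ends */
	scoped_guard(raw_spinlock_irqsave, &ird_table->lock)
		bitmap_release_region(ird_table->bitmap, item->index, 0);

	/* ... remainder of the function as in the patch ... */
}
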
+static int redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
+{
+ struct redirect_item *item = data->chip_data;
+ struct avecintc_data *adata;
+ int ret;
+
+ ret = irq_chip_set_affinity_parent(data, dest, force);
+ if (ret == IRQ_SET_MASK_OK_DONE)
+ return IRQ_SET_MASK_OK;
+	else if (ret) {
+ pr_err("IRDE:set_affinity error %d\n", ret);
+ return ret;
+	}

Again bracket rules. Also, what is this return value translation about?

+
+ adata = irq_data_get_avec_data(data);
+
+ redirect_domain_prepare_entry(item, adata);
+
+ irde_invlid_entry_node(item);

This cannot work when irde_invlid_entry_node() goes into the SMP function
call path, because smp_call_function_single() cannot be invoked with
interrupts disabled.

My oversight. This approach is indeed risky. I'll rethink the design. Thanks for your advice.
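
One way to catch this class of problem early while the design is reworked
(a sketch, not part of the current patch) is to assert the required context
at the top of irde_invlid_entry_node():

static void irde_invlid_entry_node(struct redirect_item *item)
{
	/*
	 * Both smp_call_function_single() and the busy-wait on raddr
	 * below rely on running with interrupts enabled.
	 */
	lockdep_assert_irqs_enabled();

	/* ... rest of the function as in the patch ... */
}
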
+{
+ struct redirect_table *ird_table;
+ struct avecintc_data *avec_data;
+ struct irq_data *irq_data;
+ int ret, i, node;
+
+#ifdef CONFIG_NUMA
+ node = ((msi_alloc_info_t *)arg)->desc->dev->numa_node;
+#else
+ node = 0;
+#endif

Bah.

	msi_alloc_info_t *info = arg;

	node = dev_to_node(info->desc->dev);

This is appropriate and safe. Thank you.

+static int redirect_table_init(int node)
+{
+ struct redirect_table *ird_table = &(irde_descs[node].ird_table);
+ struct page *pages;
+	unsigned long *bitmap;

Use proper reverse fir tree ordering.
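
Reverse fir tree ordering here simply means longest declaration line first,
i.e.:

	struct redirect_table *ird_table = &(irde_descs[node].ird_table);
	unsigned long *bitmap;
	struct page *pages;
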
Thanks,
tglx