Skip to content

Commit e80f0b6

Browse files
masonhuo (Mason Huo)
authored and
Marc Zyngier
committed
irqchip/irq-sifive-plic: Add syscore callbacks for hibernation
The priority and enable registers of the PLIC will be reset during a hibernation power cycle in poweroff mode; add syscore callbacks to save/restore those registers.

Signed-off-by: Mason Huo <[email protected]>
Reviewed-by: Ley Foon Tan <[email protected]>
Reviewed-by: Sia Jee Heng <[email protected]>
Reported-by: Dan Carpenter <[email protected]>
Link: https://lore.kernel.org/r/[email protected]/
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 9dfc779 commit e80f0b6

File tree

1 file changed

+91
-2
lines changed

1 file changed

+91
-2
lines changed

drivers/irqchip/irq-sifive-plic.c

+91-2
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include <linux/of_irq.h>
1818
#include <linux/platform_device.h>
1919
#include <linux/spinlock.h>
20+
#include <linux/syscore_ops.h>
2021
#include <asm/smp.h>
2122

2223
/*
@@ -67,6 +68,8 @@ struct plic_priv {
6768
struct irq_domain *irqdomain;
6869
void __iomem *regs;
6970
unsigned long plic_quirks;
71+
unsigned int nr_irqs;
72+
unsigned long *prio_save;
7073
};
7174

7275
struct plic_handler {
@@ -78,6 +81,7 @@ struct plic_handler {
7881
*/
7982
raw_spinlock_t enable_lock;
8083
void __iomem *enable_base;
84+
u32 *enable_save;
8185
struct plic_priv *priv;
8286
};
8387
static int plic_parent_irq __ro_after_init;
@@ -229,6 +233,71 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
229233
return IRQ_SET_MASK_OK;
230234
}
231235

236+
/*
 * Syscore suspend callback: snapshot PLIC state that is lost across a
 * hibernation power cycle.
 *
 * For each interrupt source, record whether its priority register is
 * non-zero as a single bit in priv->prio_save (only the enabled/disabled
 * state is saved, not the priority value itself — resume rewrites 1 or 0).
 * For each present hart context, copy the enable-bit words into the
 * handler's enable_save array under the enable_lock.
 *
 * Runs with interrupts disabled on one CPU (syscore context), hence the
 * non-atomic __set_bit/__clear_bit and the plain raw_spin_lock.
 * Always returns 0 (cannot fail).
 */
static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	u32 __iomem *reg;
	struct plic_priv *priv;

	/* All handlers share one priv; grab it via the local CPU's handler. */
	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	/* One bit per source: non-zero priority -> bit set in prio_save. */
	for (i = 0; i < priv->nr_irqs; i++)
		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
			__set_bit(i, priv->prio_save);
		else
			__clear_bit(i, priv->prio_save);

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock(&handler->enable_lock);
		/* 32 enable bits per u32 register; save each word. */
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			/*
			 * enable_base is void __iomem *, so the arithmetic
			 * below is in bytes (GNU void* arithmetic).
			 */
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock(&handler->enable_lock);
	}

	return 0;
}
266+
267+
/*
 * Syscore resume callback: restore the PLIC state captured by
 * plic_irq_suspend() after the registers were reset by the hibernation
 * power cycle.
 *
 * Priority registers are rewritten as 1 (was enabled) or 0 (was disabled)
 * from the prio_save bitmap — the original priority values were not saved.
 * Each present hart context then has its enable words written back from
 * enable_save under the enable_lock.
 *
 * Runs with interrupts disabled on one CPU (syscore context).
 */
static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	u32 __iomem *reg;
	struct plic_priv *priv;

	/* All handlers share one priv; grab it via the local CPU's handler. */
	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		/* Locate source i's bit inside the saved bitmap word. */
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock(&handler->enable_lock);
		/* Restore each 32-bit enable word saved at suspend time. */
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			/* Byte-offset arithmetic on void __iomem *. */
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock(&handler->enable_lock);
	}
}
295+
296+
/*
 * Syscore hooks: invoked late in suspend / early in resume with IRQs off,
 * so PLIC register state survives the hibernation power cycle.
 * Registered once from the init path via register_syscore_ops().
 */
static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};
300+
232301
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
233302
irq_hw_number_t hwirq)
234303
{
@@ -345,6 +414,7 @@ static int __init __plic_init(struct device_node *node,
345414
u32 nr_irqs;
346415
struct plic_priv *priv;
347416
struct plic_handler *handler;
417+
unsigned int cpu;
348418

349419
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
350420
if (!priv)
@@ -363,15 +433,21 @@ static int __init __plic_init(struct device_node *node,
363433
if (WARN_ON(!nr_irqs))
364434
goto out_iounmap;
365435

436+
priv->nr_irqs = nr_irqs;
437+
438+
priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL);
439+
if (!priv->prio_save)
440+
goto out_free_priority_reg;
441+
366442
nr_contexts = of_irq_count(node);
367443
if (WARN_ON(!nr_contexts))
368-
goto out_iounmap;
444+
goto out_free_priority_reg;
369445

370446
error = -ENOMEM;
371447
priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
372448
&plic_irqdomain_ops, priv);
373449
if (WARN_ON(!priv->irqdomain))
374-
goto out_iounmap;
450+
goto out_free_priority_reg;
375451

376452
for (i = 0; i < nr_contexts; i++) {
377453
struct of_phandle_args parent;
@@ -441,6 +517,11 @@ static int __init __plic_init(struct device_node *node,
441517
handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
442518
i * CONTEXT_ENABLE_SIZE;
443519
handler->priv = priv;
520+
521+
handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
522+
sizeof(*handler->enable_save), GFP_KERNEL);
523+
if (!handler->enable_save)
524+
goto out_free_enable_reg;
444525
done:
445526
for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
446527
plic_toggle(handler, hwirq, 0);
@@ -461,11 +542,19 @@ static int __init __plic_init(struct device_node *node,
461542
plic_starting_cpu, plic_dying_cpu);
462543
plic_cpuhp_setup_done = true;
463544
}
545+
register_syscore_ops(&plic_irq_syscore_ops);
464546

465547
pr_info("%pOFP: mapped %d interrupts with %d handlers for"
466548
" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
467549
return 0;
468550

551+
out_free_enable_reg:
552+
for_each_cpu(cpu, cpu_present_mask) {
553+
handler = per_cpu_ptr(&plic_handlers, cpu);
554+
kfree(handler->enable_save);
555+
}
556+
out_free_priority_reg:
557+
kfree(priv->prio_save);
469558
out_iounmap:
470559
iounmap(priv->regs);
471560
out_free_priv:

0 commit comments

Comments
 (0)