@@ -17,6 +17,7 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 #include <asm/smp.h>
 
 /*
@@ -67,6 +68,8 @@ struct plic_priv {
 	struct irq_domain *irqdomain;
 	void __iomem *regs;
 	unsigned long plic_quirks;
+	unsigned int nr_irqs;
+	unsigned long *prio_save;
 };
 
 struct plic_handler {
@@ -78,6 +81,7 @@ struct plic_handler {
 	 */
 	raw_spinlock_t		enable_lock;
 	void __iomem		*enable_base;
+	u32			*enable_save;
 	struct plic_priv	*priv;
 };
 static int plic_parent_irq __ro_after_init;
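The three new fields hold the state that has to survive a hibernation image: prio_save keeps one bit per interrupt source (this driver only ever programs a priority of 0 or 1), and each per-context handler keeps one u32 of enable bits per 32 sources. A minimal sketch of the corresponding sizing, mirroring the allocations made later in this patch; the helper function is hypothetical, the real allocations happen inline in __plic_init():

#include <linux/bitmap.h>
#include <linux/slab.h>

static int plic_alloc_save_areas(struct plic_priv *priv,
				 struct plic_handler *handler,
				 unsigned int nr_irqs)
{
	/* one bit per interrupt source: was its priority nonzero? */
	priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL);
	if (!priv->prio_save)
		return -ENOMEM;

	/* one u32 per 32 enable bits for this hart context */
	handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
				       sizeof(*handler->enable_save),
				       GFP_KERNEL);
	if (!handler->enable_save) {
		bitmap_free(priv->prio_save);
		return -ENOMEM;
	}
	return 0;
}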
@@ -229,6 +233,71 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
 	return IRQ_SET_MASK_OK;
 }
 
+static int plic_irq_suspend(void)
+{
+	unsigned int i, cpu;
+	u32 __iomem *reg;
+	struct plic_priv *priv;
+
+	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
+
+	for (i = 0; i < priv->nr_irqs; i++)
+		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
+			__set_bit(i, priv->prio_save);
+		else
+			__clear_bit(i, priv->prio_save);
+
+	for_each_cpu(cpu, cpu_present_mask) {
+		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+		if (!handler->present)
+			continue;
+
+		raw_spin_lock(&handler->enable_lock);
+		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
+			reg = handler->enable_base + i * sizeof(u32);
+			handler->enable_save[i] = readl(reg);
+		}
+		raw_spin_unlock(&handler->enable_lock);
+	}
+
+	return 0;
+}
+
+static void plic_irq_resume(void)
+{
+	unsigned int i, index, cpu;
+	u32 __iomem *reg;
+	struct plic_priv *priv;
+
+	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
+
+	for (i = 0; i < priv->nr_irqs; i++) {
+		index = BIT_WORD(i);
+		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
+		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+	}
+
+	for_each_cpu(cpu, cpu_present_mask) {
+		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+		if (!handler->present)
+			continue;
+
+		raw_spin_lock(&handler->enable_lock);
+		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
+			reg = handler->enable_base + i * sizeof(u32);
+			writel(handler->enable_save[i], reg);
+		}
+		raw_spin_unlock(&handler->enable_lock);
+	}
+}
+
+static struct syscore_ops plic_irq_syscore_ops = {
+	.suspend = plic_irq_suspend,
+	.resume = plic_irq_resume,
+};
+
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
 			      irq_hw_number_t hwirq)
 {
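For context: syscore_ops callbacks run late in the suspend/hibernate sequence, after devices have been suspended and non-boot CPUs taken offline, with local interrupts disabled. That is why plic_irq_suspend() can use smp_processor_id() and walk cpu_present_mask without racing against other CPUs. A minimal sketch of the same registration pattern; all example_* names are hypothetical, only struct syscore_ops and register_syscore_ops() are real kernel API:

#include <linux/io.h>
#include <linux/syscore_ops.h>

/* Hypothetical device: one 32-bit register that loses its contents
 * across hibernation. example_reg would be set up by probe via
 * ioremap(); it is assumed here for illustration. */
static void __iomem *example_reg;
static u32 example_saved;

static int example_suspend(void)
{
	example_saved = readl(example_reg);	/* one CPU, IRQs off */
	return 0;				/* nonzero aborts the suspend */
}

static void example_resume(void)
{
	writel(example_saved, example_reg);	/* hardware was reset; rewrite */
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};

static int __init example_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}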
@@ -345,6 +414,7 @@ static int __init __plic_init(struct device_node *node,
 	u32 nr_irqs;
 	struct plic_priv *priv;
 	struct plic_handler *handler;
+	unsigned int cpu;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -363,15 +433,21 @@ static int __init __plic_init(struct device_node *node,
 	if (WARN_ON(!nr_irqs))
 		goto out_iounmap;
 
+	priv->nr_irqs = nr_irqs;
+
+	priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL);
+	if (!priv->prio_save)
+		goto out_free_priority_reg;
+
 	nr_contexts = of_irq_count(node);
 	if (WARN_ON(!nr_contexts))
-		goto out_iounmap;
+		goto out_free_priority_reg;
 
 	error = -ENOMEM;
 	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
 			&plic_irqdomain_ops, priv);
 	if (WARN_ON(!priv->irqdomain))
-		goto out_iounmap;
+		goto out_free_priority_reg;
 
 	for (i = 0; i < nr_contexts; i++) {
 		struct of_phandle_args parent;
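A plain bitmap is enough for the priority save area because this driver never programs a priority other than 0 or 1: plic_irq_suspend() records whether each source's priority register was nonzero, and plic_irq_resume() turns that bit back into a 0/1 register write. A small standalone sketch of the bit arithmetic involved, with hypothetical helper names built on the real BIT_WORD()/BIT_MASK() macros:

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical round-trip: bit i of the bitmap encodes a 0/1 priority. */
static u32 prio_from_bitmap(const unsigned long *prio_save, unsigned int i)
{
	/* BIT_WORD(i) selects the long holding bit i; BIT_MASK(i) its bit */
	return (prio_save[BIT_WORD(i)] & BIT_MASK(i)) ? 1 : 0;
}

static void prio_to_bitmap(unsigned long *prio_save, unsigned int i, u32 prio)
{
	if (prio)
		__set_bit(i, prio_save);
	else
		__clear_bit(i, prio_save);
}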
@@ -441,6 +517,11 @@ static int __init __plic_init(struct device_node *node,
 		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
 			i * CONTEXT_ENABLE_SIZE;
 		handler->priv = priv;
+
+		handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
+					       sizeof(*handler->enable_save), GFP_KERNEL);
+		if (!handler->enable_save)
+			goto out_free_enable_reg;
 done:
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
 			plic_toggle(handler, hwirq, 0);
@@ -461,11 +542,19 @@ static int __init __plic_init(struct device_node *node,
 				  plic_starting_cpu, plic_dying_cpu);
 		plic_cpuhp_setup_done = true;
 	}
+	register_syscore_ops(&plic_irq_syscore_ops);
 
 	pr_info("%pOFP: mapped %d interrupts with %d handlers for"
 		" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
 	return 0;
 
+out_free_enable_reg:
+	for_each_cpu(cpu, cpu_present_mask) {
+		handler = per_cpu_ptr(&plic_handlers, cpu);
+		kfree(handler->enable_save);
+	}
+out_free_priority_reg:
+	kfree(priv->prio_save);
 out_iounmap:
 	iounmap(priv->regs);
 out_free_priv:
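The new labels slot into the existing goto-unwind chain in reverse allocation order. kfree(NULL) is a no-op, so out_free_priority_reg is reachable even when bitmap_alloc() itself failed, and out_free_enable_reg can iterate every present CPU even though handlers past the failure point never had enable_save allocated (per-cpu plic_handlers start zeroed). A compressed sketch of the idiom, with hypothetical names:

#include <linux/slab.h>

static int example_init_sketch(void)
{
	void *a = NULL, *b = NULL;

	a = kmalloc(16, GFP_KERNEL);
	if (!a)
		goto out_free_a;	/* kfree(NULL) is harmless */
	b = kmalloc(16, GFP_KERNEL);
	if (!b)
		goto out_free_b;
	return 0;

out_free_b:
	kfree(b);
out_free_a:
	kfree(a);
	return -ENOMEM;
}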