@@ -33,12 +33,16 @@
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/ftrace.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
 
 #include <linux/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
 #include <asm/idle.h>
 #include <asm/r4k-timer.h>
+#include <asm/mips-cpc.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
 #include <asm/setup.h>
@@ -79,6 +83,11 @@ static cpumask_t cpu_core_setup_map;
 
 cpumask_t cpu_coherent_mask;
 
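+/* IRQ descriptors for the call-function and reschedule IPIs, cached at init. */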
+#ifdef CONFIG_GENERIC_IRQ_IPI
+static struct irq_desc *call_desc;
+static struct irq_desc *sched_desc;
+#endif
+
 static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
@@ -145,6 +154,133 @@ void register_smp_ops(struct plat_smp_ops *ops)
 	mp_ops = ops;
 }
 
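+/*
+ * Generic IPI support: send IPIs through the IRQ core's IPI domain
+ * (DOMAIN_BUS_IPI) rather than through platform-private mechanisms.
+ */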
+#ifdef CONFIG_GENERIC_IRQ_IPI
+void mips_smp_send_ipi_single(int cpu, unsigned int action)
+{
+	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
+}
+
+void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+	unsigned long flags;
+	unsigned int core;
+	int cpu;
+
+	local_irq_save(flags);
+
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		__ipi_send_mask(call_desc, mask);
+		break;
+
+	case SMP_RESCHEDULE_YOURSELF:
+		__ipi_send_mask(sched_desc, mask);
+		break;
+
+	default:
+		BUG();
+	}
+
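+	/*
+	 * Targets on other cores may be powered down and unable to take
+	 * the IPI yet: poke the CPC to power each such core up and spin
+	 * until the CPU has joined the coherent domain.
+	 */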
+	if (mips_cpc_present()) {
+		for_each_cpu(cpu, mask) {
+			core = cpu_data[cpu].core;
+
+			if (core == current_cpu_data.core)
+				continue;
+
+			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+				mips_cpc_lock_other(core);
+				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
+				mips_cpc_unlock_other();
+			}
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+	scheduler_ipi();
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+	generic_smp_call_function_interrupt();
+
+	return IRQ_HANDLED;
+}
+
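+/* One irqaction per IPI type; .name is what appears in /proc/interrupts. */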
+static struct irqaction irq_resched = {
+	.handler	= ipi_resched_interrupt,
+	.flags		= IRQF_PERCPU,
+	.name		= "IPI resched"
+};
+
+static struct irqaction irq_call = {
+	.handler	= ipi_call_interrupt,
+	.flags		= IRQF_PERCPU,
+	.name		= "IPI call"
+};
+
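+/* Install the per-CPU flow handler and the irqaction for one IPI virq. */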
+static __init void smp_ipi_init_one(unsigned int virq,
+				    struct irqaction *action)
+{
+	int ret;
+
+	irq_set_handler(virq, handle_percpu_irq);
+	ret = setup_irq(virq, action);
+	BUG_ON(ret);
+}
+
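+/*
+ * Locate the IPI irqdomain, reserve a virq (or per-CPU virq range) for
+ * each IPI type and cache the descriptors used by the send path above.
+ */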
+static int __init mips_smp_ipi_init(void)
+{
+	unsigned int call_virq, sched_virq;
+	struct irq_domain *ipidomain;
+	struct device_node *node;
+
+	node = of_irq_find_parent(of_root);
+	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+	/*
+	 * Some platforms have half DT setup. So if we found an irq node
+	 * but didn't find an ipidomain, try to search for one that is not
+	 * in the DT.
+	 */
+	if (node && !ipidomain)
+		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+	BUG_ON(!ipidomain);
+
+	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
+	BUG_ON(!call_virq);
+
+	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
+	BUG_ON(!sched_virq);
+
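+	/*
+	 * A per-CPU IPI domain hands back a contiguous virq range, one
+	 * entry per CPU; otherwise a single virq targets all CPUs.
+	 */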
+	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+		int cpu;
+
+		for_each_cpu(cpu, cpu_possible_mask) {
+			smp_ipi_init_one(call_virq + cpu, &irq_call);
+			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
+		}
+	} else {
+		smp_ipi_init_one(call_virq, &irq_call);
+		smp_ipi_init_one(sched_virq, &irq_resched);
+	}
+
+	call_desc = irq_to_desc(call_virq);
+	sched_desc = irq_to_desc(sched_virq);
+
+	return 0;
+}
+early_initcall(mips_smp_ipi_init);
+#endif
+
 /*
  * First C code run on the secondary CPUs after being started up by
  * the master.