Add architectural support for shared interrupts #61331
@@ -0,0 +1,162 @@
/*
 * Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sw_isr_table.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/spinlock.h>

struct shared_irq_client {
	void (*routine)(const void *arg);
	const void *arg;
};

struct shared_irq_data {
	bool enabled;
	struct shared_irq_client clients[CONFIG_SHARED_IRQ_MAX_NUM_CLIENTS];
	uint32_t client_num;
	struct k_spinlock lock;
};

static struct shared_irq_data shared_irq_table[CONFIG_NUM_IRQS];
Review comment: This is an awfully big table! 8 bytes per record, 16 (default) records per IRQ, and most platforms have a few dozen IRQ slots. At the very least I'd say 16 is way too generous for a MAX_NUM_CLIENTS default. The much more typical number of drivers forced to share an interrupt is "two". :)

Review comment: Actually, you'd pay for it with a little code size, but one clever way to do this would be to put a dlist node into the structs and keep them in lists, "allocating" them pool-style (i.e. walk the array linearly to start, keep deleted records in a free list, and allocate from there when the original N have been exhausted). Then you'd only need one record (albeit one of 16 bytes instead of 8) for each shared handler of any IRQ in the system.
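As a rough illustration of the pool-style approach suggested above (not part of the patch), a sketch along these lines could replace the per-IRQ client arrays. The names pooled_irq_client, client_pool, free_clients and client_alloc() are invented for this example, it assumes Zephyr's sys_dlist API from <zephyr/sys/dlist.h>, and it reuses the existing Kconfig symbol only as a stand-in for a system-wide pool size:

/* Illustration only: one global pool of client records, shared by every
 * IRQ line, with released records recycled through a free list.
 */
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>

struct pooled_irq_client {
	sys_dnode_t node;		/* links the record into a per-IRQ list */
	void (*routine)(const void *arg);
	const void *arg;
};

/* One record per shared handler in the whole system, not per IRQ. */
static struct pooled_irq_client client_pool[CONFIG_SHARED_IRQ_MAX_NUM_CLIENTS];
static size_t pool_watermark;		/* next never-used slot in the array */
static sys_dlist_t free_clients = SYS_DLIST_STATIC_INIT(&free_clients);

static struct pooled_irq_client *client_alloc(void)
{
	/* prefer recycling a previously released record */
	sys_dnode_t *node = sys_dlist_get(&free_clients);

	if (node != NULL) {
		return CONTAINER_OF(node, struct pooled_irq_client, node);
	}

	/* otherwise walk the array linearly until the original N run out */
	if (pool_watermark < ARRAY_SIZE(client_pool)) {
		return &client_pool[pool_watermark++];
	}

	return NULL;
}

static void client_free(struct pooled_irq_client *client)
{
	sys_dlist_append(&free_clients, &client->node);
}

Each IRQ slot would then only need a sys_dlist_t list head instead of a full client array, at the cost of the extra dlist node per record.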

void shared_irq_isr(const void *data)
{
	int i;
	const struct shared_irq_data *irq_data = data;

	for (i = 0; i < irq_data->client_num; i++)
		/* note: due to how the IRQ disconnect function works
		 * this should be impossible, but it's better to be safe
		 * than sorry, I guess.
		 */
		if (irq_data->clients[i].routine)
			irq_data->clients[i].routine(irq_data->clients[i].arg);
}

void z_isr_install(unsigned int irq, void (*routine)(const void *),
		   const void *param)
{
	struct shared_irq_data *irq_data;
	unsigned int table_idx;
	int i;
	k_spinlock_key_t key;

	table_idx = irq - CONFIG_GEN_IRQ_START_VECTOR;

	/* check for out of bounds table index */
	if (table_idx >= CONFIG_NUM_IRQS)
		return;

	irq_data = &shared_irq_table[table_idx];

	/* seems like we've reached the limit of clients */
	/* TODO: is there a better way of letting the user know this is the case? */
	if (irq_data->client_num == CONFIG_SHARED_IRQ_MAX_NUM_CLIENTS)
		return;

	key = k_spin_lock(&irq_data->lock);

	if (!irq_data->enabled) {
Review comment: Just test
		_sw_isr_table[table_idx].arg = irq_data;
		_sw_isr_table[table_idx].isr = shared_irq_isr;

		irq_data->enabled = true;
Review comment: (Never mind: this is me being confused as mentioned in the comment below, I just forgot to delete the comment)
	}

	/* don't register the same routine/arg pair again */
	for (i = 0; i < irq_data->client_num; i++) {
		if (irq_data->clients[i].routine == routine
		    && irq_data->clients[i].arg == param) {
			k_spin_unlock(&irq_data->lock, key);
			return;
		}
	}

	irq_data->clients[irq_data->client_num].routine = routine;
	irq_data->clients[irq_data->client_num].arg = param;
	irq_data->client_num++;

	k_spin_unlock(&irq_data->lock, key);
}

static void swap_client_data(struct shared_irq_client *a,
			     struct shared_irq_client *b)
{
	struct shared_irq_client tmp;

	tmp.arg = a->arg;
	tmp.routine = a->routine;

	a->arg = b->arg;
	a->routine = b->routine;

	b->arg = tmp.arg;
	b->routine = tmp.routine;
}

static inline void shared_irq_remove_client(struct shared_irq_data *irq_data,
					    int client_idx)
{
	int i;

	irq_data->clients[client_idx].routine = NULL;
	irq_data->clients[client_idx].arg = NULL;

	/* push the NULL client data back to the end of the array to avoid
	 * having holes in the shared IRQ table.
	 */
	for (i = client_idx; i <= (int)irq_data->client_num - 2; i++)
		swap_client_data(&irq_data->clients[i],
				 &irq_data->clients[i + 1]);
Review comment: Might be worth expanding this handling a bit to detect the case where there's only one handler registered and "unshare" the IRQ.
}
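Picking up the review note above about "unsharing", one possible shape for it could be the following sketch (not in the patch; shared_irq_maybe_unshare() is a hypothetical helper): once a disconnect leaves a single registered client, point the _sw_isr_table entry straight back at that client and drop the trampoline.

/* Sketch only: if a disconnect leaves exactly one client, remove the
 * shared_irq_isr() indirection and dispatch to that client directly.
 */
static void shared_irq_maybe_unshare(struct shared_irq_data *irq_data,
				     unsigned int table_idx)
{
	if (irq_data->client_num == 1) {
		_sw_isr_table[table_idx].isr = irq_data->clients[0].routine;
		_sw_isr_table[table_idx].arg = irq_data->clients[0].arg;
		irq_data->enabled = false;
	}
}

The caller would need to pass the table index down, since shared_irq_remove_client() currently only sees the shared_irq_data entry itself.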

int arch_irq_disconnect_dynamic(unsigned int irq,
				unsigned int priority,
				void (*routine)(const void *),
				const void *parameter,
				uint32_t flags)
{
	ARG_UNUSED(flags);
	ARG_UNUSED(priority);

	struct shared_irq_data *irq_data;
	unsigned int table_idx;
	int i;
	k_spinlock_key_t key;

	table_idx = irq - CONFIG_GEN_IRQ_START_VECTOR;

	/* check for out of bounds table index */
	if (table_idx >= CONFIG_NUM_IRQS)
		return -EINVAL;

	irq_data = &shared_irq_table[table_idx];

	if (!irq_data->client_num)
		return 0;

	key = k_spin_lock(&irq_data->lock);

	for (i = 0; i < irq_data->client_num; i++) {
		if (irq_data->clients[i].routine == routine
		    && irq_data->clients[i].arg == parameter) {
			shared_irq_remove_client(irq_data, i);
			irq_data->client_num--;
			if (!irq_data->client_num)
				irq_data->enabled = false;
			/* note: you can't have duplicate routine/arg pairs so this
			 * is the only match we're going to get.
			 */
			goto out_unlock;
		}
	}

out_unlock:
	k_spin_unlock(&irq_data->lock, key);
	return 0;
}
@@ -70,6 +70,32 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority,
					flags);
}

#ifdef CONFIG_SHARED_INTERRUPTS

/**
 * Unregister a dynamic interrupt.
 *
 * Use this in conjunction with the shared interrupts feature to
 * unregister an ISR/arg pair.
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flags
 *
 * @return 0 in case of success, a negative value otherwise.
 */
static inline int
irq_disconnect_dynamic(unsigned int irq, unsigned int priority,
		       void (*routine)(const void *parameter),
		       const void *parameter, uint32_t flags)
Review comment: Nitpick: would be cleaner to put the implementation for arch_irq_disconnect_dynamic() into this patch. Right now all the important stuff is added in a generic patch, but the patch that says it's adding irq_disconnect is actually only adding a wrapper.

Review comment: Good point, will fix. Thanks!
{
	return arch_irq_disconnect_dynamic(irq, priority, routine, parameter, flags);
}

#endif /* CONFIG_SHARED_INTERRUPTS */

/**
 * @brief Initialize a 'direct' interrupt handler.
 *
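For context, here is a hedged usage sketch (not taken from the PR) of how two drivers might end up sharing a line with this API. The IRQ number, priority, and handler names are invented for the example:

#include <zephyr/irq.h>

#define EXAMPLE_IRQ_LINE 24
#define EXAMPLE_IRQ_PRIO 2

static void first_dev_isr(const void *arg) { /* ... first device ... */ }
static void second_dev_isr(const void *arg) { /* ... second device ... */ }

void example(void)
{
	/* With CONFIG_SHARED_INTERRUPTS=y, the second connect on the same
	 * line chains the new ISR instead of overwriting the first one.
	 */
	irq_connect_dynamic(EXAMPLE_IRQ_LINE, EXAMPLE_IRQ_PRIO, first_dev_isr, NULL, 0);
	irq_connect_dynamic(EXAMPLE_IRQ_LINE, EXAMPLE_IRQ_PRIO, second_dev_isr, NULL, 0);
	irq_enable(EXAMPLE_IRQ_LINE);

	/* Later, one ISR/arg pair can be detached without touching the other. */
	irq_disconnect_dynamic(EXAMPLE_IRQ_LINE, EXAMPLE_IRQ_PRIO, second_dev_isr, NULL, 0);
}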
Review comment: There's some ambiguity, but this belongs in kernel/ not arch/common/ I'd say. The arch layer is responsible for handling interrupts and dispatching to a _sw_isr_table[] entry, but everything downstream is OS behavior and belongs in the kernel.

Review comment: ACK. I'm assuming the same rationale is applicable to sw_isr_common.c, right?