@@ -29,16 +29,31 @@ struct gic_pcpu_mask {
29
29
DECLARE_BITMAP (pcpu_mask , GIC_MAX_INTRS );
30
30
};
31
31
32
/*
 * Allocation request descriptor passed from the IPI child domain down to
 * the root GIC domain's .alloc callback via irq_domain_alloc_irqs_parent().
 */
struct gic_irq_spec {
	enum {
		GIC_DEVICE,	/* ordinary device interrupt with a caller-chosen hwirq */
		GIC_IPI		/* IPI: hwirqs are carved out of the reserved pool */
	} type;

	union {
		/* GIC_IPI, in: the set of CPUs the IPIs must target */
		struct cpumask *ipimask;
		/*
		 * GIC_DEVICE, in: the requested shared hwirq.
		 * GIC_IPI, out: the base hwirq the parent allocated, so the
		 * child domain can set up its own mappings.
		 */
		unsigned int hwirq;
	};
};
43
+
32
44
static unsigned long __gic_base_addr ;
45
+
33
46
static void __iomem * gic_base ;
34
47
static struct gic_pcpu_mask pcpu_masks [NR_CPUS ];
35
48
static DEFINE_SPINLOCK (gic_lock );
36
49
static struct irq_domain * gic_irq_domain ;
50
+ static struct irq_domain * gic_ipi_domain ;
37
51
static int gic_shared_intrs ;
38
52
static int gic_vpes ;
39
53
static unsigned int gic_cpu_pin ;
40
54
static unsigned int timer_cpu_pin ;
41
55
static struct irq_chip gic_level_irq_controller , gic_edge_irq_controller ;
56
+ DECLARE_BITMAP (ipi_resrv , GIC_MAX_INTRS );
42
57
43
58
static void __gic_irq_dispatch (void );
44
59
@@ -753,7 +768,7 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
753
768
}
754
769
755
770
static int gic_shared_irq_domain_map (struct irq_domain * d , unsigned int virq ,
756
- irq_hw_number_t hw )
771
+ irq_hw_number_t hw , unsigned int vpe )
757
772
{
758
773
int intr = GIC_HWIRQ_TO_SHARED (hw );
759
774
unsigned long flags ;
@@ -763,9 +778,8 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
763
778
764
779
spin_lock_irqsave (& gic_lock , flags );
765
780
gic_map_to_pin (intr , gic_cpu_pin );
766
- /* Map to VPE 0 by default */
767
- gic_map_to_vpe (intr , 0 );
768
- set_bit (intr , pcpu_masks [0 ].pcpu_mask );
781
+ gic_map_to_vpe (intr , vpe );
782
+ set_bit (intr , pcpu_masks [vpe ].pcpu_mask );
769
783
spin_unlock_irqrestore (& gic_lock , flags );
770
784
771
785
return 0 ;
@@ -776,7 +790,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
776
790
{
777
791
if (GIC_HWIRQ_TO_LOCAL (hw ) < GIC_NUM_LOCAL_INTRS )
778
792
return gic_local_irq_domain_map (d , virq , hw );
779
- return gic_shared_irq_domain_map (d , virq , hw );
793
+ return gic_shared_irq_domain_map (d , virq , hw , 0 );
780
794
}
781
795
782
796
static int gic_irq_domain_xlate (struct irq_domain * d , struct device_node * ctrlr ,
@@ -798,9 +812,157 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
798
812
return 0 ;
799
813
}
800
814
815
+ static int gic_irq_domain_alloc (struct irq_domain * d , unsigned int virq ,
816
+ unsigned int nr_irqs , void * arg )
817
+ {
818
+ struct gic_irq_spec * spec = arg ;
819
+ irq_hw_number_t hwirq , base_hwirq ;
820
+ int cpu , ret , i ;
821
+
822
+ if (spec -> type == GIC_DEVICE ) {
823
+ /* verify that it doesn't conflict with an IPI irq */
824
+ if (test_bit (spec -> hwirq , ipi_resrv ))
825
+ return - EBUSY ;
826
+ } else {
827
+ base_hwirq = find_first_bit (ipi_resrv , gic_shared_intrs );
828
+ if (base_hwirq == gic_shared_intrs ) {
829
+ return - ENOMEM ;
830
+ }
831
+
832
+ /* check that we have enough space */
833
+ for (i = base_hwirq ; i < nr_irqs ; i ++ ) {
834
+ if (!test_bit (i , ipi_resrv ))
835
+ return - EBUSY ;
836
+ }
837
+ bitmap_clear (ipi_resrv , base_hwirq , nr_irqs );
838
+
839
+ /* map the hwirq for each cpu consecutively */
840
+ i = 0 ;
841
+ for_each_cpu (cpu , spec -> ipimask ) {
842
+ hwirq = GIC_SHARED_TO_HWIRQ (base_hwirq + i );
843
+
844
+ ret = irq_domain_set_hwirq_and_chip (d , virq + i , hwirq ,
845
+ & gic_edge_irq_controller ,
846
+ NULL );
847
+ if (ret )
848
+ goto error ;
849
+
850
+ ret = gic_shared_irq_domain_map (d , virq + i , hwirq , cpu );
851
+ if (ret )
852
+ goto error ;
853
+
854
+ i ++ ;
855
+ }
856
+
857
+ /*
858
+ * tell the parent about the base hwirq we allocated so it can
859
+ * set its own domain data
860
+ */
861
+ spec -> hwirq = base_hwirq ;
862
+ }
863
+
864
+ return 0 ;
865
+ error :
866
+ bitmap_set (ipi_resrv , base_hwirq , nr_irqs );
867
+ return ret ;
868
+ }
869
+
870
+ void gic_irq_domain_free (struct irq_domain * d , unsigned int virq ,
871
+ unsigned int nr_irqs )
872
+ {
873
+ irq_hw_number_t base_hwirq ;
874
+ struct irq_data * data ;
875
+
876
+ data = irq_get_irq_data (virq );
877
+ if (!data )
878
+ return ;
879
+
880
+ base_hwirq = GIC_HWIRQ_TO_SHARED (irqd_to_hwirq (data ));
881
+ bitmap_set (ipi_resrv , base_hwirq , nr_irqs );
882
+ }
883
+
801
884
/*
 * Root GIC domain ops: .map/.xlate serve legacy device interrupt setup,
 * while .alloc/.free serve hierarchical requests coming from the IPI
 * child domain.
 */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
};
890
+
891
+ static int gic_ipi_domain_xlate (struct irq_domain * d , struct device_node * ctrlr ,
892
+ const u32 * intspec , unsigned int intsize ,
893
+ irq_hw_number_t * out_hwirq ,
894
+ unsigned int * out_type )
895
+ {
896
+ /*
897
+ * There's nothing to translate here. hwirq is dynamically allocated and
898
+ * the irq type is always edge triggered.
899
+ * */
900
+ * out_hwirq = 0 ;
901
+ * out_type = IRQ_TYPE_EDGE_RISING ;
902
+
903
+ return 0 ;
904
+ }
905
+
906
+ static int gic_ipi_domain_alloc (struct irq_domain * d , unsigned int virq ,
907
+ unsigned int nr_irqs , void * arg )
908
+ {
909
+ struct cpumask * ipimask = arg ;
910
+ struct gic_irq_spec spec = {
911
+ .type = GIC_IPI ,
912
+ .ipimask = ipimask
913
+ };
914
+ int ret , i ;
915
+
916
+ ret = irq_domain_alloc_irqs_parent (d , virq , nr_irqs , & spec );
917
+ if (ret )
918
+ return ret ;
919
+
920
+ /* the parent should have set spec.hwirq to the base_hwirq it allocated */
921
+ for (i = 0 ; i < nr_irqs ; i ++ ) {
922
+ ret = irq_domain_set_hwirq_and_chip (d , virq + i ,
923
+ GIC_SHARED_TO_HWIRQ (spec .hwirq + i ),
924
+ & gic_edge_irq_controller ,
925
+ NULL );
926
+ if (ret )
927
+ goto error ;
928
+
929
+ ret = irq_set_irq_type (virq + i , IRQ_TYPE_EDGE_RISING );
930
+ if (ret )
931
+ goto error ;
932
+ }
933
+
934
+ return 0 ;
935
+ error :
936
+ irq_domain_free_irqs_parent (d , virq , nr_irqs );
937
+ return ret ;
938
+ }
939
+
940
/*
 * .free callback for the IPI child domain: delegate to the parent, whose
 * .free returns the reserved shared hwirqs to the ipi_resrv pool. Made
 * static to match gic_ipi_domain_alloc; it is only referenced through
 * gic_ipi_domain_ops in this file.
 */
static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
}
945
+
946
+ int gic_ipi_domain_match (struct irq_domain * d , struct device_node * node ,
947
+ enum irq_domain_bus_token bus_token )
948
+ {
949
+ bool is_ipi ;
950
+
951
+ switch (bus_token ) {
952
+ case DOMAIN_BUS_IPI :
953
+ is_ipi = d -> bus_token == bus_token ;
954
+ return to_of_node (d -> fwnode ) == node && is_ipi ;
955
+ break ;
956
+ default :
957
+ return 0 ;
958
+ }
959
+ }
960
+
961
+ static struct irq_domain_ops gic_ipi_domain_ops = {
962
+ .xlate = gic_ipi_domain_xlate ,
963
+ .alloc = gic_ipi_domain_alloc ,
964
+ .free = gic_ipi_domain_free ,
965
+ .match = gic_ipi_domain_match ,
804
966
};
805
967
806
968
static void __init __gic_init (unsigned long gic_base_addr ,
@@ -864,6 +1026,18 @@ static void __init __gic_init(unsigned long gic_base_addr,
864
1026
if (!gic_irq_domain )
865
1027
panic ("Failed to add GIC IRQ domain" );
866
1028
1029
+ gic_ipi_domain = irq_domain_add_hierarchy (gic_irq_domain ,
1030
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU ,
1031
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs ,
1032
+ node , & gic_ipi_domain_ops , NULL );
1033
+ if (!gic_ipi_domain )
1034
+ panic ("Failed to add GIC IPI domain" );
1035
+
1036
+ gic_ipi_domain -> bus_token = DOMAIN_BUS_IPI ;
1037
+
1038
+ /* Make the last 2 * NR_CPUS available for IPIs */
1039
+ bitmap_set (ipi_resrv , gic_shared_intrs - 2 * NR_CPUS , 2 * NR_CPUS );
1040
+
867
1041
gic_basic_init ();
868
1042
869
1043
gic_ipi_init ();
0 commit comments