Skip to content

Commit ef8dd01

Browse files
committed
genirq/msi: Make interrupt allocation less convoluted
There is no real reason to do several loops over the MSI descriptors instead of just doing one loop. In case of an error everything is undone anyway so it does not matter whether it's a partial or a full rollback.

Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Michael Kelley <[email protected]>
Tested-by: Nishanth Menon <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent a80713f commit ef8dd01

File tree

3 files changed

+69
-67
lines changed

3 files changed

+69
-67
lines changed

.clang-format

-1
Original file line number | Diff line number | Diff line change
@@ -216,7 +216,6 @@ ForEachMacros:
216216
- 'for_each_migratetype_order'
217217
- 'for_each_msi_entry'
218218
- 'for_each_msi_entry_safe'
219-
- 'for_each_msi_vector'
220219
- 'for_each_net'
221220
- 'for_each_net_continue_reverse'
222221
- 'for_each_netdev'

include/linux/msi.h

-6
Original file line number | Diff line number | Diff line change
@@ -206,12 +206,6 @@ struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter);
206206
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
207207
#define for_each_msi_entry_safe(desc, tmp, dev) \
208208
list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
209-
#define for_each_msi_vector(desc, __irq, dev) \
210-
for_each_msi_entry((desc), (dev)) \
211-
if ((desc)->irq) \
212-
for (__irq = (desc)->irq; \
213-
__irq < ((desc)->irq + (desc)->nvec_used); \
214-
__irq++)
215209

216210
#ifdef CONFIG_IRQ_MSI_IOMMU
217211
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)

kernel/irq/msi.c

+69-60
Original file line number | Diff line number | Diff line change
@@ -828,23 +828,74 @@ static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
828828
return allocated ? allocated : -ENOSPC;
829829
}
830830

831+
#define VIRQ_CAN_RESERVE 0x01
832+
#define VIRQ_ACTIVATE 0x02
833+
#define VIRQ_NOMASK_QUIRK 0x04
834+
835+
static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
836+
{
837+
struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
838+
int ret;
839+
840+
if (!(vflags & VIRQ_CAN_RESERVE)) {
841+
irqd_clr_can_reserve(irqd);
842+
if (vflags & VIRQ_NOMASK_QUIRK)
843+
irqd_set_msi_nomask_quirk(irqd);
844+
}
845+
846+
if (!(vflags & VIRQ_ACTIVATE))
847+
return 0;
848+
849+
ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
850+
if (ret)
851+
return ret;
852+
/*
853+
* If the interrupt uses reservation mode, clear the activated bit
854+
* so request_irq() will assign the final vector.
855+
*/
856+
if (vflags & VIRQ_CAN_RESERVE)
857+
irqd_clr_activated(irqd);
858+
return 0;
859+
}
860+
831861
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
832862
int nvec)
833863
{
834864
struct msi_domain_info *info = domain->host_data;
835865
struct msi_domain_ops *ops = info->ops;
836-
struct irq_data *irq_data;
837-
struct msi_desc *desc;
838866
msi_alloc_info_t arg = { };
867+
unsigned int vflags = 0;
868+
struct msi_desc *desc;
839869
int allocated = 0;
840870
int i, ret, virq;
841-
bool can_reserve;
842871

843872
ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
844873
if (ret)
845874
return ret;
846875

847-
for_each_msi_entry(desc, dev) {
876+
/*
877+
* This flag is set by the PCI layer as we need to activate
878+
* the MSI entries before the PCI layer enables MSI in the
879+
* card. Otherwise the card latches a random msi message.
880+
*/
881+
if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
882+
vflags |= VIRQ_ACTIVATE;
883+
884+
/*
885+
* Interrupt can use a reserved vector and will not occupy
886+
* a real device vector until the interrupt is requested.
887+
*/
888+
if (msi_check_reservation_mode(domain, info, dev)) {
889+
vflags |= VIRQ_CAN_RESERVE;
890+
/*
891+
* MSI affinity setting requires a special quirk (X86) when
892+
* reservation mode is active.
893+
*/
894+
if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
895+
vflags |= VIRQ_NOMASK_QUIRK;
896+
}
897+
898+
msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
848899
ops->set_desc(&arg, desc);
849900

850901
virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
@@ -856,49 +907,12 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
856907
for (i = 0; i < desc->nvec_used; i++) {
857908
irq_set_msi_desc_off(virq, i, desc);
858909
irq_debugfs_copy_devname(virq + i, dev);
910+
ret = msi_init_virq(domain, virq + i, vflags);
911+
if (ret)
912+
return ret;
859913
}
860914
allocated++;
861915
}
862-
863-
can_reserve = msi_check_reservation_mode(domain, info, dev);
864-
865-
/*
866-
* This flag is set by the PCI layer as we need to activate
867-
* the MSI entries before the PCI layer enables MSI in the
868-
* card. Otherwise the card latches a random msi message.
869-
*/
870-
if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
871-
goto skip_activate;
872-
873-
for_each_msi_vector(desc, i, dev) {
874-
if (desc->irq == i) {
875-
virq = desc->irq;
876-
dev_dbg(dev, "irq [%d-%d] for MSI\n",
877-
virq, virq + desc->nvec_used - 1);
878-
}
879-
880-
irq_data = irq_domain_get_irq_data(domain, i);
881-
if (!can_reserve) {
882-
irqd_clr_can_reserve(irq_data);
883-
if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
884-
irqd_set_msi_nomask_quirk(irq_data);
885-
}
886-
ret = irq_domain_activate_irq(irq_data, can_reserve);
887-
if (ret)
888-
return ret;
889-
}
890-
891-
skip_activate:
892-
/*
893-
* If these interrupts use reservation mode, clear the activated bit
894-
* so request_irq() will assign the final vector.
895-
*/
896-
if (can_reserve) {
897-
for_each_msi_vector(desc, i, dev) {
898-
irq_data = irq_domain_get_irq_data(domain, i);
899-
irqd_clr_activated(irq_data);
900-
}
901-
}
902916
return 0;
903917
}
904918

@@ -976,26 +990,21 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nve
976990

977991
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
978992
{
979-
struct irq_data *irq_data;
993+
struct irq_data *irqd;
980994
struct msi_desc *desc;
981995
int i;
982996

983-
for_each_msi_vector(desc, i, dev) {
984-
irq_data = irq_domain_get_irq_data(domain, i);
985-
if (irqd_is_activated(irq_data))
986-
irq_domain_deactivate_irq(irq_data);
987-
}
988-
989-
for_each_msi_entry(desc, dev) {
990-
/*
991-
* We might have failed to allocate an MSI early
992-
* enough that there is no IRQ associated to this
993-
* entry. If that's the case, don't do anything.
994-
*/
995-
if (desc->irq) {
996-
irq_domain_free_irqs(desc->irq, desc->nvec_used);
997-
desc->irq = 0;
997+
/* Only handle MSI entries which have an interrupt associated */
998+
msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
999+
/* Make sure all interrupts are deactivated */
1000+
for (i = 0; i < desc->nvec_used; i++) {
1001+
irqd = irq_domain_get_irq_data(domain, desc->irq + i);
1002+
if (irqd && irqd_is_activated(irqd))
1003+
irq_domain_deactivate_irq(irqd);
9981004
}
1005+
1006+
irq_domain_free_irqs(desc->irq, desc->nvec_used);
1007+
desc->irq = 0;
9991008
}
10001009
}
10011010

0 commit comments

Comments (0)