/*
 *	The PCI Utilities -- Verify and prepare devices before margining
 *
 *	Copyright (c) 2023-2024 KNS Group LLC (YADRO)
 *
 *	Can be freely distributed and used under the terms of the GNU GPL v2+.
 *
 *	SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <memory.h>
#include <string.h>

#include "lmr.h"

static u16 special_hw[][4] =
  // Vendor ID, Device ID, Revision ID, margin_hw
  { { 0x8086, 0x347A, 0x4, MARGIN_ICE_LAKE_RC },
    { 0xFFFF, 0, 0, MARGIN_HW_DEFAULT }
  };

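/*
 * To special-case another platform, a row would be appended before the
 * 0xFFFF sentinel; a purely illustrative (not real) entry could look like:
 *
 *   { 0x8086, 0x347A, 0x5, MARGIN_ICE_LAKE_RC },
 */
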
static enum margin_hw
detect_unique_hw(struct pci_dev *dev)
{
  u16 vendor = pci_read_word(dev, PCI_VENDOR_ID);
  u16 device = pci_read_word(dev, PCI_DEVICE_ID);
  u8 revision = pci_read_byte(dev, PCI_REVISION_ID);

  for (int i = 0; special_hw[i][0] != 0xFFFF; i++)
    {
      if (vendor == special_hw[i][0] && device == special_hw[i][1] && revision == special_hw[i][2])
        return special_hw[i][3];
    }
  return MARGIN_HW_DEFAULT;
}

bool
margin_port_is_down(struct pci_dev *dev)
{
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!cap)
    return false;
  u8 type = pci_read_byte(dev, PCI_HEADER_TYPE) & 0x7F;
  u8 dir = GET_REG_MASK(pci_read_word(dev, cap->addr + PCI_EXP_FLAGS), PCI_EXP_FLAGS_TYPE);

  return type == PCI_HEADER_TYPE_BRIDGE
         && (dir == PCI_EXP_TYPE_ROOT_PORT || dir == PCI_EXP_TYPE_DOWNSTREAM);
}

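/*
 * Usage sketch (assumes pacc is an initialized struct pci_access whose
 * devices were already enumerated with pci_scan_bus()): walk the device
 * list and pick out every port that drives the downstream end of a link.
 *
 *   for (struct pci_dev *p = pacc->devices; p; p = p->next)
 *     if (margin_port_is_down(p))
 *       ;  // p is a Root Port or a Downstream Port of a Switch
 */
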
bool
margin_find_pair(struct pci_access *pacc, struct pci_dev *dev, struct pci_dev **down_port,
                 struct pci_dev **up_port)
{
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!cap)
    return false;
  bool given_down = margin_port_is_down(dev);

  for (struct pci_dev *p = pacc->devices; p; p = p->next)
    {
      if (given_down && pci_read_byte(dev, PCI_SECONDARY_BUS) == p->bus && dev->domain == p->domain
          && p->func == 0)
        {
          *down_port = dev;
          *up_port = p;
          return true;
        }
      else if (!given_down && pci_read_byte(p, PCI_SECONDARY_BUS) == dev->bus
               && dev->domain == p->domain)
        {
          *down_port = p;
          *up_port = dev;
          return true;
        }
    }
  return false;
}

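/*
 * Usage sketch (dev may be either end of the link of interest; pacc must
 * already be scanned so pacc->devices is populated):
 *
 *   struct pci_dev *down, *up;
 *   if (margin_find_pair(pacc, dev, &down, &up))
 *     ;  // down/up now point at the Downstream and Upstream ends
 */
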
bool
margin_verify_link(struct pci_dev *down_port, struct pci_dev *up_port)
{
  struct pci_cap *cap = pci_find_cap(down_port, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!cap)
    return false;

  // Margining requires a link at 16 GT/s (speed encoding 4) or 32 GT/s (encoding 5)
  u16 speed = pci_read_word(down_port, cap->addr + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_SPEED;
  if (speed < 4 || speed > 5)
    return false;

  u8 down_sec = pci_read_byte(down_port, PCI_SECONDARY_BUS);

  // Verify that devices are linked, down_port is Root Port or Downstream Port of Switch,
  // up_port is Function 0 of a Device
  if (!(down_sec == up_port->bus && margin_port_is_down(down_port) && up_port->func == 0))
    return false;

  // The Upstream device must be in the D0 power state
  struct pci_cap *pm = pci_find_cap(up_port, PCI_CAP_ID_PM, PCI_CAP_NORMAL);
  return pm && !(pci_read_word(up_port, pm->addr + PCI_PM_CTRL) & PCI_PM_CTRL_STATE_MASK);
}

/* Margining Ready bit from the Margining Port Status register */
bool
margin_check_ready_bit(struct pci_dev *dev)
{
  struct pci_cap *lmr = pci_find_cap(dev, PCI_EXT_CAP_ID_LMR, PCI_CAP_EXTENDED);
  return lmr && (pci_read_word(dev, lmr->addr + PCI_LMR_PORT_STS) & PCI_LMR_PORT_STS_READY);
}

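/*
 * The Margining Ready bit may come up some time after the link reaches
 * 16/32 GT/s, so a caller would typically poll it with a timeout; a
 * minimal sketch, where msleep() stands in for any platform sleep call:
 *
 *   int tries = 100;
 *   while (!margin_check_ready_bit(dev) && --tries)
 *     msleep(10);
 *   if (!tries)
 *     ;  // device never became ready for margining
 */
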
/* Expects a device at 16 GT/s or higher; the caller must ensure the LMR capability exists */
static struct margin_dev
fill_dev_wrapper(struct pci_dev *dev)
{
  struct pci_cap *cap = pci_find_cap(dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  struct margin_dev res = {
    .dev = dev,
    .lmr_cap_addr = pci_find_cap(dev, PCI_EXT_CAP_ID_LMR, PCI_CAP_EXTENDED)->addr,
    .neg_width = GET_REG_MASK(pci_read_word(dev, cap->addr + PCI_EXP_LNKSTA), PCI_EXP_LNKSTA_WIDTH),
    .max_width = GET_REG_MASK(pci_read_long(dev, cap->addr + PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_WIDTH),
    .retimers_n
    = (!!(pci_read_word(dev, cap->addr + PCI_EXP_LNKSTA2) & PCI_EXP_LINKSTA2_RETIMER))
      + (!!(pci_read_word(dev, cap->addr + PCI_EXP_LNKSTA2) & PCI_EXP_LINKSTA2_2RETIMERS)),
    .link_speed = (pci_read_word(dev, cap->addr + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_SPEED),
    .hw = detect_unique_hw(dev)
  };
  return res;
}

bool
margin_fill_link(struct pci_dev *down_port, struct pci_dev *up_port, struct margin_link *wrappers)
{
  memset(wrappers, 0, sizeof(*wrappers));
  if (!margin_verify_link(down_port, up_port))
    return false;
  wrappers->down_port = fill_dev_wrapper(down_port);
  wrappers->up_port = fill_dev_wrapper(up_port);
  return true;
}

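/*
 * Putting the discovery helpers together (sketch, error handling trimmed):
 *
 *   struct pci_dev *down, *up;
 *   struct margin_link link;
 *   if (margin_find_pair(pacc, dev, &down, &up)
 *       && margin_fill_link(down, up, &link))
 *     ;  // link.down_port / link.up_port are ready to be prepared
 */
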
/* Disable ASPM, set Hardware Autonomous Speed/Width Disable bits */
static bool
margin_prep_dev(struct margin_dev *dev)
{
  struct pci_cap *pcie = pci_find_cap(dev->dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!pcie)
    return false;

  u16 lnk_ctl = pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL);
  dev->aspm = lnk_ctl & PCI_EXP_LNKCTL_ASPM;
  dev->hawd = !!(lnk_ctl & PCI_EXP_LNKCTL_HWAUTWD);
  lnk_ctl &= ~PCI_EXP_LNKCTL_ASPM;
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL, lnk_ctl);
  if (pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM)
    return false;

  lnk_ctl |= PCI_EXP_LNKCTL_HWAUTWD;
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL, lnk_ctl);

  u16 lnk_ctl2 = pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL2);
  dev->hasd = !!(lnk_ctl2 & PCI_EXP_LNKCTL2_SPEED_DIS);
  lnk_ctl2 |= PCI_EXP_LNKCTL2_SPEED_DIS;
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL2, lnk_ctl2);

  return true;
}

/* Restore Device ASPM, Hardware Autonomous Speed/Width settings */
static void
margin_restore_dev(struct margin_dev *dev)
{
  struct pci_cap *pcie = pci_find_cap(dev->dev, PCI_CAP_ID_EXP, PCI_CAP_NORMAL);
  if (!pcie)
    return;

  u16 lnk_ctl = pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL);
  // Restore with the Link Control ASPM mask; the value was saved from LNKCTL above
  lnk_ctl = SET_REG_MASK(lnk_ctl, PCI_EXP_LNKCTL_ASPM, dev->aspm);
  lnk_ctl = SET_REG_MASK(lnk_ctl, PCI_EXP_LNKCTL_HWAUTWD, dev->hawd);
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL, lnk_ctl);

  u16 lnk_ctl2 = pci_read_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL2);
  lnk_ctl2 = SET_REG_MASK(lnk_ctl2, PCI_EXP_LNKCTL2_SPEED_DIS, dev->hasd);
  pci_write_word(dev->dev, pcie->addr + PCI_EXP_LNKCTL2, lnk_ctl2);
}

bool
margin_prep_link(struct margin_link *link)
{
  if (!link)
    return false;
  if (!margin_prep_dev(&link->down_port))
    return false;
  if (!margin_prep_dev(&link->up_port))
    {
      margin_restore_dev(&link->down_port);
      return false;
    }
  return true;
}

void
margin_restore_link(struct margin_link *link)
{
  margin_restore_dev(&link->down_port);
  margin_restore_dev(&link->up_port);
}
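
/*
 * margin_prep_link() and margin_restore_link() are meant to bracket the
 * actual margining run; a sketch of the intended pairing, where
 * margin_run() is a hypothetical caller-side routine:
 *
 *   if (margin_prep_link(&link))
 *     {
 *       margin_run(&link);
 *       margin_restore_link(&link);
 *     }
 */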
207