1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
3 */
4
5 #include "ifpga_feature_dev.h"
6 #include "opae_spi.h"
7 #include "opae_intel_max10.h"
8 #include "opae_i2c.h"
9 #include "opae_at24_eeprom.h"
10
11 #define PWR_THRESHOLD_MAX 0x7F
12
fme_get_prop(struct ifpga_fme_hw * fme,struct feature_prop * prop)13 int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
14 {
15 struct ifpga_feature *feature;
16
17 if (!fme)
18 return -ENOENT;
19
20 feature = get_fme_feature_by_id(fme, prop->feature_id);
21
22 if (feature && feature->ops && feature->ops->get_prop)
23 return feature->ops->get_prop(feature, prop);
24
25 return -ENOENT;
26 }
27
fme_set_prop(struct ifpga_fme_hw * fme,struct feature_prop * prop)28 int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
29 {
30 struct ifpga_feature *feature;
31
32 if (!fme)
33 return -ENOENT;
34
35 feature = get_fme_feature_by_id(fme, prop->feature_id);
36
37 if (feature && feature->ops && feature->ops->set_prop)
38 return feature->ops->set_prop(feature, prop);
39
40 return -ENOENT;
41 }
42
/* Forward an interrupt configuration request to the FME sub-feature
 * identified by feature_id.  irq_set is an opaque, feature-specific
 * descriptor.  Returns -ENOENT when the handle or the set_irq op is
 * missing; otherwise the feature op's return value.
 */
int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
{
	struct ifpga_feature *f;

	if (!fme)
		return -ENOENT;

	f = get_fme_feature_by_id(fme, feature_id);
	if (!f || !f->ops || !f->ops->set_irq)
		return -ENOENT;

	return f->ops->set_irq(f, irq_set);
}
57
58 /* fme private feature head */
fme_hdr_init(struct ifpga_feature * feature)59 static int fme_hdr_init(struct ifpga_feature *feature)
60 {
61 struct feature_fme_header *fme_hdr;
62
63 fme_hdr = (struct feature_fme_header *)feature->addr;
64
65 dev_info(NULL, "FME HDR Init.\n");
66 dev_info(NULL, "FME cap %llx.\n",
67 (unsigned long long)fme_hdr->capability.csr);
68
69 return 0;
70 }
71
fme_hdr_uinit(struct ifpga_feature * feature)72 static void fme_hdr_uinit(struct ifpga_feature *feature)
73 {
74 UNUSED(feature);
75
76 dev_info(NULL, "FME HDR UInit.\n");
77 }
78
/* Read the FME header CSR and report its revision field. */
static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_header hdr;
	struct feature_fme_header *base;

	base = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
	hdr.csr = readq(&base->header);
	*revision = hdr.revision;

	return 0;
}
90
/* Report the number of ports advertised by the FME capability CSR. */
static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
{
	struct feature_fme_capability cap;
	struct feature_fme_header *base;

	base = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
	cap.csr = readq(&base->capability);
	*ports_num = cap.num_ports;

	return 0;
}
102
/* Report the cache size field of the FME capability CSR. */
static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
{
	struct feature_fme_capability cap;
	struct feature_fme_header *base;

	base = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
	cap.csr = readq(&base->capability);
	*cache_size = cap.cache_size;

	return 0;
}
114
/* Report the fabric version id field of the FME capability CSR. */
static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
{
	struct feature_fme_capability cap;
	struct feature_fme_header *base;

	base = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
	cap.csr = readq(&base->capability);
	*version = cap.fabric_verid;

	return 0;
}
126
/* Report the socket id field of the FME capability CSR. */
static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
{
	struct feature_fme_capability cap;
	struct feature_fme_header *base;

	base = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
	cap.csr = readq(&base->capability);
	*socket_id = cap.socket_id;

	return 0;
}
138
/* Read the raw 64-bit bitstream id CSR from the FME header. */
static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
		u64 *bitstream_id)
{
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	*bitstream_id = readq(&base->bitstream_id);
	return 0;
}
149
/* Read the raw 64-bit bitstream metadata CSR from the FME header. */
static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
		u64 *bitstream_metadata)
{
	struct feature_fme_header *base =
		get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	*bitstream_metadata = readq(&base->bitstream_md);
	return 0;
}
160
161 static int
fme_hdr_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)162 fme_hdr_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
163 {
164 struct ifpga_fme_hw *fme = feature->parent;
165
166 switch (prop->prop_id) {
167 case FME_HDR_PROP_REVISION:
168 return fme_hdr_get_revision(fme, &prop->data);
169 case FME_HDR_PROP_PORTS_NUM:
170 return fme_hdr_get_ports_num(fme, &prop->data);
171 case FME_HDR_PROP_CACHE_SIZE:
172 return fme_hdr_get_cache_size(fme, &prop->data);
173 case FME_HDR_PROP_VERSION:
174 return fme_hdr_get_version(fme, &prop->data);
175 case FME_HDR_PROP_SOCKET_ID:
176 return fme_hdr_get_socket_id(fme, &prop->data);
177 case FME_HDR_PROP_BITSTREAM_ID:
178 return fme_hdr_get_bitstream_id(fme, &prop->data);
179 case FME_HDR_PROP_BITSTREAM_METADATA:
180 return fme_hdr_get_bitstream_metadata(fme, &prop->data);
181 }
182
183 return -ENOENT;
184 }
185
/* Feature ops table for the FME header private feature (read-only props). */
struct ifpga_feature_ops fme_hdr_ops = {
	.init = fme_hdr_init,
	.uinit = fme_hdr_uinit,
	.get_prop = fme_hdr_get_prop,
};
191
192 /* thermal management */
/* thermal management */

/* Read temperature threshold1 from the thermal threshold CSR. */
static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres1 = th.tmp_thshold1;

	return 0;
}
206
/* Program temperature threshold1 (0 disables, valid range 0..100).
 * Returns -EBUSY when the capability lock bit is set, -EINVAL for
 * out-of-range values; the CSR is written only on success.
 */
static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
{
	struct feature_fme_thermal *thermal;
	struct feature_fme_header *hdr;
	struct feature_fme_tmp_threshold th;
	struct feature_fme_capability cap;
	int ret = 0;

	thermal = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_THERMAL_MGMT);
	hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	spinlock_lock(&fme->lock);
	th.csr = readq(&thermal->threshold);
	cap.csr = readq(&hdr->capability);

	if (cap.lock_bit == 1) {
		ret = -EBUSY;
	} else if (thres1 > 100) {
		ret = -EINVAL;
	} else {
		/* a zero threshold disables the comparator */
		th.tmp_thshold1_enable = thres1 ? 1 : 0;
		th.tmp_thshold1 = thres1;
		writeq(th.csr, &thermal->threshold);
	}

	spinlock_unlock(&fme->lock);

	return ret;
}
241
/* Read temperature threshold2 from the thermal threshold CSR. */
static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres2 = th.tmp_thshold2;

	return 0;
}
255
/* Program temperature threshold2 (0 disables, valid range 0..100).
 * Returns -EBUSY when the capability lock bit is set, -EINVAL for
 * out-of-range values; the CSR is written only on success.
 */
static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
{
	struct feature_fme_thermal *thermal;
	struct feature_fme_header *hdr;
	struct feature_fme_tmp_threshold th;
	struct feature_fme_capability cap;
	int ret = 0;

	thermal = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_THERMAL_MGMT);
	hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);

	spinlock_lock(&fme->lock);
	th.csr = readq(&thermal->threshold);
	cap.csr = readq(&hdr->capability);

	if (cap.lock_bit == 1) {
		ret = -EBUSY;
	} else if (thres2 > 100) {
		ret = -EINVAL;
	} else {
		/* a zero threshold disables the comparator */
		th.tmp_thshold2_enable = thres2 ? 1 : 0;
		th.tmp_thshold2 = thres2;
		writeq(th.csr, &thermal->threshold);
	}

	spinlock_unlock(&fme->lock);

	return ret;
}
290
/* Read the thermal trip threshold from the thermal threshold CSR. */
static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
		u64 *thres_trip)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres_trip = th.therm_trip_thshold;

	return 0;
}
305
/* Report whether temperature threshold1 has been reached (status bit). */
static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
		u64 *thres1_reached)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres1_reached = th.thshold1_status;

	return 0;
}
320
/* Report whether temperature threshold2 has been reached (status bit).
 * Fix: the output parameter was misnamed thres1_reached (copy-paste from
 * the threshold1 variant) although it carries the threshold2 status;
 * renamed to thres2_reached. C callers are unaffected by parameter names.
 */
static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
		u64 *thres2_reached)
{
	struct feature_fme_thermal *thermal;
	struct feature_fme_tmp_threshold temp_threshold;

	thermal = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_THERMAL_MGMT);

	temp_threshold.csr = readq(&thermal->threshold);
	*thres2_reached = temp_threshold.thshold2_status;

	return 0;
}
335
/* Read the threshold1 policy bit from the thermal threshold CSR. */
static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
		u64 *thres1_policy)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_THERMAL_MGMT);

	th.csr = readq(&thermal->threshold);
	*thres1_policy = th.thshold_policy;

	return 0;
}
350
/* Program the threshold1 policy bit; only values 0 and 1 are accepted,
 * anything else yields -EINVAL without touching the CSR.
 */
static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
		u64 thres1_policy)
{
	struct feature_fme_tmp_threshold th;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_THERMAL_MGMT);

	spinlock_lock(&fme->lock);
	th.csr = readq(&thermal->threshold);

	if (thres1_policy > 1) {
		spinlock_unlock(&fme->lock);
		return -EINVAL;
	}

	th.thshold_policy = thres1_policy;
	writeq(th.csr, &thermal->threshold);
	spinlock_unlock(&fme->lock);

	return 0;
}
377
/* Read the current FPGA temperature from the sensor-format-1 CSR. */
static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
{
	struct feature_fme_temp_rdsensor_fmt1 sensor;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_THERMAL_MGMT);

	sensor.csr = readq(&thermal->rdsensor_fm1);
	*temp = sensor.fpga_temp;

	return 0;
}
391
/* Read the thermal feature's header CSR and report its revision. */
static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_header hdr;
	struct feature_fme_thermal *thermal =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_THERMAL_MGMT);

	hdr.csr = readq(&thermal->header);
	*revision = hdr.revision;

	return 0;
}
404
405 #define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
406
fme_thermal_mgmt_init(struct ifpga_feature * feature)407 static int fme_thermal_mgmt_init(struct ifpga_feature *feature)
408 {
409 struct feature_fme_thermal *fme_thermal;
410 struct feature_fme_tmp_threshold_cap thermal_cap;
411
412 UNUSED(feature);
413
414 dev_info(NULL, "FME thermal mgmt Init.\n");
415
416 fme_thermal = (struct feature_fme_thermal *)feature->addr;
417 thermal_cap.csr = readq(&fme_thermal->threshold_cap);
418
419 dev_info(NULL, "FME thermal cap %llx.\n",
420 (unsigned long long)fme_thermal->threshold_cap.csr);
421
422 if (thermal_cap.tmp_thshold_disabled)
423 feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
424
425 return 0;
426 }
427
fme_thermal_mgmt_uinit(struct ifpga_feature * feature)428 static void fme_thermal_mgmt_uinit(struct ifpga_feature *feature)
429 {
430 UNUSED(feature);
431
432 dev_info(NULL, "FME thermal mgmt UInit.\n");
433 }
434
435 static int
fme_thermal_set_prop(struct ifpga_feature * feature,struct feature_prop * prop)436 fme_thermal_set_prop(struct ifpga_feature *feature, struct feature_prop *prop)
437 {
438 struct ifpga_fme_hw *fme = feature->parent;
439
440 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
441 return -ENOENT;
442
443 switch (prop->prop_id) {
444 case FME_THERMAL_PROP_THRESHOLD1:
445 return fme_thermal_set_threshold1(fme, prop->data);
446 case FME_THERMAL_PROP_THRESHOLD2:
447 return fme_thermal_set_threshold2(fme, prop->data);
448 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
449 return fme_thermal_set_threshold1_policy(fme, prop->data);
450 }
451
452 return -ENOENT;
453 }
454
455 static int
fme_thermal_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)456 fme_thermal_get_prop(struct ifpga_feature *feature, struct feature_prop *prop)
457 {
458 struct ifpga_fme_hw *fme = feature->parent;
459
460 if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
461 prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
462 prop->prop_id != FME_THERMAL_PROP_REVISION)
463 return -ENOENT;
464
465 switch (prop->prop_id) {
466 case FME_THERMAL_PROP_THRESHOLD1:
467 return fme_thermal_get_threshold1(fme, &prop->data);
468 case FME_THERMAL_PROP_THRESHOLD2:
469 return fme_thermal_get_threshold2(fme, &prop->data);
470 case FME_THERMAL_PROP_THRESHOLD_TRIP:
471 return fme_thermal_get_threshold_trip(fme, &prop->data);
472 case FME_THERMAL_PROP_THRESHOLD1_REACHED:
473 return fme_thermal_get_threshold1_reached(fme, &prop->data);
474 case FME_THERMAL_PROP_THRESHOLD2_REACHED:
475 return fme_thermal_get_threshold2_reached(fme, &prop->data);
476 case FME_THERMAL_PROP_THRESHOLD1_POLICY:
477 return fme_thermal_get_threshold1_policy(fme, &prop->data);
478 case FME_THERMAL_PROP_TEMPERATURE:
479 return fme_thermal_get_temperature(fme, &prop->data);
480 case FME_THERMAL_PROP_REVISION:
481 return fme_thermal_get_revision(fme, &prop->data);
482 }
483
484 return -ENOENT;
485 }
486
/* Feature ops table for FME thermal management (thresholds + sensors). */
struct ifpga_feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
	.uinit = fme_thermal_mgmt_uinit,
	.get_prop = fme_thermal_get_prop,
	.set_prop = fme_thermal_set_prop,
};
493
/* Read the consumed-power field from the power management status CSR. */
static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
{
	struct feature_fme_pm_status status;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	status.csr = readq(&power->status);
	*consumed = status.pwr_consumed;

	return 0;
}
507
/* Read power threshold1 from the AP threshold CSR. */
static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	th.csr = readq(&power->threshold);
	*threshold = th.threshold1;

	return 0;
}
521
/* Program power threshold1; values above PWR_THRESHOLD_MAX yield
 * -EINVAL and leave the CSR untouched.
 */
static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	spinlock_lock(&fme->lock);
	th.csr = readq(&power->threshold);

	if (threshold > PWR_THRESHOLD_MAX) {
		spinlock_unlock(&fme->lock);
		return -EINVAL;
	}

	th.threshold1 = threshold;
	writeq(th.csr, &power->threshold);
	spinlock_unlock(&fme->lock);

	return 0;
}
544
/* Read power threshold2 from the AP threshold CSR. */
static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	th.csr = readq(&power->threshold);
	*threshold = th.threshold2;

	return 0;
}
558
/* Program power threshold2; values above PWR_THRESHOLD_MAX yield
 * -EINVAL and leave the CSR untouched.
 */
static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	spinlock_lock(&fme->lock);
	th.csr = readq(&power->threshold);

	if (threshold > PWR_THRESHOLD_MAX) {
		spinlock_unlock(&fme->lock);
		return -EINVAL;
	}

	th.threshold2 = threshold;
	writeq(th.csr, &power->threshold);
	spinlock_unlock(&fme->lock);

	return 0;
}
581
/* Report the power threshold1 status bit from the AP threshold CSR. */
static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
		u64 *threshold_status)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	th.csr = readq(&power->threshold);
	*threshold_status = th.threshold1_status;

	return 0;
}
596
/* Report the power threshold2 status bit from the AP threshold CSR. */
static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
		u64 *threshold_status)
{
	struct feature_fme_pm_ap_threshold th;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	th.csr = readq(&power->threshold);
	*threshold_status = th.threshold2_status;

	return 0;
}
611
/* Read the FPGA latency report field from the power status CSR. */
static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
{
	struct feature_fme_pm_status status;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	status.csr = readq(&power->status);
	*rtl = status.fpga_latency_report;

	return 0;
}
625
/* Read the Xeon power limit; reports 0 when the limit is not enabled. */
static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
{
	struct feature_fme_pm_xeon_limit xeon_limit;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	xeon_limit.csr = readq(&power->xeon_limit);
	*limit = xeon_limit.enable ? xeon_limit.pwr_limit : 0;

	return 0;
}
642
/* Read the FPGA power limit; reports 0 when the limit is not enabled. */
static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
{
	struct feature_fme_pm_fpga_limit fpga_limit;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	fpga_limit.csr = readq(&power->fpga_limit);
	*limit = fpga_limit.enable ? fpga_limit.pwr_limit : 0;

	return 0;
}
659
/* Read the power feature's header CSR and report its revision. */
static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
	struct feature_header hdr;
	struct feature_fme_power *power =
		get_fme_feature_ioaddr_by_index(fme,
				FME_FEATURE_ID_POWER_MGMT);

	hdr.csr = readq(&power->header);
	*revision = hdr.revision;

	return 0;
}
672
fme_power_mgmt_init(struct ifpga_feature * feature)673 static int fme_power_mgmt_init(struct ifpga_feature *feature)
674 {
675 UNUSED(feature);
676
677 dev_info(NULL, "FME power mgmt Init.\n");
678
679 return 0;
680 }
681
fme_power_mgmt_uinit(struct ifpga_feature * feature)682 static void fme_power_mgmt_uinit(struct ifpga_feature *feature)
683 {
684 UNUSED(feature);
685
686 dev_info(NULL, "FME power mgmt UInit.\n");
687 }
688
fme_power_mgmt_get_prop(struct ifpga_feature * feature,struct feature_prop * prop)689 static int fme_power_mgmt_get_prop(struct ifpga_feature *feature,
690 struct feature_prop *prop)
691 {
692 struct ifpga_fme_hw *fme = feature->parent;
693
694 switch (prop->prop_id) {
695 case FME_PWR_PROP_CONSUMED:
696 return fme_pwr_get_consumed(fme, &prop->data);
697 case FME_PWR_PROP_THRESHOLD1:
698 return fme_pwr_get_threshold1(fme, &prop->data);
699 case FME_PWR_PROP_THRESHOLD2:
700 return fme_pwr_get_threshold2(fme, &prop->data);
701 case FME_PWR_PROP_THRESHOLD1_STATUS:
702 return fme_pwr_get_threshold1_status(fme, &prop->data);
703 case FME_PWR_PROP_THRESHOLD2_STATUS:
704 return fme_pwr_get_threshold2_status(fme, &prop->data);
705 case FME_PWR_PROP_RTL:
706 return fme_pwr_get_rtl(fme, &prop->data);
707 case FME_PWR_PROP_XEON_LIMIT:
708 return fme_pwr_get_xeon_limit(fme, &prop->data);
709 case FME_PWR_PROP_FPGA_LIMIT:
710 return fme_pwr_get_fpga_limit(fme, &prop->data);
711 case FME_PWR_PROP_REVISION:
712 return fme_pwr_get_revision(fme, &prop->data);
713 }
714
715 return -ENOENT;
716 }
717
fme_power_mgmt_set_prop(struct ifpga_feature * feature,struct feature_prop * prop)718 static int fme_power_mgmt_set_prop(struct ifpga_feature *feature,
719 struct feature_prop *prop)
720 {
721 struct ifpga_fme_hw *fme = feature->parent;
722
723 switch (prop->prop_id) {
724 case FME_PWR_PROP_THRESHOLD1:
725 return fme_pwr_set_threshold1(fme, prop->data);
726 case FME_PWR_PROP_THRESHOLD2:
727 return fme_pwr_set_threshold2(fme, prop->data);
728 }
729
730 return -ENOENT;
731 }
732
/* Feature ops table for FME power management (limits + thresholds). */
struct ifpga_feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
	.uinit = fme_power_mgmt_uinit,
	.get_prop = fme_power_mgmt_get_prop,
	.set_prop = fme_power_mgmt_set_prop,
};
739
/* Init hook for the HSSI ethernet feature; placeholder, no setup needed. */
static int fme_hssi_eth_init(struct ifpga_feature *feature)
{
	UNUSED(feature);

	return 0;
}
745
/* Teardown hook for the HSSI ethernet feature; placeholder. */
static void fme_hssi_eth_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
750
/* Feature ops table for the HSSI ethernet feature (no props/irqs). */
struct ifpga_feature_ops fme_hssi_eth_ops = {
	.init = fme_hssi_eth_init,
	.uinit = fme_hssi_eth_uinit,
};
755
/* Init hook for the external memory interface feature; placeholder. */
static int fme_emif_init(struct ifpga_feature *feature)
{
	UNUSED(feature);

	return 0;
}
761
/* Teardown hook for the external memory interface feature; placeholder. */
static void fme_emif_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
766
/* Feature ops table for the external memory interface feature. */
struct ifpga_feature_ops fme_emif_ops = {
	.init = fme_emif_init,
	.uinit = fme_emif_uinit,
};
771
/* Map a board interface type to a human-readable name for logging. */
static const char *board_type_to_string(u32 type)
{
	switch (type) {
	case VC_8_10G:
		return "VC_8x10G";
	case VC_4_25G:
		return "VC_4x25G";
	case VC_2_1_25:
		return "VC_2x1x25G";
	case VC_4_25G_2_25G:
		return "VC_4x25G+2x25G";
	case VC_2_2_25G:
		return "VC_2x2x25G";
	default:
		return "unknown";
	}
}
789
/* Map a board major version to a human-readable name for logging. */
static const char *board_major_to_string(u32 major)
{
	switch (major) {
	case VISTA_CREEK:
		return "VISTA_CREEK";
	case RUSH_CREEK:
		return "RUSH_CREEK";
	case DARBY_CREEK:
		return "DARBY_CREEK";
	default:
		return "unknown";
	}
}
803
/* Fill in the retimer/FVL topology counts for a known board type.
 * Returns -EINVAL for types without a mapping.
 * NOTE(review): VC_4_25G_2_25G has a display string in
 * board_type_to_string() but no entry here, so it falls through to
 * -EINVAL — confirm whether that is intentional.
 */
static int board_type_to_info(u32 type,
		struct opae_board_info *info)
{
	switch (type) {
	case VC_8_10G:
		info->nums_of_retimer = 2;
		info->ports_per_retimer = 4;
		info->nums_of_fvl = 2;
		info->ports_per_fvl = 4;
		break;
	case VC_4_25G:
		info->nums_of_retimer = 1;
		info->ports_per_retimer = 4;
		info->nums_of_fvl = 2;
		info->ports_per_fvl = 2;
		break;
	case VC_2_1_25:
		info->nums_of_retimer = 2;
		info->ports_per_retimer = 1;
		info->nums_of_fvl = 1;
		info->ports_per_fvl = 2;
		break;
	case VC_2_2_25G:
		info->nums_of_retimer = 2;
		info->ports_per_retimer = 2;
		info->nums_of_fvl = 2;
		info->ports_per_fvl = 2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
838
/* Decode the FME bitstream id into fme->board_info, derive the board
 * topology, and read the MAX10/NIOS firmware versions.  Logs a summary
 * of everything it finds.
 * Returns 0 on success, -ENODEV when the FME has no parent hw, and
 * -EINVAL when the bitstream id, board type, or version registers
 * cannot be read or decoded.
 */
static int fme_get_board_interface(struct ifpga_fme_hw *fme)
{
	struct fme_bitstream_id id;
	struct ifpga_hw *hw;
	u32 val;

	hw = fme->parent;
	if (!hw)
		return -ENODEV;

	/* the 64-bit bitstream id CSR packs all board description fields */
	if (fme_hdr_get_bitstream_id(fme, &id.id))
		return -EINVAL;

	fme->board_info.major = id.major;
	fme->board_info.minor = id.minor;
	fme->board_info.type = id.interface;
	fme->board_info.fvl_bypass = id.fvl_bypass;
	fme->board_info.mac_lightweight = id.mac_lightweight;
	/* "lightweiht" / "disagregate" are misspelled bitfield names in
	 * the bitstream id struct definition; kept as-is here
	 */
	fme->board_info.lightweight = id.lightweiht;
	fme->board_info.disaggregate = id.disagregate;
	fme->board_info.seu = id.seu;
	fme->board_info.ptp = id.ptp;

	dev_info(fme, "found: PCI dev: %02x:%02x:%x board: %s type: %s\n",
			hw->pci_data->bus,
			hw->pci_data->devid,
			hw->pci_data->function,
			board_major_to_string(fme->board_info.major),
			board_type_to_string(fme->board_info.type));

	dev_info(fme, "support feature:\n"
			"fvl_bypass:%s\n"
			"mac_lightweight:%s\n"
			"lightweight:%s\n"
			"disaggregate:%s\n"
			"seu:%s\n"
			"ptp1588:%s\n",
			check_support(fme->board_info.fvl_bypass),
			check_support(fme->board_info.mac_lightweight),
			check_support(fme->board_info.lightweight),
			check_support(fme->board_info.disaggregate),
			check_support(fme->board_info.seu),
			check_support(fme->board_info.ptp));


	/* derive retimer/FVL counts from the decoded board type */
	if (board_type_to_info(fme->board_info.type, &fme->board_info))
		return -EINVAL;

	dev_info(fme, "get board info: nums_retimers %d ports_per_retimer %d nums_fvl %d ports_per_fvl %d\n",
			fme->board_info.nums_of_retimer,
			fme->board_info.ports_per_retimer,
			fme->board_info.nums_of_fvl,
			fme->board_info.ports_per_fvl);

	/* firmware versions live in the low 24 bits of their registers */
	if (max10_sys_read(fme->max10_dev, MAX10_BUILD_VER, &val))
		return -EINVAL;
	fme->board_info.max10_version = val & 0xffffff;

	if (max10_sys_read(fme->max10_dev, NIOS2_FW_VERSION, &val))
		return -EINVAL;
	fme->board_info.nios_fw_version = val & 0xffffff;

	dev_info(fme, "max10 version 0x%x, nios fw version 0x%x\n",
			fme->board_info.max10_version,
			fme->board_info.nios_fw_version);

	return 0;
}
907
spi_self_checking(struct intel_max10_device * dev)908 static int spi_self_checking(struct intel_max10_device *dev)
909 {
910 u32 val;
911 int ret;
912
913 ret = max10_sys_read(dev, MAX10_TEST_REG, &val);
914 if (ret)
915 return -EIO;
916
917 dev_info(NULL, "Read MAX10 test register 0x%x\n", val);
918
919 return 0;
920 }
921
init_spi_share_data(struct ifpga_fme_hw * fme,struct altera_spi_device * spi)922 static void init_spi_share_data(struct ifpga_fme_hw *fme,
923 struct altera_spi_device *spi)
924 {
925 struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
926 opae_share_data *sd = NULL;
927
928 if (hw && hw->adapter && hw->adapter->shm.ptr) {
929 dev_info(NULL, "transfer share data to spi\n");
930 sd = (opae_share_data *)hw->adapter->shm.ptr;
931 spi->mutex = &sd->spi_mutex;
932 spi->dtb_sz_ptr = &sd->dtb_size;
933 spi->dtb = sd->dtb;
934 } else {
935 spi->mutex = NULL;
936 spi->dtb_sz_ptr = NULL;
937 spi->dtb = NULL;
938 }
939 }
940
fme_spi_init(struct ifpga_feature * feature)941 static int fme_spi_init(struct ifpga_feature *feature)
942 {
943 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
944 struct altera_spi_device *spi_master;
945 struct intel_max10_device *max10;
946 int ret = 0;
947
948 dev_info(fme, "FME SPI Master (Max10) Init.\n");
949 dev_debug(fme, "FME SPI base addr %p.\n",
950 feature->addr);
951 dev_debug(fme, "spi param=0x%llx\n",
952 (unsigned long long)opae_readq(feature->addr + 0x8));
953
954 spi_master = altera_spi_alloc(feature->addr, TYPE_SPI);
955 if (!spi_master)
956 return -ENODEV;
957 init_spi_share_data(fme, spi_master);
958
959 altera_spi_init(spi_master);
960
961 max10 = intel_max10_device_probe(spi_master, 0);
962 if (!max10) {
963 ret = -ENODEV;
964 dev_err(fme, "max10 init fail\n");
965 goto spi_fail;
966 }
967
968 fme->max10_dev = max10;
969
970 /* SPI self test */
971 if (spi_self_checking(max10)) {
972 ret = -EIO;
973 goto max10_fail;
974 }
975
976 return ret;
977
978 max10_fail:
979 intel_max10_device_remove(fme->max10_dev);
980 spi_fail:
981 altera_spi_release(spi_master);
982 return ret;
983 }
984
fme_spi_uinit(struct ifpga_feature * feature)985 static void fme_spi_uinit(struct ifpga_feature *feature)
986 {
987 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
988
989 if (fme->max10_dev)
990 intel_max10_device_remove(fme->max10_dev);
991 }
992
/* Feature ops table for the FME SPI master (MAX10-attached boards). */
struct ifpga_feature_ops fme_spi_master_ops = {
	.init = fme_spi_init,
	.uinit = fme_spi_uinit,
};
997
/* Wait (up to 10 s) for the NIOS firmware to finish PKVL retimer
 * initialization over SPI, kicking off the init with the default FEC
 * mode when the firmware (v3+) has not started it yet.
 * Returns 0 once NIOS_INIT_DONE is observed, -EIO on SPI register
 * access failure, -ETIMEDOUT when the deadline passes.
 */
static int nios_spi_wait_init_done(struct altera_spi_device *dev)
{
	u32 val = 0;
	/* absolute deadline: now + 10 seconds, in timer cycles */
	unsigned long timeout = rte_get_timer_cycles() +
			msecs_to_timer_cycles(10000);
	unsigned long ticks;
	int major_version;
	int fecmode = FEC_MODE_NO;

	if (spi_reg_read(dev, NIOS_VERSION, &val))
		return -EIO;

	major_version =
		(val & NIOS_VERSION_MAJOR) >> NIOS_VERSION_MAJOR_SHIFT;
	dev_info(dev, "A10 NIOS FW version %d\n", major_version);

	if (major_version >= 3) {
		/* read NIOS_INIT to check if PKVL INIT done or not */
		if (spi_reg_read(dev, NIOS_INIT, &val))
			return -EIO;

		dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);

		/* check if PKVLs are initialized already */
		if (val & NIOS_INIT_DONE || val & NIOS_INIT_START)
			goto nios_init_done;

		/* start to config the default FEC mode */
		val = fecmode | NIOS_INIT_START;

		if (spi_reg_write(dev, NIOS_INIT, val))
			return -EIO;
	}

nios_init_done:
	/* poll every 100 ms for the DONE bit until the deadline */
	do {
		if (spi_reg_read(dev, NIOS_INIT, &val))
			return -EIO;
		if (val & NIOS_INIT_DONE)
			break;

		ticks = rte_get_timer_cycles();
		if (time_after(ticks, timeout))
			return -ETIMEDOUT;
		msleep(100);
	} while (1);

	/* get the fecmode */
	if (spi_reg_read(dev, NIOS_INIT, &val))
		return -EIO;
	dev_debug(dev, "read NIOS_INIT: 0x%x\n", val);
	fecmode = (val & REQ_FEC_MODE) >> REQ_FEC_MODE_SHIFT;
	dev_info(dev, "fecmode: 0x%x, %s\n", fecmode,
			(fecmode == FEC_MODE_KR) ? "kr" :
			((fecmode == FEC_MODE_RS) ? "rs" : "no"));

	return 0;
}
1056
nios_spi_check_error(struct altera_spi_device * dev)1057 static int nios_spi_check_error(struct altera_spi_device *dev)
1058 {
1059 u32 value = 0;
1060
1061 if (spi_reg_read(dev, PKVL_A_MODE_STS, &value))
1062 return -EIO;
1063
1064 dev_debug(dev, "PKVL A Mode Status 0x%x\n", value);
1065
1066 if (value >= 0x100)
1067 return -EINVAL;
1068
1069 if (spi_reg_read(dev, PKVL_B_MODE_STS, &value))
1070 return -EIO;
1071
1072 dev_debug(dev, "PKVL B Mode Status 0x%x\n", value);
1073
1074 if (value >= 0x100)
1075 return -EINVAL;
1076
1077 return 0;
1078 }
1079
fme_nios_spi_init(struct ifpga_feature * feature)1080 static int fme_nios_spi_init(struct ifpga_feature *feature)
1081 {
1082 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1083 struct altera_spi_device *spi_master;
1084 struct intel_max10_device *max10;
1085 struct ifpga_hw *hw;
1086 struct opae_manager *mgr;
1087 int ret = 0;
1088
1089 hw = fme->parent;
1090 if (!hw)
1091 return -ENODEV;
1092
1093 mgr = hw->adapter->mgr;
1094 if (!mgr)
1095 return -ENODEV;
1096
1097 dev_info(fme, "FME SPI Master (NIOS) Init.\n");
1098 dev_debug(fme, "FME SPI base addr %p.\n",
1099 feature->addr);
1100 dev_debug(fme, "spi param=0x%llx\n",
1101 (unsigned long long)opae_readq(feature->addr + 0x8));
1102
1103 spi_master = altera_spi_alloc(feature->addr, TYPE_NIOS_SPI);
1104 if (!spi_master)
1105 return -ENODEV;
1106 init_spi_share_data(fme, spi_master);
1107
1108 /**
1109 * 1. wait A10 NIOS initial finished and
1110 * release the SPI master to Host
1111 */
1112 if (spi_master->mutex)
1113 pthread_mutex_lock(spi_master->mutex);
1114
1115 ret = nios_spi_wait_init_done(spi_master);
1116 if (ret != 0) {
1117 dev_err(fme, "FME NIOS_SPI init fail\n");
1118 if (spi_master->mutex)
1119 pthread_mutex_unlock(spi_master->mutex);
1120 goto release_dev;
1121 }
1122
1123 dev_info(fme, "FME NIOS_SPI initial done\n");
1124
1125 /* 2. check if error occur? */
1126 if (nios_spi_check_error(spi_master))
1127 dev_info(fme, "NIOS_SPI INIT done, but found some error\n");
1128
1129 if (spi_master->mutex)
1130 pthread_mutex_unlock(spi_master->mutex);
1131
1132 /* 3. init the spi master*/
1133 altera_spi_init(spi_master);
1134
1135 /* init the max10 device */
1136 max10 = intel_max10_device_probe(spi_master, 0);
1137 if (!max10) {
1138 ret = -ENODEV;
1139 dev_err(fme, "max10 init fail\n");
1140 goto release_dev;
1141 }
1142
1143 fme->max10_dev = max10;
1144
1145 max10->bus = hw->pci_data->bus;
1146
1147 fme_get_board_interface(fme);
1148
1149 mgr->sensor_list = &max10->opae_sensor_list;
1150
1151 /* SPI self test */
1152 if (spi_self_checking(max10))
1153 goto spi_fail;
1154
1155 return ret;
1156
1157 spi_fail:
1158 intel_max10_device_remove(fme->max10_dev);
1159 release_dev:
1160 altera_spi_release(spi_master);
1161 return -ENODEV;
1162 }
1163
fme_nios_spi_uinit(struct ifpga_feature * feature)1164 static void fme_nios_spi_uinit(struct ifpga_feature *feature)
1165 {
1166 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1167
1168 if (fme->max10_dev)
1169 intel_max10_device_remove(fme->max10_dev);
1170 }
1171
/* Feature ops for the FME SPI master managed by the A10 NIOS firmware. */
struct ifpga_feature_ops fme_nios_spi_master_ops = {
	.init = fme_nios_spi_init,
	.uinit = fme_nios_spi_uinit,
};
1176
i2c_mac_rom_test(struct altera_i2c_dev * dev)1177 static int i2c_mac_rom_test(struct altera_i2c_dev *dev)
1178 {
1179 char buf[20];
1180 int ret;
1181 char read_buf[20] = {0,};
1182 const char *string = "1a2b3c4d5e";
1183
1184 opae_memcpy(buf, string, strlen(string));
1185
1186 ret = at24_eeprom_write(dev, AT24512_SLAVE_ADDR, 0,
1187 (u8 *)buf, strlen(string));
1188 if (ret < 0) {
1189 dev_err(NULL, "write i2c error:%d\n", ret);
1190 return ret;
1191 }
1192
1193 ret = at24_eeprom_read(dev, AT24512_SLAVE_ADDR, 0,
1194 (u8 *)read_buf, strlen(string));
1195 if (ret < 0) {
1196 dev_err(NULL, "read i2c error:%d\n", ret);
1197 return ret;
1198 }
1199
1200 if (memcmp(buf, read_buf, strlen(string))) {
1201 dev_err(NULL, "%s test fail!\n", __func__);
1202 return -EFAULT;
1203 }
1204
1205 dev_info(NULL, "%s test successful\n", __func__);
1206
1207 return 0;
1208 }
1209
init_i2c_mutex(struct ifpga_fme_hw * fme)1210 static void init_i2c_mutex(struct ifpga_fme_hw *fme)
1211 {
1212 struct ifpga_hw *hw = (struct ifpga_hw *)fme->parent;
1213 struct altera_i2c_dev *i2c_dev;
1214 opae_share_data *sd = NULL;
1215
1216 if (fme->i2c_master) {
1217 i2c_dev = (struct altera_i2c_dev *)fme->i2c_master;
1218 if (hw && hw->adapter && hw->adapter->shm.ptr) {
1219 dev_info(NULL, "use multi-process mutex in i2c\n");
1220 sd = (opae_share_data *)hw->adapter->shm.ptr;
1221 i2c_dev->mutex = &sd->i2c_mutex;
1222 } else {
1223 dev_info(NULL, "use multi-thread mutex in i2c\n");
1224 i2c_dev->mutex = &i2c_dev->lock;
1225 }
1226 }
1227 }
1228
/* Probe the FME I2C master feature, pick its locking mode and run a
 * best-effort MAC ROM self test.
 *
 * Returns 0 on success, -ENODEV when the I2C master cannot be probed.
 */
static int fme_i2c_init(struct ifpga_feature *feature)
{
	struct feature_fme_i2c *i2c;
	struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;

	i2c = (struct feature_fme_i2c *)feature->addr;

	dev_info(NULL, "FME I2C Master Init.\n");

	fme->i2c_master = altera_i2c_probe(i2c);
	if (!fme->i2c_master)
		return -ENODEV;

	/* choose shared-memory vs in-process mutex for the bus */
	init_i2c_mutex(fme);

	/* MAC ROM self test; the result is deliberately ignored - a
	 * failing self test does not prevent feature initialization
	 */
	i2c_mac_rom_test(fme->i2c_master);

	return 0;
}
1249
fme_i2c_uninit(struct ifpga_feature * feature)1250 static void fme_i2c_uninit(struct ifpga_feature *feature)
1251 {
1252 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1253
1254 altera_i2c_remove(fme->i2c_master);
1255 }
1256
/* Feature ops for the FME I2C master (MAC ROM access). */
struct ifpga_feature_ops fme_i2c_master_ops = {
	.init = fme_i2c_init,
	.uinit = fme_i2c_uninit,
};
1261
fme_eth_group_init(struct ifpga_feature * feature)1262 static int fme_eth_group_init(struct ifpga_feature *feature)
1263 {
1264 struct ifpga_fme_hw *fme = (struct ifpga_fme_hw *)feature->parent;
1265 struct eth_group_device *dev;
1266
1267 dev = (struct eth_group_device *)eth_group_probe(feature->addr);
1268 if (!dev)
1269 return -ENODEV;
1270
1271 fme->eth_dev[dev->group_id] = dev;
1272
1273 fme->eth_group_region[dev->group_id].addr =
1274 feature->addr;
1275 fme->eth_group_region[dev->group_id].phys_addr =
1276 feature->phys_addr;
1277 fme->eth_group_region[dev->group_id].len =
1278 feature->size;
1279
1280 fme->nums_eth_dev++;
1281
1282 dev_info(NULL, "FME PHY Group %d Init.\n", dev->group_id);
1283 dev_info(NULL, "found %d eth group, addr %p phys_addr 0x%llx len %u\n",
1284 dev->group_id, feature->addr,
1285 (unsigned long long)feature->phys_addr,
1286 feature->size);
1287
1288 return 0;
1289 }
1290
/* Intentionally empty: eth group devices are kept for the FME's
 * lifetime. NOTE(review): resources from eth_group_probe() are
 * presumably released elsewhere - confirm against the eth group
 * driver.
 */
static void fme_eth_group_uinit(struct ifpga_feature *feature)
{
	UNUSED(feature);
}
1295
/* Feature ops for the FME ethernet (PHY) groups. */
struct ifpga_feature_ops fme_eth_group_ops = {
	.init = fme_eth_group_init,
	.uinit = fme_eth_group_uinit,
};
1300
fme_mgr_read_mac_rom(struct ifpga_fme_hw * fme,int offset,void * buf,int size)1301 int fme_mgr_read_mac_rom(struct ifpga_fme_hw *fme, int offset,
1302 void *buf, int size)
1303 {
1304 struct altera_i2c_dev *dev;
1305
1306 dev = fme->i2c_master;
1307 if (!dev)
1308 return -ENODEV;
1309
1310 return at24_eeprom_read(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1311 }
1312
fme_mgr_write_mac_rom(struct ifpga_fme_hw * fme,int offset,void * buf,int size)1313 int fme_mgr_write_mac_rom(struct ifpga_fme_hw *fme, int offset,
1314 void *buf, int size)
1315 {
1316 struct altera_i2c_dev *dev;
1317
1318 dev = fme->i2c_master;
1319 if (!dev)
1320 return -ENODEV;
1321
1322 return at24_eeprom_write(dev, AT24512_SLAVE_ADDR, offset, buf, size);
1323 }
1324
/* Look up an attached eth group device by id.
 *
 * Returns the device, or NULL when the id is out of range, the slot is
 * empty, or the device is not in the ATTACHED state.
 */
static struct eth_group_device *get_eth_group_dev(struct ifpga_fme_hw *fme,
		u8 group_id)
{
	struct eth_group_device *dev = NULL;

	if (group_id < MAX_ETH_GROUP_DEVICES)
		dev = (struct eth_group_device *)fme->eth_dev[group_id];

	if (dev && dev->status == ETH_GROUP_DEV_ATTACHED)
		return dev;

	return NULL;
}
1342
fme_mgr_get_eth_group_nums(struct ifpga_fme_hw * fme)1343 int fme_mgr_get_eth_group_nums(struct ifpga_fme_hw *fme)
1344 {
1345 return fme->nums_eth_dev;
1346 }
1347
/* Fill @info with speed and MAC/PHY counts of eth group @group_id.
 *
 * Returns 0 on success, -ENODEV when the group is absent or detached.
 */
int fme_mgr_get_eth_group_info(struct ifpga_fme_hw *fme,
		u8 group_id, struct opae_eth_group_info *info)
{
	struct eth_group_device *dev = get_eth_group_dev(fme, group_id);

	if (!dev)
		return -ENODEV;

	info->group_id = group_id;
	info->speed = dev->speed;
	info->nums_of_mac = dev->mac_num;
	info->nums_of_phy = dev->phy_num;

	return 0;
}
1364
/* Read an eth group register into @data.
 *
 * Returns the eth group driver's result, or -ENODEV when the group is
 * absent or detached.
 */
int fme_mgr_eth_group_read_reg(struct ifpga_fme_hw *fme, u8 group_id,
		u8 type, u8 index, u16 addr, u32 *data)
{
	struct eth_group_device *dev = get_eth_group_dev(fme, group_id);

	return dev ? eth_group_read_reg(dev, type, index, addr, data)
		   : -ENODEV;
}
1376
/* Write @data to an eth group register.
 *
 * Returns the eth group driver's result, or -ENODEV when the group is
 * absent or detached.
 */
int fme_mgr_eth_group_write_reg(struct ifpga_fme_hw *fme, u8 group_id,
		u8 type, u8 index, u16 addr, u32 data)
{
	struct eth_group_device *dev = get_eth_group_dev(fme, group_id);

	return dev ? eth_group_write_reg(dev, type, index, addr, data)
		   : -ENODEV;
}
1388
/* Speed of eth group @group_id, or -ENODEV when the group is absent
 * or detached.
 */
static int fme_get_eth_group_speed(struct ifpga_fme_hw *fme,
		u8 group_id)
{
	struct eth_group_device *dev = get_eth_group_dev(fme, group_id);

	return dev ? dev->speed : -ENODEV;
}
1400
/* Fill @info with the retimer topology cached in fme->board_info and
 * the supported line-side speed.
 *
 * NOTE(review): fme_get_eth_group_speed() returns -ENODEV when the
 * line-side eth group is absent, and that negative value is stored
 * into support_speed unchecked - presumably callers treat a negative
 * speed as invalid; confirm.
 *
 * Returns 0 on success, -ENODEV when the MAX10 device is not probed.
 */
int fme_mgr_get_retimer_info(struct ifpga_fme_hw *fme,
		struct opae_retimer_info *info)
{
	struct intel_max10_device *dev;

	dev = (struct intel_max10_device *)fme->max10_dev;
	if (!dev)
		return -ENODEV;

	info->nums_retimer = fme->board_info.nums_of_retimer;
	info->ports_per_retimer = fme->board_info.ports_per_retimer;
	info->nums_fvl = fme->board_info.nums_of_fvl;
	info->ports_per_fvl = fme->board_info.ports_per_fvl;

	/* The speed of PKVL is identical the eth group's speed */
	info->support_speed = fme_get_eth_group_speed(fme,
			LINE_SIDE_GROUP_ID);

	return 0;
}
1421
fme_mgr_get_retimer_status(struct ifpga_fme_hw * fme,struct opae_retimer_status * status)1422 int fme_mgr_get_retimer_status(struct ifpga_fme_hw *fme,
1423 struct opae_retimer_status *status)
1424 {
1425 struct intel_max10_device *dev;
1426 unsigned int val;
1427
1428 dev = (struct intel_max10_device *)fme->max10_dev;
1429 if (!dev)
1430 return -ENODEV;
1431
1432 if (max10_sys_read(dev, PKVL_LINK_STATUS, &val)) {
1433 dev_err(dev, "%s: read pkvl status fail\n", __func__);
1434 return -EINVAL;
1435 }
1436
1437 /* The speed of PKVL is identical the eth group's speed */
1438 status->speed = fme_get_eth_group_speed(fme,
1439 LINE_SIDE_GROUP_ID);
1440
1441 status->line_link_bitmap = val;
1442
1443 dev_debug(dev, "get retimer status: speed:%d. line_link_bitmap:0x%x\n",
1444 status->speed,
1445 status->line_link_bitmap);
1446
1447 return 0;
1448 }
1449
fme_mgr_get_sensor_value(struct ifpga_fme_hw * fme,struct opae_sensor_info * sensor,unsigned int * value)1450 int fme_mgr_get_sensor_value(struct ifpga_fme_hw *fme,
1451 struct opae_sensor_info *sensor,
1452 unsigned int *value)
1453 {
1454 struct intel_max10_device *dev;
1455
1456 dev = (struct intel_max10_device *)fme->max10_dev;
1457 if (!dev)
1458 return -ENODEV;
1459
1460 if (max10_sys_read(dev, sensor->value_reg, value)) {
1461 dev_err(dev, "%s: read sensor value register 0x%x fail\n",
1462 __func__, sensor->value_reg);
1463 return -EINVAL;
1464 }
1465
1466 *value *= sensor->multiplier;
1467
1468 return 0;
1469 }
1470