/*-
 * Copyright 2016 Michal Meloun <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/sx.h>

#ifdef FDT
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/extres/phy/phy.h>

#include "phydev_if.h"

MALLOC_DEFINE(M_PHY, "phy", "Phy framework");

/* Forward declarations. */
struct phy;
struct phynode;

typedef TAILQ_HEAD(phynode_list, phynode) phynode_list_t;
typedef TAILQ_HEAD(phy_list, phy) phy_list_t;

/* Default phy methods. */
static int phynode_method_init(struct phynode *phynode);
static int phynode_method_enable(struct phynode *phynode, bool enable);
static int phynode_method_status(struct phynode *phynode, int *status);

/*
 * Phy controller methods.
 */
static phynode_method_t phynode_methods[] = {
	PHYNODEMETHOD(phynode_init,	phynode_method_init),
	PHYNODEMETHOD(phynode_enable,	phynode_method_enable),
	PHYNODEMETHOD(phynode_status,	phynode_method_status),

	PHYNODEMETHOD_END
};
DEFINE_CLASS_0(phynode, phynode_class, phynode_methods, 0);

/*
 * Phy node
 */
struct phynode {
	KOBJ_FIELDS;

	TAILQ_ENTRY(phynode)	phylist_link;	/* Global list entry */
	phy_list_t		consumers_list;	/* Consumers list */

	/* Details of this device. */
	const char		*name;		/* Globally unique name */

	device_t		pdev;		/* Producer device_t */
	void			*softc;		/* Producer softc */
	intptr_t		id;		/* Per producer unique id */
#ifdef FDT
	phandle_t		ofw_node;	/* OFW node of phy */
#endif
	struct sx		lock;		/* Lock for this phy */
	int			ref_cnt;	/* Reference counter */
	int			enable_cnt;	/* Enabled counter */
};

struct phy {
	device_t		cdev;		/* Consumer device */
	struct phynode		*phynode;
	TAILQ_ENTRY(phy)	link;		/* Consumers list entry */

	int			enable_cnt;
};

static phynode_list_t phynode_list = TAILQ_HEAD_INITIALIZER(phynode_list);

static struct sx phynode_topo_lock;
SX_SYSINIT(phy_topology, &phynode_topo_lock, "Phy topology lock");

#define	PHY_TOPO_SLOCK()	sx_slock(&phynode_topo_lock)
#define	PHY_TOPO_XLOCK()	sx_xlock(&phynode_topo_lock)
#define	PHY_TOPO_UNLOCK()	sx_unlock(&phynode_topo_lock)
#define	PHY_TOPO_ASSERT()	sx_assert(&phynode_topo_lock, SA_LOCKED)
#define	PHY_TOPO_XASSERT()	sx_assert(&phynode_topo_lock, SA_XLOCKED)

#define	PHYNODE_SLOCK(_sc)	sx_slock(&((_sc)->lock))
#define	PHYNODE_XLOCK(_sc)	sx_xlock(&((_sc)->lock))
#define	PHYNODE_UNLOCK(_sc)	sx_unlock(&((_sc)->lock))
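/*
 * Locking and counting notes (a descriptive summary of the code below):
 *
 * The global phynode_list is protected by phynode_topo_lock; consumer
 * operations take it shared (PHY_TOPO_SLOCK) while registration of new phy
 * nodes takes it exclusive (PHY_TOPO_XLOCK).  Each phynode additionally has
 * its own sx lock guarding its ref_cnt, enable_cnt and consumers_list.
 *
 * ref_cnt counts struct phy consumer handles attached to the node.
 * phynode->enable_cnt aggregates enable requests over all consumers, so the
 * hardware is only touched on the 0 -> 1 and 1 -> 0 transitions, while
 * phy->enable_cnt tracks a single consumer's enables so that phy_release()
 * can drop any that are still outstanding.
 */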
/* ----------------------------------------------------------------------------
 *
 * Default phy methods for base class.
 *
 */
static int
phynode_method_init(struct phynode *phynode)
{

	return (0);
}

static int
phynode_method_enable(struct phynode *phynode, bool enable)
{

	if (!enable)
		return (ENXIO);

	return (0);
}

static int
phynode_method_status(struct phynode *phynode, int *status)
{
	*status = PHY_STATUS_ENABLED;
	return (0);
}

/* ----------------------------------------------------------------------------
 *
 * Internal functions.
 *
 */

/*
 * Create and initialize phy object, but do not register it.
 */
struct phynode *
phynode_create(device_t pdev, phynode_class_t phynode_class,
    struct phynode_init_def *def)
{
	struct phynode *phynode;

	/* Create object and initialize it. */
	phynode = malloc(sizeof(struct phynode), M_PHY, M_WAITOK | M_ZERO);
	kobj_init((kobj_t)phynode, (kobj_class_t)phynode_class);
	sx_init(&phynode->lock, "Phy node lock");

	/* Allocate softc if required. */
	if (phynode_class->size > 0) {
		phynode->softc = malloc(phynode_class->size, M_PHY,
		    M_WAITOK | M_ZERO);
	}

	/* Rest of init. */
	TAILQ_INIT(&phynode->consumers_list);
	phynode->id = def->id;
	phynode->pdev = pdev;
#ifdef FDT
	phynode->ofw_node = def->ofw_node;
#endif

	return (phynode);
}

/*
 * Register phy object.
 */
struct phynode *
phynode_register(struct phynode *phynode)
{
	int rv;

#ifdef FDT
	if (phynode->ofw_node <= 0)
		phynode->ofw_node = ofw_bus_get_node(phynode->pdev);
	if (phynode->ofw_node <= 0)
		return (NULL);
#endif

	rv = PHYNODE_INIT(phynode);
	if (rv != 0) {
		printf("PHYNODE_INIT failed: %d\n", rv);
		return (NULL);
	}

	PHY_TOPO_XLOCK();
	TAILQ_INSERT_TAIL(&phynode_list, phynode, phylist_link);
	PHY_TOPO_UNLOCK();
#ifdef FDT
	OF_device_register_xref(OF_xref_from_node(phynode->ofw_node),
	    phynode->pdev);
#endif
	return (phynode);
}
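/*
 * A minimal provider-side sketch (not part of this file; the foo_* names and
 * the softc layout are hypothetical).  A provider typically derives its own
 * class from phynode_class and then creates and registers one node per phy
 * from its attach method:
 *
 *	static phynode_method_t foo_phynode_methods[] = {
 *		PHYNODEMETHOD(phynode_enable,	foo_phynode_enable),
 *		PHYNODEMETHOD_END
 *	};
 *	DEFINE_CLASS_1(foo_phynode, foo_phynode_class, foo_phynode_methods,
 *	    sizeof(struct foo_phynode_sc), phynode_class);
 *
 *	struct phynode_init_def def = { .id = 1 };
 *	struct phynode *phynode;
 *
 *	phynode = phynode_create(dev, &foo_phynode_class, &def);
 *	if (phynode_register(phynode) == NULL)
 *		return (ENXIO);
 */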
static struct phynode *
phynode_find_by_id(device_t dev, intptr_t id)
{
	struct phynode *entry;

	PHY_TOPO_ASSERT();

	TAILQ_FOREACH(entry, &phynode_list, phylist_link) {
		if ((entry->pdev == dev) && (entry->id == id))
			return (entry);
	}

	return (NULL);
}

/* --------------------------------------------------------------------------
 *
 * Phy providers interface
 *
 */

void *
phynode_get_softc(struct phynode *phynode)
{

	return (phynode->softc);
}

device_t
phynode_get_device(struct phynode *phynode)
{

	return (phynode->pdev);
}

intptr_t
phynode_get_id(struct phynode *phynode)
{

	return (phynode->id);
}

#ifdef FDT
phandle_t
phynode_get_ofw_node(struct phynode *phynode)
{

	return (phynode->ofw_node);
}
#endif

/* --------------------------------------------------------------------------
 *
 * Real consumers executive
 *
 */

/*
 * Enable phy.
 */
int
phynode_enable(struct phynode *phynode)
{
	int rv;

	PHY_TOPO_ASSERT();

	PHYNODE_XLOCK(phynode);
	if (phynode->enable_cnt == 0) {
		rv = PHYNODE_ENABLE(phynode, true);
		if (rv != 0) {
			PHYNODE_UNLOCK(phynode);
			return (rv);
		}
	}
	phynode->enable_cnt++;
	PHYNODE_UNLOCK(phynode);
	return (0);
}

/*
 * Disable phy.
 */
int
phynode_disable(struct phynode *phynode)
{
	int rv;

	PHY_TOPO_ASSERT();

	PHYNODE_XLOCK(phynode);
	if (phynode->enable_cnt == 1) {
		rv = PHYNODE_ENABLE(phynode, false);
		if (rv != 0) {
			PHYNODE_UNLOCK(phynode);
			return (rv);
		}
	}
	phynode->enable_cnt--;
	PHYNODE_UNLOCK(phynode);
	return (0);
}

/*
 * Get phy status. (PHY_STATUS_*)
 */
int
phynode_status(struct phynode *phynode, int *status)
{
	int rv;

	PHY_TOPO_ASSERT();

	PHYNODE_XLOCK(phynode);
	rv = PHYNODE_STATUS(phynode, status);
	PHYNODE_UNLOCK(phynode);
	return (rv);
}
/* --------------------------------------------------------------------------
 *
 * Phy consumers interface.
 *
 */

/* Helper function for phy_get*() */
static phy_t
phy_create(struct phynode *phynode, device_t cdev)
{
	struct phy *phy;

	PHY_TOPO_ASSERT();

	phy = malloc(sizeof(struct phy), M_PHY, M_WAITOK | M_ZERO);
	phy->cdev = cdev;
	phy->phynode = phynode;
	phy->enable_cnt = 0;

	PHYNODE_XLOCK(phynode);
	phynode->ref_cnt++;
	TAILQ_INSERT_TAIL(&phynode->consumers_list, phy, link);
	PHYNODE_UNLOCK(phynode);

	return (phy);
}

int
phy_enable(phy_t phy)
{
	int rv;
	struct phynode *phynode;

	phynode = phy->phynode;
	KASSERT(phynode->ref_cnt > 0,
	    ("Attempt to access unreferenced phy.\n"));

	PHY_TOPO_SLOCK();
	rv = phynode_enable(phynode);
	if (rv == 0)
		phy->enable_cnt++;
	PHY_TOPO_UNLOCK();
	return (rv);
}

int
phy_disable(phy_t phy)
{
	int rv;
	struct phynode *phynode;

	phynode = phy->phynode;
	KASSERT(phynode->ref_cnt > 0,
	    ("Attempt to access unreferenced phy.\n"));
	KASSERT(phy->enable_cnt > 0,
	    ("Attempt to disable already disabled phy.\n"));

	PHY_TOPO_SLOCK();
	rv = phynode_disable(phynode);
	if (rv == 0)
		phy->enable_cnt--;
	PHY_TOPO_UNLOCK();
	return (rv);
}

int
phy_status(phy_t phy, int *status)
{
	int rv;
	struct phynode *phynode;

	phynode = phy->phynode;
	KASSERT(phynode->ref_cnt > 0,
	    ("Attempt to access unreferenced phy.\n"));

	PHY_TOPO_SLOCK();
	rv = phynode_status(phynode, status);
	PHY_TOPO_UNLOCK();
	return (rv);
}

int
phy_get_by_id(device_t consumer_dev, device_t provider_dev, intptr_t id,
    phy_t *phy)
{
	struct phynode *phynode;

	PHY_TOPO_SLOCK();

	phynode = phynode_find_by_id(provider_dev, id);
	if (phynode == NULL) {
		PHY_TOPO_UNLOCK();
		return (ENODEV);
	}
	*phy = phy_create(phynode, consumer_dev);
	PHY_TOPO_UNLOCK();

	return (0);
}

void
phy_release(phy_t phy)
{
	struct phynode *phynode;

	phynode = phy->phynode;
	KASSERT(phynode->ref_cnt > 0,
	    ("Attempt to access unreferenced phy.\n"));

	PHY_TOPO_SLOCK();
	while (phy->enable_cnt > 0) {
		phynode_disable(phynode);
		phy->enable_cnt--;
	}
	PHYNODE_XLOCK(phynode);
	TAILQ_REMOVE(&phynode->consumers_list, phy, link);
	phynode->ref_cnt--;
	PHYNODE_UNLOCK(phynode);
	PHY_TOPO_UNLOCK();

	free(phy, M_PHY);
}
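/*
 * A minimal consumer-side sketch (hypothetical; consumer_dev and
 * provider_dev stand for the consumer's and provider's device_t, and id 0
 * is an example):
 *
 *	phy_t phy;
 *	int error, status;
 *
 *	error = phy_get_by_id(consumer_dev, provider_dev, 0, &phy);
 *	if (error != 0)
 *		return (error);
 *	error = phy_enable(phy);
 *	if (error == 0)
 *		error = phy_status(phy, &status);
 *	...
 *	phy_release(phy);	// Drops any outstanding enables too.
 */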
#ifdef FDT
int
phydev_default_ofw_map(device_t provider, phandle_t xref, int ncells,
    pcell_t *cells, intptr_t *id)
{
	struct phynode *entry;
	phandle_t node;

	/* A single device can register multiple phy subnodes. */
	if (ncells == 0) {
		node = OF_node_from_xref(xref);
		PHY_TOPO_XLOCK();
		TAILQ_FOREACH(entry, &phynode_list, phylist_link) {
			if ((entry->pdev == provider) &&
			    (entry->ofw_node == node)) {
				*id = entry->id;
				PHY_TOPO_UNLOCK();
				return (0);
			}
		}
		PHY_TOPO_UNLOCK();
		return (ERANGE);
	}

	/* The first cell is the ID. */
	if (ncells == 1) {
		*id = cells[0];
		return (0);
	}

	/* No default way to get the ID; a custom mapper is required. */
	return (ERANGE);
}

int
phy_get_by_ofw_idx(device_t consumer_dev, phandle_t cnode, int idx, phy_t *phy)
{
	phandle_t xnode;
	pcell_t *cells;
	device_t phydev;
	int ncells, rv;
	intptr_t id;

	if (cnode <= 0)
		cnode = ofw_bus_get_node(consumer_dev);
	if (cnode <= 0) {
		device_printf(consumer_dev,
		    "%s called on a non-OFW-based device\n", __func__);
		return (ENXIO);
	}
	rv = ofw_bus_parse_xref_list_alloc(cnode, "phys", "#phy-cells", idx,
	    &xnode, &ncells, &cells);
	if (rv != 0)
		return (rv);

	/* Translate provider to device. */
	phydev = OF_device_from_xref(xnode);
	if (phydev == NULL) {
		OF_prop_free(cells);
		return (ENODEV);
	}
	/* Map phy to number. */
	rv = PHYDEV_MAP(phydev, xnode, ncells, cells, &id);
	OF_prop_free(cells);
	if (rv != 0)
		return (rv);

	return (phy_get_by_id(consumer_dev, phydev, id, phy));
}

int
phy_get_by_ofw_name(device_t consumer_dev, phandle_t cnode, char *name,
    phy_t *phy)
{
	int rv, idx;

	if (cnode <= 0)
		cnode = ofw_bus_get_node(consumer_dev);
	if (cnode <= 0) {
		device_printf(consumer_dev,
		    "%s called on a non-OFW-based device\n", __func__);
		return (ENXIO);
	}
	rv = ofw_bus_find_string_index(cnode, "phy-names", name, &idx);
	if (rv != 0)
		return (rv);
	return (phy_get_by_ofw_idx(consumer_dev, cnode, idx, phy));
}

int
phy_get_by_ofw_property(device_t consumer_dev, phandle_t cnode, char *name,
    phy_t *phy)
{
	pcell_t *cells;
	device_t phydev;
	int ncells, rv;
	intptr_t id;

	if (cnode <= 0)
		cnode = ofw_bus_get_node(consumer_dev);
	if (cnode <= 0) {
		device_printf(consumer_dev,
		    "%s called on a non-OFW-based device\n", __func__);
		return (ENXIO);
	}
	ncells = OF_getencprop_alloc(cnode, name, sizeof(pcell_t),
	    (void **)&cells);
	if (ncells < 1)
		return (ENXIO);

	/* Translate provider to device. */
	phydev = OF_device_from_xref(cells[0]);
	if (phydev == NULL) {
		OF_prop_free(cells);
		return (ENODEV);
	}
	/* Map phy to number. */
	rv = PHYDEV_MAP(phydev, cells[0], ncells - 1, cells + 1, &id);
	OF_prop_free(cells);
	if (rv != 0)
		return (rv);

	return (phy_get_by_id(consumer_dev, phydev, id, phy));
}
#endif
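/*
 * A minimal FDT consumer sketch (hypothetical; "usb" is an example entry in
 * the consumer's "phy-names" property, which selects the matching specifier
 * in its "phys" property).  Passing cnode <= 0 makes the helpers use the
 * consumer device's own OFW node:
 *
 *	phy_t phy;
 *	int error;
 *
 *	error = phy_get_by_ofw_name(consumer_dev, 0, "usb", &phy);
 *	if (error != 0)
 *		return (error);
 *	error = phy_enable(phy);
 */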