/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 ARM Ltd.
 */

#ifndef _LINUX_ARM_FFA_H
#define _LINUX_ARM_FFA_H

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/uuid.h>

#define FFA_SMC(calling_convention, func_num)				\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention),	\
			   ARM_SMCCC_OWNER_STANDARD, (func_num))

#define FFA_SMC_32(func_num)	FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
#define FFA_SMC_64(func_num)	FFA_SMC(ARM_SMCCC_SMC_64, (func_num))

#define FFA_ERROR			FFA_SMC_32(0x60)
#define FFA_SUCCESS			FFA_SMC_32(0x61)
#define FFA_FN64_SUCCESS		FFA_SMC_64(0x61)
#define FFA_INTERRUPT			FFA_SMC_32(0x62)
#define FFA_VERSION			FFA_SMC_32(0x63)
#define FFA_FEATURES			FFA_SMC_32(0x64)
#define FFA_RX_RELEASE			FFA_SMC_32(0x65)
#define FFA_RXTX_MAP			FFA_SMC_32(0x66)
#define FFA_FN64_RXTX_MAP		FFA_SMC_64(0x66)
#define FFA_RXTX_UNMAP			FFA_SMC_32(0x67)
#define FFA_PARTITION_INFO_GET		FFA_SMC_32(0x68)
#define FFA_ID_GET			FFA_SMC_32(0x69)
#define FFA_MSG_POLL			FFA_SMC_32(0x6A)
#define FFA_MSG_WAIT			FFA_SMC_32(0x6B)
#define FFA_YIELD			FFA_SMC_32(0x6C)
#define FFA_RUN				FFA_SMC_32(0x6D)
#define FFA_MSG_SEND			FFA_SMC_32(0x6E)
#define FFA_MSG_SEND_DIRECT_REQ		FFA_SMC_32(0x6F)
#define FFA_FN64_MSG_SEND_DIRECT_REQ	FFA_SMC_64(0x6F)
#define FFA_MSG_SEND_DIRECT_RESP	FFA_SMC_32(0x70)
#define FFA_FN64_MSG_SEND_DIRECT_RESP	FFA_SMC_64(0x70)
#define FFA_MEM_DONATE			FFA_SMC_32(0x71)
#define FFA_FN64_MEM_DONATE		FFA_SMC_64(0x71)
#define FFA_MEM_LEND			FFA_SMC_32(0x72)
#define FFA_FN64_MEM_LEND		FFA_SMC_64(0x72)
#define FFA_MEM_SHARE			FFA_SMC_32(0x73)
#define FFA_FN64_MEM_SHARE		FFA_SMC_64(0x73)
#define FFA_MEM_RETRIEVE_REQ		FFA_SMC_32(0x74)
#define FFA_FN64_MEM_RETRIEVE_REQ	FFA_SMC_64(0x74)
#define FFA_MEM_RETRIEVE_RESP		FFA_SMC_32(0x75)
#define FFA_MEM_RELINQUISH		FFA_SMC_32(0x76)
#define FFA_MEM_RECLAIM			FFA_SMC_32(0x77)
#define FFA_MEM_OP_PAUSE		FFA_SMC_32(0x78)
#define FFA_MEM_OP_RESUME		FFA_SMC_32(0x79)
#define FFA_MEM_FRAG_RX			FFA_SMC_32(0x7A)
#define FFA_MEM_FRAG_TX			FFA_SMC_32(0x7B)
#define FFA_NORMAL_WORLD_RESUME		FFA_SMC_32(0x7C)
#define FFA_NOTIFICATION_BITMAP_CREATE	FFA_SMC_32(0x7D)
#define FFA_NOTIFICATION_BITMAP_DESTROY	FFA_SMC_32(0x7E)
#define FFA_NOTIFICATION_BIND		FFA_SMC_32(0x7F)
#define FFA_NOTIFICATION_UNBIND		FFA_SMC_32(0x80)
#define FFA_NOTIFICATION_SET		FFA_SMC_32(0x81)
#define FFA_NOTIFICATION_GET		FFA_SMC_32(0x82)
#define FFA_NOTIFICATION_INFO_GET	FFA_SMC_32(0x83)
#define FFA_FN64_NOTIFICATION_INFO_GET	FFA_SMC_64(0x83)
#define FFA_RX_ACQUIRE			FFA_SMC_32(0x84)
#define FFA_SPM_ID_GET			FFA_SMC_32(0x85)
#define FFA_MSG_SEND2			FFA_SMC_32(0x86)
#define FFA_SECONDARY_EP_REGISTER	FFA_SMC_32(0x87)
#define FFA_FN64_SECONDARY_EP_REGISTER	FFA_SMC_64(0x87)
#define FFA_MEM_PERM_GET		FFA_SMC_32(0x88)
#define FFA_FN64_MEM_PERM_GET		FFA_SMC_64(0x88)
#define FFA_MEM_PERM_SET		FFA_SMC_32(0x89)
#define FFA_FN64_MEM_PERM_SET		FFA_SMC_64(0x89)
#define FFA_CONSOLE_LOG			FFA_SMC_32(0x8A)
#define FFA_PARTITION_INFO_GET_REGS	FFA_SMC_64(0x8B)
#define FFA_EL3_INTR_HANDLE		FFA_SMC_32(0x8C)
#define FFA_MSG_SEND_DIRECT_REQ2	FFA_SMC_64(0x8D)
#define FFA_MSG_SEND_DIRECT_RESP2	FFA_SMC_64(0x8E)
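
/*
 * Illustrative sketch only: how one of the function IDs above could be
 * invoked directly over SMCCC v1.2 using arm_smccc_1_2_smc() from
 * <linux/arm-smccc.h>. In practice the FF-A core driver issues these calls
 * (over either the SMC or HVC conduit) and client drivers are expected to
 * go through struct ffa_ops instead.
 *
 *	struct arm_smccc_1_2_regs args = {
 *		.a0 = FFA_VERSION,
 *		.a1 = 0x00010001,	// caller supports FF-A v1.1
 *	};
 *	struct arm_smccc_1_2_regs res;
 *
 *	arm_smccc_1_2_smc(&args, &res);
 *	// On success res.a0 holds the version agreed with the firmware,
 *	// otherwise an error code such as FFA_RET_NOT_SUPPORTED (see below).
 */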

/*
 * For some calls it is necessary to use SMC64 to pass or return 64-bit
 * values. For such calls FFA_FN_NATIVE(name) will choose the appropriate
 * (native-width) function ID.
 */
#ifdef CONFIG_64BIT
#define FFA_FN_NATIVE(name)	FFA_FN64_##name
#else
#define FFA_FN_NATIVE(name)	FFA_##name
#endif

/* FFA error codes. */
#define FFA_RET_SUCCESS			(0)
#define FFA_RET_NOT_SUPPORTED		(-1)
#define FFA_RET_INVALID_PARAMETERS	(-2)
#define FFA_RET_NO_MEMORY		(-3)
#define FFA_RET_BUSY			(-4)
#define FFA_RET_INTERRUPTED		(-5)
#define FFA_RET_DENIED			(-6)
#define FFA_RET_RETRY			(-7)
#define FFA_RET_ABORTED			(-8)
#define FFA_RET_NO_DATA			(-9)

/* FFA version encoding */
#define FFA_MAJOR_VERSION_MASK	GENMASK(30, 16)
#define FFA_MINOR_VERSION_MASK	GENMASK(15, 0)
#define FFA_MAJOR_VERSION(x)	((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
#define FFA_MINOR_VERSION(x)	((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
#define FFA_PACK_VERSION_INFO(major, minor)			\
	(FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) |		\
	 FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0		FFA_PACK_VERSION_INFO(1, 0)
#define FFA_VERSION_1_1		FFA_PACK_VERSION_INFO(1, 1)

/*
 * The FF-A specification explicitly refers to '4K pages'. This should not be
 * confused with the kernel PAGE_SIZE, which is the translation granule the
 * kernel is configured with and may be any of 4K, 16K or 64K.
 */
#define FFA_PAGE_SIZE		SZ_4K

/*
 * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
 * query for FFA_RXTX_MAP.
 */
#define FFA_FEAT_RXTX_MIN_SZ_4K		0
#define FFA_FEAT_RXTX_MIN_SZ_64K	1
#define FFA_FEAT_RXTX_MIN_SZ_16K	2

/* FFA Bus/Device/Driver related */
struct ffa_device {
	u32 id;
	u32 properties;
	int vm_id;
	bool mode_32bit;
	uuid_t uuid;
	struct device dev;
	const struct ffa_ops *ops;
};

#define to_ffa_dev(d)	container_of(d, struct ffa_device, dev)

struct ffa_device_id {
	uuid_t uuid;
};

struct ffa_driver {
	const char *name;
	int (*probe)(struct ffa_device *sdev);
	void (*remove)(struct ffa_device *sdev);
	const struct ffa_device_id *id_table;

	struct device_driver driver;
};

#define to_ffa_driver(d)	container_of_const(d, struct ffa_driver, driver)

static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
{
	dev_set_drvdata(&fdev->dev, data);
}

static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
{
	return dev_get_drvdata(&fdev->dev);
}

struct ffa_partition_info;

#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
struct ffa_device *
ffa_device_register(const struct ffa_partition_info *part_info,
		    const struct ffa_ops *ops);
void ffa_device_unregister(struct ffa_device *ffa_dev);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
			const char *mod_name);
void ffa_driver_unregister(struct ffa_driver *driver);
void ffa_devices_unregister(void);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);

#else
static inline struct ffa_device *
ffa_device_register(const struct ffa_partition_info *part_info,
		    const struct ffa_ops *ops)
{
	return NULL;
}

static inline void ffa_device_unregister(struct ffa_device *dev) {}

static inline void ffa_devices_unregister(void) {}

static inline int
ffa_driver_register(struct ffa_driver *driver, struct module *owner,
		    const char *mod_name)
{
	return -EINVAL;
}

static inline void ffa_driver_unregister(struct ffa_driver *driver) {}

static inline
bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }

#endif /* CONFIG_ARM_FFA_TRANSPORT */

#define ffa_register(driver) \
	ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define ffa_unregister(driver) \
	ffa_driver_unregister(driver)

/**
 * module_ffa_driver() - Helper macro for registering an FFA driver
 * @__ffa_driver: ffa_driver structure
 *
 * Helper macro for FFA drivers to set up proper module init / exit
 * functions. Replaces module_init() and module_exit() and keeps people from
 * printing pointless things to the kernel log when their driver is loaded.
 */
#define module_ffa_driver(__ffa_driver)	\
	module_driver(__ffa_driver, ffa_register, ffa_unregister)
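
/*
 * Minimal registration sketch (illustrative only; the driver name, the UUID
 * value and the example_* identifiers are placeholders, not part of this
 * API):
 *
 *	static const struct ffa_device_id example_ffa_id_table[] = {
 *		{ UUID_INIT(0x12345678, 0x1234, 0x5678,
 *			    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef) },
 *		{}
 *	};
 *
 *	static int example_ffa_probe(struct ffa_device *ffa_dev)
 *	{
 *		// Per-device state would normally be allocated here and
 *		// retrieved again in remove() via ffa_dev_get_drvdata().
 *		ffa_dev_set_drvdata(ffa_dev, NULL);
 *		return 0;
 *	}
 *
 *	static void example_ffa_remove(struct ffa_device *ffa_dev)
 *	{
 *	}
 *
 *	static struct ffa_driver example_ffa_driver = {
 *		.name		= "example-ffa",
 *		.probe		= example_ffa_probe,
 *		.remove		= example_ffa_remove,
 *		.id_table	= example_ffa_id_table,
 *	};
 *	module_ffa_driver(example_ffa_driver);
 *
 * The FF-A bus matches devices to drivers by comparing the partition UUID
 * reported by the firmware against the UUIDs in the driver's id_table.
 */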

extern const struct bus_type ffa_bus_type;

/* The FF-A 1.0 partition structure lacks the uuid[4] */
#define FFA_1_0_PARTITON_INFO_SZ	(8)

/* FFA transport related */
struct ffa_partition_info {
	u16 id;
	u16 exec_ctxt;
/* partition supports receipt of direct requests */
#define FFA_PARTITION_DIRECT_RECV	BIT(0)
/* partition can send direct requests. */
#define FFA_PARTITION_DIRECT_SEND	BIT(1)
/* partition can send and receive indirect messages. */
#define FFA_PARTITION_INDIRECT_MSG	BIT(2)
/* partition can receive notifications */
#define FFA_PARTITION_NOTIFICATION_RECV	BIT(3)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC	BIT(8)
	u32 properties;
	uuid_t uuid;
};

static inline
bool ffa_partition_check_property(struct ffa_device *dev, u32 property)
{
	return dev->properties & property;
}

#define ffa_partition_supports_notify_recv(dev)	\
	ffa_partition_check_property(dev, FFA_PARTITION_NOTIFICATION_RECV)

#define ffa_partition_supports_indirect_msg(dev)	\
	ffa_partition_check_property(dev, FFA_PARTITION_INDIRECT_MSG)

#define ffa_partition_supports_direct_recv(dev)	\
	ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV)

/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
struct ffa_send_direct_data {
	unsigned long data0; /* w3/x3 */
	unsigned long data1; /* w4/x4 */
	unsigned long data2; /* w5/x5 */
	unsigned long data3; /* w6/x6 */
	unsigned long data4; /* w7/x7 */
};

struct ffa_indirect_msg_hdr {
	u32 flags;
	u32 res0;
	u32 offset;
	u32 send_recv_id;
	u32 size;
};

/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */
struct ffa_send_direct_data2 {
	unsigned long data[14]; /* x4-x17 */
};

struct ffa_mem_region_addr_range {
	/* The base IPA of the constituent memory region, aligned to 4 kiB */
	u64 address;
	/* The number of 4 kiB pages in the constituent memory region. */
	u32 pg_cnt;
	u32 reserved;
};

struct ffa_composite_mem_region {
	/*
	 * The total number of 4 kiB pages included in this memory region. This
	 * must be equal to the sum of page counts specified in each
	 * `struct ffa_mem_region_addr_range`.
	 */
	u32 total_pg_cnt;
	/* The number of constituents included in this memory region range */
	u32 addr_range_cnt;
	u64 reserved;
	/* An array of `addr_range_cnt` memory region constituents. */
	struct ffa_mem_region_addr_range constituents[];
};
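
/*
 * Layout sketch (illustrative only; tx_buffer, composite_offset, ipa0 and
 * ipa1 are hypothetical values, and clients normally let the FF-A driver
 * build this descriptor from a scatterlist rather than filling it in by
 * hand): a 12KiB region made of two discontiguous chunks could be described
 * as
 *
 *	struct ffa_composite_mem_region *comp = tx_buffer + composite_offset;
 *
 *	comp->total_pg_cnt = 3;			// must equal 1 + 2 below
 *	comp->addr_range_cnt = 2;
 *	comp->constituents[0].address = ipa0;	// 4 kiB aligned
 *	comp->constituents[0].pg_cnt = 1;
 *	comp->constituents[1].address = ipa1;	// 4 kiB aligned
 *	comp->constituents[1].pg_cnt = 2;
 */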

struct ffa_mem_region_attributes {
	/* The ID of the VM to which the memory is being given or shared. */
	u16 receiver;
	/*
	 * The permissions with which the memory region should be mapped in the
	 * receiver's page table.
	 */
#define FFA_MEM_EXEC		BIT(3)
#define FFA_MEM_NO_EXEC		BIT(2)
#define FFA_MEM_RW		BIT(1)
#define FFA_MEM_RO		BIT(0)
	u8 attrs;
	/*
	 * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
	 * for memory regions with multiple borrowers.
	 */
#define FFA_MEM_RETRIEVE_SELF_BORROWER	BIT(0)
	u8 flag;
	/*
	 * Offset in bytes from the start of the outer `struct ffa_mem_region`
	 * to a `struct ffa_composite_mem_region`.
	 */
	u32 composite_off;
	u64 reserved;
};

struct ffa_mem_region {
	/* The ID of the VM/owner which originally sent the memory region */
	u16 sender_id;
#define FFA_MEM_NORMAL		BIT(5)
#define FFA_MEM_DEVICE		BIT(4)

#define FFA_MEM_WRITE_BACK	(3 << 2)
#define FFA_MEM_NON_CACHEABLE	(1 << 2)

#define FFA_DEV_nGnRnE		(0 << 2)
#define FFA_DEV_nGnRE		(1 << 2)
#define FFA_DEV_nGRE		(2 << 2)
#define FFA_DEV_GRE		(3 << 2)

#define FFA_MEM_NON_SHAREABLE	(0)
#define FFA_MEM_OUTER_SHAREABLE	(2)
#define FFA_MEM_INNER_SHAREABLE	(3)
	/* Memory region attributes, upper byte MBZ pre v1.1 */
	u16 attributes;
	/*
	 * Clear memory region contents after unmapping it from the sender and
	 * before mapping it for any receiver.
	 */
#define FFA_MEM_CLEAR			BIT(0)
	/*
	 * Whether the hypervisor may time slice the memory sharing or retrieval
	 * operation.
	 */
#define FFA_TIME_SLICE_ENABLE		BIT(1)

#define FFA_MEM_RETRIEVE_TYPE_IN_RESP	(0 << 3)
#define FFA_MEM_RETRIEVE_TYPE_SHARE	(1 << 3)
#define FFA_MEM_RETRIEVE_TYPE_LEND	(2 << 3)
#define FFA_MEM_RETRIEVE_TYPE_DONATE	(3 << 3)

#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT	BIT(9)
#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x)		((x) << 5)
	/* Flags to control behaviour of the transaction. */
	u32 flags;
#define HANDLE_LOW_MASK		GENMASK_ULL(31, 0)
#define HANDLE_HIGH_MASK	GENMASK_ULL(63, 32)
#define HANDLE_LOW(x)		((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
#define HANDLE_HIGH(x)		((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))

#define PACK_HANDLE(l, h)	\
	(FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
	/*
	 * A globally-unique ID assigned by the hypervisor for a region
	 * of memory being sent between VMs.
	 */
	u64 handle;
	/*
	 * An implementation defined value associated with the receiver and the
	 * memory region.
	 */
	u64 tag;
	/* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
	u32 ep_mem_size;
	/*
	 * The number of `ffa_mem_region_attributes` entries included in this
	 * transaction.
	 */
	u32 ep_count;
	/*
	 * 16-byte aligned offset from the base address of this descriptor
	 * to the first element of the endpoint memory access descriptor array.
	 * Valid only from v1.1.
	 */
	u32 ep_mem_offset;
	/* MBZ, valid only from v1.1 */
	u32 reserved[3];
};

#define CONSTITUENTS_OFFSET(x)	\
	(offsetof(struct ffa_composite_mem_region, constituents[x]))

static inline u32
ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
{
	u32 offset = count * sizeof(struct ffa_mem_region_attributes);
	/*
	 * Prior to v1.1, the endpoint memory access descriptor array started
	 * at offset 32 (i.e. the offset of ep_mem_offset in the current
	 * structure).
	 */
	if (ffa_version <= FFA_VERSION_1_0)
		offset += offsetof(struct ffa_mem_region, ep_mem_offset);
	else
		offset += sizeof(struct ffa_mem_region);

	return offset;
}

struct ffa_mem_ops_args {
	bool use_txbuf;
	u32 nattrs;
	u32 flags;
	u64 tag;
	u64 g_handle;
	struct scatterlist *sg;
	struct ffa_mem_region_attributes *attrs;
};
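
/*
 * Usage sketch (illustrative only; sgt, handle and the minimal error
 * handling are placeholders): sharing a scatter-gather list with the
 * partition behind ffa_dev via the memory_share() op from struct ffa_mem_ops
 * (declared further down). On success the firmware-assigned global handle is
 * returned in args.g_handle and is what is later passed to memory_reclaim().
 *
 *	struct ffa_mem_region_attributes mem_attr = {
 *		.receiver	= ffa_dev->vm_id,
 *		.attrs		= FFA_MEM_RW,
 *	};
 *	struct ffa_mem_ops_args args = {
 *		.use_txbuf	= true,
 *		.attrs		= &mem_attr,
 *		.nattrs		= 1,
 *		.sg		= sgt->sgl,
 *	};
 *	int ret = ffa_dev->ops->mem_ops->memory_share(&args);
 *
 *	if (!ret)
 *		handle = args.g_handle;
 */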

struct ffa_info_ops {
	u32 (*api_version_get)(void);
	int (*partition_info_get)(const char *uuid_str,
				  struct ffa_partition_info *buffer);
};

struct ffa_msg_ops {
	void (*mode_32bit_set)(struct ffa_device *dev);
	int (*sync_send_receive)(struct ffa_device *dev,
				 struct ffa_send_direct_data *data);
	int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
	int (*sync_send_receive2)(struct ffa_device *dev,
				  struct ffa_send_direct_data2 *data);
};

struct ffa_mem_ops {
	int (*memory_reclaim)(u64 g_handle, u32 flags);
	int (*memory_share)(struct ffa_mem_ops_args *args);
	int (*memory_lend)(struct ffa_mem_ops_args *args);
};

struct ffa_cpu_ops {
	int (*run)(struct ffa_device *dev, u16 vcpu);
};

typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);

struct ffa_notifier_ops {
	int (*sched_recv_cb_register)(struct ffa_device *dev,
				      ffa_sched_recv_cb cb, void *cb_data);
	int (*sched_recv_cb_unregister)(struct ffa_device *dev);
	int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
			      ffa_notifier_cb cb, void *cb_data, int notify_id);
	int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
	int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
			   u16 vcpu);
};

struct ffa_ops {
	const struct ffa_info_ops *info_ops;
	const struct ffa_msg_ops *msg_ops;
	const struct ffa_mem_ops *mem_ops;
	const struct ffa_cpu_ops *cpu_ops;
	const struct ffa_notifier_ops *notifier_ops;
};

#endif /* _LINUX_ARM_FFA_H */
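
/*
 * Messaging sketch (illustrative only; the request and response values are
 * made up): a client driver with a bound ffa_device can exchange a direct
 * message with its partition through the msg_ops above. The five data words
 * map to registers w3/x3-w7/x7 and the same structure carries the response
 * back on return.
 *
 *	struct ffa_send_direct_data data = {
 *		.data0 = 0x1,	// protocol-specific command
 *	};
 *	int ret = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
 *
 *	if (!ret)
 *		response = data.data0;
 */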