/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 ARM Ltd.
 */

#ifndef _LINUX_ARM_FFA_H
#define _LINUX_ARM_FFA_H

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/uuid.h>

#define FFA_SMC(calling_convention, func_num)				\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention),	\
			   ARM_SMCCC_OWNER_STANDARD, (func_num))

#define FFA_SMC_32(func_num)	FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
#define FFA_SMC_64(func_num)	FFA_SMC(ARM_SMCCC_SMC_64, (func_num))

#define FFA_ERROR			FFA_SMC_32(0x60)
#define FFA_SUCCESS			FFA_SMC_32(0x61)
#define FFA_FN64_SUCCESS		FFA_SMC_64(0x61)
#define FFA_INTERRUPT			FFA_SMC_32(0x62)
#define FFA_VERSION			FFA_SMC_32(0x63)
#define FFA_FEATURES			FFA_SMC_32(0x64)
#define FFA_RX_RELEASE			FFA_SMC_32(0x65)
#define FFA_RXTX_MAP			FFA_SMC_32(0x66)
#define FFA_FN64_RXTX_MAP		FFA_SMC_64(0x66)
#define FFA_RXTX_UNMAP			FFA_SMC_32(0x67)
#define FFA_PARTITION_INFO_GET		FFA_SMC_32(0x68)
#define FFA_ID_GET			FFA_SMC_32(0x69)
#define FFA_MSG_POLL			FFA_SMC_32(0x6A)
#define FFA_MSG_WAIT			FFA_SMC_32(0x6B)
#define FFA_YIELD			FFA_SMC_32(0x6C)
#define FFA_RUN				FFA_SMC_32(0x6D)
#define FFA_MSG_SEND			FFA_SMC_32(0x6E)
#define FFA_MSG_SEND_DIRECT_REQ		FFA_SMC_32(0x6F)
#define FFA_FN64_MSG_SEND_DIRECT_REQ	FFA_SMC_64(0x6F)
#define FFA_MSG_SEND_DIRECT_RESP	FFA_SMC_32(0x70)
#define FFA_FN64_MSG_SEND_DIRECT_RESP	FFA_SMC_64(0x70)
#define FFA_MEM_DONATE			FFA_SMC_32(0x71)
#define FFA_FN64_MEM_DONATE		FFA_SMC_64(0x71)
#define FFA_MEM_LEND			FFA_SMC_32(0x72)
#define FFA_FN64_MEM_LEND		FFA_SMC_64(0x72)
#define FFA_MEM_SHARE			FFA_SMC_32(0x73)
#define FFA_FN64_MEM_SHARE		FFA_SMC_64(0x73)
#define FFA_MEM_RETRIEVE_REQ		FFA_SMC_32(0x74)
#define FFA_FN64_MEM_RETRIEVE_REQ	FFA_SMC_64(0x74)
#define FFA_MEM_RETRIEVE_RESP		FFA_SMC_32(0x75)
#define FFA_MEM_RELINQUISH		FFA_SMC_32(0x76)
#define FFA_MEM_RECLAIM			FFA_SMC_32(0x77)
#define FFA_MEM_OP_PAUSE		FFA_SMC_32(0x78)
#define FFA_MEM_OP_RESUME		FFA_SMC_32(0x79)
#define FFA_MEM_FRAG_RX			FFA_SMC_32(0x7A)
#define FFA_MEM_FRAG_TX			FFA_SMC_32(0x7B)
#define FFA_NORMAL_WORLD_RESUME		FFA_SMC_32(0x7C)
#define FFA_NOTIFICATION_BITMAP_CREATE	FFA_SMC_32(0x7D)
#define FFA_NOTIFICATION_BITMAP_DESTROY	FFA_SMC_32(0x7E)
#define FFA_NOTIFICATION_BIND		FFA_SMC_32(0x7F)
#define FFA_NOTIFICATION_UNBIND		FFA_SMC_32(0x80)
#define FFA_NOTIFICATION_SET		FFA_SMC_32(0x81)
#define FFA_NOTIFICATION_GET		FFA_SMC_32(0x82)
#define FFA_NOTIFICATION_INFO_GET	FFA_SMC_32(0x83)
#define FFA_FN64_NOTIFICATION_INFO_GET	FFA_SMC_64(0x83)
#define FFA_RX_ACQUIRE			FFA_SMC_32(0x84)
#define FFA_SPM_ID_GET			FFA_SMC_32(0x85)
#define FFA_MSG_SEND2			FFA_SMC_32(0x86)
#define FFA_SECONDARY_EP_REGISTER	FFA_SMC_32(0x87)
#define FFA_FN64_SECONDARY_EP_REGISTER	FFA_SMC_64(0x87)
#define FFA_MEM_PERM_GET		FFA_SMC_32(0x88)
#define FFA_FN64_MEM_PERM_GET		FFA_SMC_64(0x88)
#define FFA_MEM_PERM_SET		FFA_SMC_32(0x89)
#define FFA_FN64_MEM_PERM_SET		FFA_SMC_64(0x89)

/*
 * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
 * For such calls FFA_FN_NATIVE(name) will choose the appropriate
 * (native-width) function ID.
 */
#ifdef CONFIG_64BIT
#define FFA_FN_NATIVE(name)	FFA_FN64_##name
#else
#define FFA_FN_NATIVE(name)	FFA_##name
#endif
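
/*
 * Illustrative sketch (not part of the original header): FFA_FN_NATIVE()
 * only selects between the SMC32 and SMC64 function IDs defined above, so a
 * caller can stay word-size agnostic. On a CONFIG_64BIT kernel
 * FFA_FN_NATIVE(RXTX_MAP) expands to FFA_FN64_RXTX_MAP, otherwise to
 * FFA_RXTX_MAP. The buffer addresses and page count below are hypothetical;
 * the conduit call uses arm_smccc_1_1_invoke() from <linux/arm-smccc.h>,
 * assuming an SMCCC v1.1 conduit is available.
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_invoke(FFA_FN_NATIVE(RXTX_MAP),
 *			     tx_buf_pa, rx_buf_pa, rxtx_page_count, &res);
 *	if (res.a0 == FFA_ERROR)
 *		return (int)res.a2;	// FFA_RET_* error code
 */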

/* FFA error codes. */
#define FFA_RET_SUCCESS			(0)
#define FFA_RET_NOT_SUPPORTED		(-1)
#define FFA_RET_INVALID_PARAMETERS	(-2)
#define FFA_RET_NO_MEMORY		(-3)
#define FFA_RET_BUSY			(-4)
#define FFA_RET_INTERRUPTED		(-5)
#define FFA_RET_DENIED			(-6)
#define FFA_RET_RETRY			(-7)
#define FFA_RET_ABORTED			(-8)
#define FFA_RET_NO_DATA			(-9)

/* FFA version encoding */
#define FFA_MAJOR_VERSION_MASK	GENMASK(30, 16)
#define FFA_MINOR_VERSION_MASK	GENMASK(15, 0)
#define FFA_MAJOR_VERSION(x)	((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
#define FFA_MINOR_VERSION(x)	((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
#define FFA_PACK_VERSION_INFO(major, minor)			\
	(FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) |		\
	 FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0		FFA_PACK_VERSION_INFO(1, 0)
#define FFA_VERSION_1_1		FFA_PACK_VERSION_INFO(1, 1)

/*
 * The FF-A specification refers explicitly to '4K pages'. This must not be
 * confused with the kernel PAGE_SIZE, which is the translation granule the
 * kernel is configured with and may be 4K, 16K or 64K.
 */
#define FFA_PAGE_SIZE		SZ_4K

/*
 * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
 * query for FFA_RXTX_MAP.
 */
#define FFA_FEAT_RXTX_MIN_SZ_4K		0
#define FFA_FEAT_RXTX_MIN_SZ_64K	1
#define FFA_FEAT_RXTX_MIN_SZ_16K	2

/* FFA Bus/Device/Driver related */
struct ffa_device {
	u32 id;
	int vm_id;
	bool mode_32bit;
	uuid_t uuid;
	struct device dev;
	const struct ffa_ops *ops;
};

#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)

struct ffa_device_id {
	uuid_t uuid;
};

struct ffa_driver {
	const char *name;
	int (*probe)(struct ffa_device *sdev);
	void (*remove)(struct ffa_device *sdev);
	const struct ffa_device_id *id_table;

	struct device_driver driver;
};

#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)

static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
{
	dev_set_drvdata(&fdev->dev, data);
}

static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
{
	return dev_get_drvdata(&fdev->dev);
}

#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
				       const struct ffa_ops *ops);
void ffa_device_unregister(struct ffa_device *ffa_dev);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
			const char *mod_name);
void ffa_driver_unregister(struct ffa_driver *driver);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);

#else
static inline
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
				       const struct ffa_ops *ops)
{
	return NULL;
}

static inline void ffa_device_unregister(struct ffa_device *dev) {}

static inline int
ffa_driver_register(struct ffa_driver *driver, struct module *owner,
		    const char *mod_name)
{
	return -EINVAL;
}

static inline void ffa_driver_unregister(struct ffa_driver *driver) {}

static inline
bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }

#endif /* CONFIG_ARM_FFA_TRANSPORT */

#define ffa_register(driver) \
	ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define ffa_unregister(driver) \
	ffa_driver_unregister(driver)
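
/*
 * Illustrative sketch (not part of the original header): manual registration
 * of an FF-A driver from module init/exit using the macros above. The driver
 * name, UUID bytes and callbacks are hypothetical; most drivers should use
 * the module_ffa_driver() helper documented below instead.
 *
 *	static int example_ffa_probe(struct ffa_device *ffa_dev)
 *	{
 *		// Stash per-device state, query ffa_dev->ops, etc.
 *		return 0;
 *	}
 *
 *	static void example_ffa_remove(struct ffa_device *ffa_dev)
 *	{
 *	}
 *
 *	static const struct ffa_device_id example_ffa_id_table[] = {
 *		{ UUID_INIT(0x12345678, 0x1234, 0x1234,
 *			    0x00, 0x11, 0x22, 0x33,
 *			    0x44, 0x55, 0x66, 0x77) },
 *		{}
 *	};
 *
 *	static struct ffa_driver example_ffa_driver = {
 *		.name		= "example_ffa",
 *		.probe		= example_ffa_probe,
 *		.remove		= example_ffa_remove,
 *		.id_table	= example_ffa_id_table,
 *	};
 *
 *	static int __init example_ffa_init(void)
 *	{
 *		return ffa_register(&example_ffa_driver);
 *	}
 *	module_init(example_ffa_init);
 *
 *	static void __exit example_ffa_exit(void)
 *	{
 *		ffa_unregister(&example_ffa_driver);
 *	}
 *	module_exit(example_ffa_exit);
 */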

/**
 * module_ffa_driver() - Helper macro for registering an FF-A driver
 * @__ffa_driver: ffa_driver structure
 *
 * Helper macro for FF-A drivers to set up proper module init / exit
 * functions. Replaces module_init() and module_exit() and keeps people from
 * printing pointless things to the kernel log when their driver is loaded.
 */
#define module_ffa_driver(__ffa_driver) \
	module_driver(__ffa_driver, ffa_register, ffa_unregister)

/* FFA transport related */
struct ffa_partition_info {
	u16 id;
	u16 exec_ctxt;
/* partition supports receipt of direct requests */
#define FFA_PARTITION_DIRECT_RECV	BIT(0)
/* partition can send direct requests. */
#define FFA_PARTITION_DIRECT_SEND	BIT(1)
/* partition can send and receive indirect messages. */
#define FFA_PARTITION_INDIRECT_MSG	BIT(2)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC	BIT(8)
	u32 properties;
	u32 uuid[4];
};

/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
struct ffa_send_direct_data {
	unsigned long data0; /* w3/x3 */
	unsigned long data1; /* w4/x4 */
	unsigned long data2; /* w5/x5 */
	unsigned long data3; /* w6/x6 */
	unsigned long data4; /* w7/x7 */
};

struct ffa_mem_region_addr_range {
	/* The base IPA of the constituent memory region, aligned to 4 kiB */
	u64 address;
	/* The number of 4 kiB pages in the constituent memory region. */
	u32 pg_cnt;
	u32 reserved;
};

struct ffa_composite_mem_region {
	/*
	 * The total number of 4 kiB pages included in this memory region. This
	 * must be equal to the sum of page counts specified in each
	 * `struct ffa_mem_region_addr_range`.
	 */
	u32 total_pg_cnt;
	/* The number of constituents included in this memory region range */
	u32 addr_range_cnt;
	u64 reserved;
	/* An array of `addr_range_cnt` memory region constituents. */
	struct ffa_mem_region_addr_range constituents[];
};

struct ffa_mem_region_attributes {
	/* The ID of the VM to which the memory is being given or shared. */
	u16 receiver;
	/*
	 * The permissions with which the memory region should be mapped in the
	 * receiver's page table.
	 */
#define FFA_MEM_EXEC		BIT(3)
#define FFA_MEM_NO_EXEC		BIT(2)
#define FFA_MEM_RW		BIT(1)
#define FFA_MEM_RO		BIT(0)
	u8 attrs;
	/*
	 * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
	 * for memory regions with multiple borrowers.
	 */
#define FFA_MEM_RETRIEVE_SELF_BORROWER	BIT(0)
	u8 flag;
	/*
	 * Offset in bytes from the start of the outer `ffa_mem_region` to
	 * a `struct ffa_composite_mem_region`.
	 */
	u32 composite_off;
	u64 reserved;
};
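
/*
 * Illustrative sketch (not part of the original header): describing a single
 * receiver that should map a shared region read-write and non-executable.
 * The receiver partition ID is hypothetical; composite_off is normally
 * filled in by the code that assembles the complete transaction descriptor.
 *
 *	struct ffa_mem_region_attributes ep_attrs = {
 *		.receiver = 0x8001,	// partition ID of the borrower
 *		.attrs	  = FFA_MEM_RW | FFA_MEM_NO_EXEC,
 *	};
 */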

struct ffa_mem_region {
	/* The ID of the VM/owner which originally sent the memory region */
	u16 sender_id;
#define FFA_MEM_NORMAL		BIT(5)
#define FFA_MEM_DEVICE		BIT(4)

#define FFA_MEM_WRITE_BACK	(3 << 2)
#define FFA_MEM_NON_CACHEABLE	(1 << 2)

#define FFA_DEV_nGnRnE		(0 << 2)
#define FFA_DEV_nGnRE		(1 << 2)
#define FFA_DEV_nGRE		(2 << 2)
#define FFA_DEV_GRE		(3 << 2)

#define FFA_MEM_NON_SHAREABLE	(0)
#define FFA_MEM_OUTER_SHAREABLE	(2)
#define FFA_MEM_INNER_SHAREABLE	(3)
	/* Memory region attributes, upper byte MBZ pre v1.1 */
	u16 attributes;
	/*
	 * Clear memory region contents after unmapping it from the sender and
	 * before mapping it for any receiver.
	 */
#define FFA_MEM_CLEAR			BIT(0)
	/*
	 * Whether the hypervisor may time slice the memory sharing or retrieval
	 * operation.
	 */
#define FFA_TIME_SLICE_ENABLE		BIT(1)

#define FFA_MEM_RETRIEVE_TYPE_IN_RESP	(0 << 3)
#define FFA_MEM_RETRIEVE_TYPE_SHARE	(1 << 3)
#define FFA_MEM_RETRIEVE_TYPE_LEND	(2 << 3)
#define FFA_MEM_RETRIEVE_TYPE_DONATE	(3 << 3)

#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT	BIT(9)
#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x)		((x) << 5)
	/* Flags to control behaviour of the transaction. */
	u32 flags;
#define HANDLE_LOW_MASK		GENMASK_ULL(31, 0)
#define HANDLE_HIGH_MASK	GENMASK_ULL(63, 32)
#define HANDLE_LOW(x)		((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
#define HANDLE_HIGH(x)		((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))

#define PACK_HANDLE(l, h)		\
	(FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
	/*
	 * A globally-unique ID assigned by the hypervisor for a region
	 * of memory being sent between VMs.
	 */
	u64 handle;
	/*
	 * An implementation-defined value associated with the receiver and the
	 * memory region.
	 */
	u64 tag;
	/* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
	u32 ep_mem_size;
	/*
	 * The number of `ffa_mem_region_attributes` entries included in this
	 * transaction.
	 */
	u32 ep_count;
	/*
	 * 16-byte aligned offset from the base address of this descriptor
	 * to the first element of the endpoint memory access descriptor array.
	 * Valid only from v1.1.
	 */
	u32 ep_mem_offset;
	/* MBZ, valid only from v1.1 */
	u32 reserved[3];
};

#define CONSTITUENTS_OFFSET(x)	\
	(offsetof(struct ffa_composite_mem_region, constituents[x]))
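
/*
 * Illustrative sketch (not part of the original header): on the SMC32
 * interface the 64-bit memory region handle travels in two 32-bit register
 * words, which is what the HANDLE_LOW()/HANDLE_HIGH()/PACK_HANDLE() helpers
 * above are for. The variable names are hypothetical.
 *
 *	// Split a handle for transmission (e.g. the two handle words of
 *	// FFA_MEM_RECLAIM):
 *	u32 handle_lo = HANDLE_LOW(g_handle);
 *	u32 handle_hi = HANDLE_HIGH(g_handle);
 *
 *	// Reassemble a handle returned in two register words:
 *	u64 g_handle = PACK_HANDLE(handle_lo, handle_hi);
 */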

static inline u32
ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
{
	u32 offset = count * sizeof(struct ffa_mem_region_attributes);
	/*
	 * Prior to v1.1, the endpoint memory descriptor array started at
	 * offset 32 (i.e. the offset of ep_mem_offset in the current structure).
	 */
	if (ffa_version <= FFA_VERSION_1_0)
		offset += offsetof(struct ffa_mem_region, ep_mem_offset);
	else
		offset += sizeof(struct ffa_mem_region);

	return offset;
}

struct ffa_mem_ops_args {
	bool use_txbuf;
	u32 nattrs;
	u32 flags;
	u64 tag;
	u64 g_handle;
	struct scatterlist *sg;
	struct ffa_mem_region_attributes *attrs;
};

struct ffa_info_ops {
	u32 (*api_version_get)(void);
	int (*partition_info_get)(const char *uuid_str,
				  struct ffa_partition_info *buffer);
};

struct ffa_msg_ops {
	void (*mode_32bit_set)(struct ffa_device *dev);
	int (*sync_send_receive)(struct ffa_device *dev,
				 struct ffa_send_direct_data *data);
};

struct ffa_mem_ops {
	int (*memory_reclaim)(u64 g_handle, u32 flags);
	int (*memory_share)(struct ffa_mem_ops_args *args);
	int (*memory_lend)(struct ffa_mem_ops_args *args);
};

struct ffa_cpu_ops {
	int (*run)(struct ffa_device *dev, u16 vcpu);
};

typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);

struct ffa_notifier_ops {
	int (*sched_recv_cb_register)(struct ffa_device *dev,
				      ffa_sched_recv_cb cb, void *cb_data);
	int (*sched_recv_cb_unregister)(struct ffa_device *dev);
	int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
			      ffa_notifier_cb cb, void *cb_data, int notify_id);
	int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
	int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
			   u16 vcpu);
};

struct ffa_ops {
	const struct ffa_info_ops *info_ops;
	const struct ffa_msg_ops *msg_ops;
	const struct ffa_mem_ops *mem_ops;
	const struct ffa_cpu_ops *cpu_ops;
	const struct ffa_notifier_ops *notifier_ops;
};

#endif /* _LINUX_ARM_FFA_H */