// SPDX-License-Identifier: GPL-2.0-only
/*
 * Arm Firmware Framework for ARMv8-A (FFA) interface driver
 *
 * The Arm FFA specification[1] describes a software architecture that
 * leverages the virtualization extension to isolate software images
 * provided by an ecosystem of vendors from each other, and describes
 * interfaces that standardize communication between the various software
 * images, including communication between images in the Secure world and
 * Normal world. Any hypervisor could use the FFA interfaces to enable
 * communication between the VMs it manages.
 *
 * The hypervisor, a.k.a. the partition manager in FFA terminology, can
 * assign system resources (memory regions, devices, CPU cycles) to the
 * partitions and manage isolation amongst them.
 *
 * [1] https://developer.arm.com/docs/den0077/latest
 *
 * Copyright (C) 2021 ARM Ltd.
 */

#define DRIVER_NAME "ARM FF-A"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uuid.h>

#include "common.h"

#define FFA_DRIVER_VERSION      FFA_VERSION_1_0
#define FFA_MIN_VERSION         FFA_VERSION_1_0

#define SENDER_ID_MASK          GENMASK(31, 16)
#define RECEIVER_ID_MASK        GENMASK(15, 0)
#define SENDER_ID(x)            ((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
#define RECEIVER_ID(x)          ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
#define PACK_TARGET_INFO(s, r)          \
        (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))

/*
 * Keep the Rx/Tx buffer size at 4K for now; 64K may be preferred to
 * keep it at least one page in a 64K PAGE_SIZE configuration.
 */
#define RXTX_BUFFER_SIZE        SZ_4K

static ffa_fn *invoke_ffa_fn;

static const int ffa_linux_errmap[] = {
        /* better than a switch case as long as the return values are contiguous */
        0,              /* FFA_RET_SUCCESS */
        -EOPNOTSUPP,    /* FFA_RET_NOT_SUPPORTED */
        -EINVAL,        /* FFA_RET_INVALID_PARAMETERS */
        -ENOMEM,        /* FFA_RET_NO_MEMORY */
        -EBUSY,         /* FFA_RET_BUSY */
        -EINTR,         /* FFA_RET_INTERRUPTED */
        -EACCES,        /* FFA_RET_DENIED */
        -EAGAIN,        /* FFA_RET_RETRY */
        -ECANCELED,     /* FFA_RET_ABORTED */
};

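/* Map a (negative) FFA_RET_* error code onto the closest Linux errno */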
static inline int ffa_to_linux_errno(int errno)
{
        int err_idx = -errno;

        if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
                return ffa_linux_errmap[err_idx];
        return -EINVAL;
}

struct ffa_drv_info {
        u32 version;
        u16 vm_id;
        struct mutex rx_lock; /* lock to protect Rx buffer */
        struct mutex tx_lock; /* lock to protect Tx buffer */
        void *rx_buffer;
        void *tx_buffer;
        bool mem_ops_native;
};

static struct ffa_drv_info *drv_info;

/*
 * The driver must be able to support all the versions from the earliest
 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
 * The specification states that if firmware supports an FFA implementation
 * that is incompatible with, and at a greater version number than, the one
 * specified by the caller (FFA_DRIVER_VERSION passed as parameter to
 * FFA_VERSION), it must return the NOT_SUPPORTED error code.
 */
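/*
 * For example, firmware reporting v1.1 against a v1.0 FFA_DRIVER_VERSION
 * makes the driver downgrade and keep using v1.0, while firmware older
 * than FFA_MIN_VERSION causes the version check below to fail outright.
 */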
static u32 ffa_compatible_version_find(u32 version)
{
        u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
        u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
        u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);

        if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
                return version;

        pr_info("Firmware version higher than driver version, downgrading\n");
        return FFA_DRIVER_VERSION;
}

static int ffa_version_check(u32 *version)
{
        ffa_value_t ver;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
                      }, &ver);

        if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
                pr_info("FFA_VERSION returned not supported\n");
                return -EOPNOTSUPP;
        }

        if (ver.a0 < FFA_MIN_VERSION) {
                pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
                       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
                       FFA_MAJOR_VERSION(FFA_MIN_VERSION),
                       FFA_MINOR_VERSION(FFA_MIN_VERSION));
                return -EINVAL;
        }

        pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
                FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
        pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
                FFA_MINOR_VERSION(ver.a0));
        *version = ffa_compatible_version_find(ver.a0);

        return 0;
}

static int ffa_rx_release(void)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_RX_RELEASE,
                      }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        /* check for ret.a0 == FFA_RX_RELEASE ? */

        return 0;
}

static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
{
        ffa_value_t ret;

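        /*
         * Note: FFA_FN_NATIVE is expected to pick the 64-bit (SMC64)
         * function ID on 64-bit builds, so that a full physical address
         * fits into a single register.
         */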
        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_FN_NATIVE(RXTX_MAP),
                      .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
                      }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        return 0;
}

static int ffa_rxtx_unmap(u16 vm_id)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
                      }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        return 0;
}

#define PARTITION_INFO_GET_RETURN_COUNT_ONLY    BIT(0)

/* buffer must hold at least num_partitions * sizeof(struct ffa_partition_info) bytes */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
                         struct ffa_partition_info *buffer, int num_partitions)
{
        int idx, count, flags = 0, sz, buf_sz;
        ffa_value_t partition_info;

        if (!buffer || !num_partitions) /* Just get the count for now */
                flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;

        mutex_lock(&drv_info->rx_lock);
        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_PARTITION_INFO_GET,
                      .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
                      .a5 = flags,
                      }, &partition_info);

        if (partition_info.a0 == FFA_ERROR) {
                mutex_unlock(&drv_info->rx_lock);
                return ffa_to_linux_errno((int)partition_info.a2);
        }

        count = partition_info.a2;

        if (drv_info->version > FFA_VERSION_1_0) {
                buf_sz = sz = partition_info.a3;
                if (sz > sizeof(*buffer))
                        buf_sz = sizeof(*buffer);
        } else {
                /* FFA_VERSION_1_0 lacks the size in the response */
                buf_sz = sz = 8;
        }

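        /*
         * Copy at most sizeof(*buffer) per entry: newer firmware may use
         * descriptors larger than this driver knows about, so any extra
         * bytes are skipped when stepping through the Rx buffer.
         */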
        if (buffer && count <= num_partitions)
                for (idx = 0; idx < count; idx++)
                        memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
                               buf_sz);

        ffa_rx_release();

        mutex_unlock(&drv_info->rx_lock);

        return count;
}

/* buffer is allocated here and the caller must free it if the returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
        int count;
        u32 uuid0_4[4];
        struct ffa_partition_info *pbuf;

        export_uuid((u8 *)uuid0_4, uuid);
        count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
                                         uuid0_4[3], NULL, 0);
        if (count <= 0)
                return count;

        pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
        if (!pbuf)
                return -ENOMEM;

        count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
                                         uuid0_4[3], pbuf, count);
        if (count <= 0)
                kfree(pbuf);
        else
                *buffer = pbuf;

        return count;
}

#define VM_ID_MASK      GENMASK(15, 0)
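/* FFA_ID_GET returns this partition's 16-bit ID in the low half of w2 */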
static int ffa_id_get(u16 *vm_id)
{
        ffa_value_t id;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_ID_GET,
                      }, &id);

        if (id.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)id.a2);

        *vm_id = FIELD_GET(VM_ID_MASK, (id.a2));

        return 0;
}

static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
                                   struct ffa_send_direct_data *data)
{
        u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
        ffa_value_t ret;

        if (mode_32bit) {
                req_id = FFA_MSG_SEND_DIRECT_REQ;
                resp_id = FFA_MSG_SEND_DIRECT_RESP;
        } else {
                req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
                resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
        }

        invoke_ffa_fn((ffa_value_t){
                      .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
                      .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
                      .a6 = data->data3, .a7 = data->data4,
                      }, &ret);

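        /*
         * The target endpoint may get preempted; keep resuming it with
         * FFA_RUN until it produces a direct response or an error.
         */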
        while (ret.a0 == FFA_INTERRUPT)
                invoke_ffa_fn((ffa_value_t){
                              .a0 = FFA_RUN, .a1 = ret.a1,
                              }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        if (ret.a0 == resp_id) {
                data->data0 = ret.a3;
                data->data1 = ret.a4;
                data->data2 = ret.a5;
                data->data3 = ret.a6;
                data->data4 = ret.a7;
                return 0;
        }

        return -EINVAL;
}

static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
                              u32 frag_len, u32 len, u64 *handle)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = func_id, .a1 = len, .a2 = frag_len,
                      .a3 = buf, .a4 = buf_sz,
                      }, &ret);

        while (ret.a0 == FFA_MEM_OP_PAUSE)
                invoke_ffa_fn((ffa_value_t){
                              .a0 = FFA_MEM_OP_RESUME,
                              .a1 = ret.a1, .a2 = ret.a2,
                              }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

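        /*
         * On SUCCESS the global memory handle is in w2/w3, while a
         * FFA_MEM_FRAG_RX response (more fragments expected) carries
         * the handle in w1/w2 instead.
         */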
        if (ret.a0 == FFA_SUCCESS) {
                if (handle)
                        *handle = PACK_HANDLE(ret.a2, ret.a3);
        } else if (ret.a0 == FFA_MEM_FRAG_RX) {
                if (handle)
                        *handle = PACK_HANDLE(ret.a1, ret.a2);
        } else {
                return -EOPNOTSUPP;
        }

        return frag_len;
}

static int ffa_mem_next_frag(u64 handle, u32 frag_len)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_MEM_FRAG_TX,
                      .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
                      .a3 = frag_len,
                      }, &ret);

        while (ret.a0 == FFA_MEM_OP_PAUSE)
                invoke_ffa_fn((ffa_value_t){
                              .a0 = FFA_MEM_OP_RESUME,
                              .a1 = ret.a1, .a2 = ret.a2,
                              }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

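        /*
         * FFA_MEM_FRAG_RX means more fragments are expected; per the
         * FF-A spec w3 carries the updated fragment offset, which is
         * returned so the caller keeps transmitting.
         */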
        if (ret.a0 == FFA_MEM_FRAG_RX)
                return ret.a3;
        else if (ret.a0 == FFA_SUCCESS)
                return 0;

        return -EOPNOTSUPP;
}

static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
                      u32 len, u64 *handle, bool first)
{
        if (!first)
                return ffa_mem_next_frag(*handle, frag_len);

        return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
}

static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
{
        u32 num_pages = 0;

        do {
                num_pages += sg->length / FFA_PAGE_SIZE;
        } while ((sg = sg_next(sg)));

        return num_pages;
}

static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
                       struct ffa_mem_ops_args *args)
{
        int rc = 0;
        bool first = true;
        phys_addr_t addr = 0;
        struct ffa_composite_mem_region *composite;
        struct ffa_mem_region_addr_range *constituents;
        struct ffa_mem_region_attributes *ep_mem_access;
        struct ffa_mem_region *mem_region = buffer;
        u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);

        mem_region->tag = args->tag;
        mem_region->flags = args->flags;
        mem_region->sender_id = drv_info->vm_id;
        mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
                                 FFA_MEM_INNER_SHAREABLE;
        ep_mem_access = &mem_region->ep_mem_access[0];

        for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
                ep_mem_access->receiver = args->attrs[idx].receiver;
                ep_mem_access->attrs = args->attrs[idx].attrs;
                ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
                ep_mem_access->flag = 0;
                ep_mem_access->reserved = 0;
        }
        mem_region->reserved_0 = 0;
        mem_region->reserved_1 = 0;
        mem_region->ep_count = args->nattrs;

        composite = buffer + COMPOSITE_OFFSET(args->nattrs);
        composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
        composite->addr_range_cnt = num_entries;
        composite->reserved = 0;

        length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
        frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
        if (frag_len > max_fragsize)
                return -ENXIO;

        if (!args->use_txbuf) {
                addr = virt_to_phys(buffer);
                buf_sz = max_fragsize / FFA_PAGE_SIZE;
        }

        constituents = buffer + frag_len;
        idx = 0;
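
        /*
         * Pack one address range per scatterlist entry; whenever the
         * current fragment fills the buffer, transmit it and start a
         * new fragment at the beginning of the buffer.
         */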
        do {
                if (frag_len == max_fragsize) {
                        rc = ffa_transmit_fragment(func_id, addr, buf_sz,
                                                   frag_len, length,
                                                   &args->g_handle, first);
                        if (rc < 0)
                                return -ENXIO;

                        first = false;
                        idx = 0;
                        frag_len = 0;
                        constituents = buffer;
                }

                if ((void *)constituents - buffer > max_fragsize) {
                        pr_err("Memory Region Fragment > Tx Buffer size\n");
                        return -EFAULT;
                }

                constituents->address = sg_phys(args->sg);
                constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
                constituents->reserved = 0;
                constituents++;
                frag_len += sizeof(struct ffa_mem_region_addr_range);
        } while ((args->sg = sg_next(args->sg)));

        return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
                                     length, &args->g_handle, first);
}

static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
        int ret;
        void *buffer;
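
        /*
         * Callers that cannot use the driver's mapped Tx buffer get a
         * private kernel buffer instead, avoiding contention on tx_lock.
         */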
        if (!args->use_txbuf) {
                buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
                if (!buffer)
                        return -ENOMEM;
        } else {
                buffer = drv_info->tx_buffer;
                mutex_lock(&drv_info->tx_lock);
        }

        ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);

        if (args->use_txbuf)
                mutex_unlock(&drv_info->tx_lock);
        else
                free_pages_exact(buffer, RXTX_BUFFER_SIZE);

        return ret < 0 ? ret : 0;
}

static int ffa_memory_reclaim(u64 g_handle, u32 flags)
{
        ffa_value_t ret;

        invoke_ffa_fn((ffa_value_t){
                      .a0 = FFA_MEM_RECLAIM,
                      .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
                      .a3 = flags,
                      }, &ret);

        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);

        return 0;
}

static int ffa_features(u32 func_feat_id, u32 input_props,
                        u32 *if_props_1, u32 *if_props_2)
{
        ffa_value_t id;

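        /*
         * Input properties are defined only when querying a function ID
         * (a fast SMCCC call); queries for feature IDs must pass zero.
         */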
        if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
                pr_err("%s: Invalid Parameters: %x, %x", __func__,
                       func_feat_id, input_props);
                return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
        }

        invoke_ffa_fn((ffa_value_t){
                .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
                }, &id);

        if (id.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)id.a2);

        if (if_props_1)
                *if_props_1 = id.a2;
        if (if_props_2)
                *if_props_2 = id.a3;

        return 0;
}

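/*
 * Use the native memory ops if FFA_FEATURES reports that the firmware
 * implements the native function ID for MEM_LEND or MEM_SHARE.
 */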
static void ffa_set_up_mem_ops_native_flag(void)
{
        if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
            !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
                drv_info->mem_ops_native = true;
}

static u32 ffa_api_version_get(void)
{
        return drv_info->version;
}

static int ffa_partition_info_get(const char *uuid_str,
                                  struct ffa_partition_info *buffer)
{
        int count;
        uuid_t uuid;
        struct ffa_partition_info *pbuf;

        if (uuid_parse(uuid_str, &uuid)) {
                pr_err("invalid uuid (%s)\n", uuid_str);
                return -ENODEV;
        }

        count = ffa_partition_probe(&uuid, &pbuf);
        if (count <= 0)
                return -ENOENT;

        memcpy(buffer, pbuf, sizeof(*pbuf) * count);
        kfree(pbuf);
        return 0;
}

static void _ffa_mode_32bit_set(struct ffa_device *dev)
{
        dev->mode_32bit = true;
}

static void ffa_mode_32bit_set(struct ffa_device *dev)
{
        if (drv_info->version > FFA_VERSION_1_0)
                return;

        _ffa_mode_32bit_set(dev);
}

static int ffa_sync_send_receive(struct ffa_device *dev,
                                 struct ffa_send_direct_data *data)
{
        return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
                                       dev->mode_32bit, data);
}

static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
        if (drv_info->mem_ops_native)
                return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);

        return ffa_memory_ops(FFA_MEM_SHARE, args);
}

static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
        /*
         * Note that upon a successful MEM_LEND request the caller
         * must ensure that the memory region specified is not accessed
         * until a successful MEM_RECLAIM call has been made.
         * On systems with a hypervisor present this will be enforced;
         * however, on systems without a hypervisor the responsibility
         * falls to the calling kernel driver to prevent access.
         */
        if (drv_info->mem_ops_native)
                return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);

        return ffa_memory_ops(FFA_MEM_LEND, args);
}

static const struct ffa_info_ops ffa_drv_info_ops = {
        .api_version_get = ffa_api_version_get,
        .partition_info_get = ffa_partition_info_get,
};

static const struct ffa_msg_ops ffa_drv_msg_ops = {
        .mode_32bit_set = ffa_mode_32bit_set,
        .sync_send_receive = ffa_sync_send_receive,
};

static const struct ffa_mem_ops ffa_drv_mem_ops = {
        .memory_reclaim = ffa_memory_reclaim,
        .memory_share = ffa_memory_share,
        .memory_lend = ffa_memory_lend,
};

static const struct ffa_ops ffa_drv_ops = {
        .info_ops = &ffa_drv_info_ops,
        .msg_ops = &ffa_drv_msg_ops,
        .mem_ops = &ffa_drv_mem_ops,
};

void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
        int count, idx;
        struct ffa_partition_info *pbuf, *tpbuf;

        /*
         * FF-A v1.1 provides the UUID for each partition as part of the
         * discovery API; the discovered UUID is already populated in the
         * device's UUID, so there is no need to copy it from the driver
         * table.
         */
        if (drv_info->version > FFA_VERSION_1_0)
                return;

        count = ffa_partition_probe(uuid, &pbuf);
        if (count <= 0)
                return;

        for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
                if (tpbuf->id == ffa_dev->vm_id)
                        uuid_copy(&ffa_dev->uuid, uuid);
        kfree(pbuf);
}

static void ffa_setup_partitions(void)
{
        int count, idx;
        uuid_t uuid;
        struct ffa_device *ffa_dev;
        struct ffa_partition_info *pbuf, *tpbuf;

        count = ffa_partition_probe(&uuid_null, &pbuf);
        if (count <= 0) {
                pr_info("%s: No partitions found, error %d\n", __func__, count);
                return;
        }

        for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
                import_uuid(&uuid, (u8 *)tpbuf->uuid);

                /*
                 * Note that if the UUID is uuid_null, ffa_device_match()
                 * will need to find the UUID of this partition ID with the
                 * help of ffa_device_match_uuid(). FF-A v1.1 and above
                 * provides the UUID here for each partition as part of the
                 * discovery API, and that is what gets passed along.
                 */
                ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
                if (!ffa_dev) {
                        pr_err("%s: failed to register partition ID 0x%x\n",
                               __func__, tpbuf->id);
                        continue;
                }

                if (drv_info->version > FFA_VERSION_1_0 &&
                    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
                        _ffa_mode_32bit_set(ffa_dev);
        }
        kfree(pbuf);
}

static int __init ffa_init(void)
{
        int ret;

        ret = ffa_transport_init(&invoke_ffa_fn);
        if (ret)
                return ret;

        ret = arm_ffa_bus_init();
        if (ret)
                return ret;

        drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
        if (!drv_info) {
                ret = -ENOMEM;
                goto ffa_bus_exit;
        }

        ret = ffa_version_check(&drv_info->version);
        if (ret)
                goto free_drv_info;

        if (ffa_id_get(&drv_info->vm_id)) {
                pr_err("failed to obtain VM id for self\n");
                ret = -ENODEV;
                goto free_drv_info;
        }

        drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
        if (!drv_info->rx_buffer) {
                ret = -ENOMEM;
                goto free_pages;
        }

        drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
        if (!drv_info->tx_buffer) {
                ret = -ENOMEM;
                goto free_pages;
        }

        ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
                           virt_to_phys(drv_info->rx_buffer),
                           RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
        if (ret) {
                pr_err("failed to register FFA RxTx buffers\n");
                goto free_pages;
        }

        mutex_init(&drv_info->rx_lock);
        mutex_init(&drv_info->tx_lock);

        ffa_setup_partitions();

        ffa_set_up_mem_ops_native_flag();

        return 0;
free_pages:
        if (drv_info->tx_buffer)
                free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
        free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
free_drv_info:
        kfree(drv_info);
ffa_bus_exit:
        arm_ffa_bus_exit();
        return ret;
}
subsys_initcall(ffa_init);

static void __exit ffa_exit(void)
{
        ffa_rxtx_unmap(drv_info->vm_id);
        free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
        free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
        kfree(drv_info);
        arm_ffa_bus_exit();
}
module_exit(ffa_exit);

MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");