/*
 * QEMU Xen backend support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_XEN_BACKEND_OPS_H
#define QEMU_XEN_BACKEND_OPS_H

/*
 * For the time being, these operations map fairly closely to the API of
 * the actual Xen libraries, e.g. libxenevtchn. As we complete the migration
 * from XenLegacyDevice backends to the new XenDevice model, they may
 * evolve to slightly higher-level APIs.
 *
 * The internal emulations do not emulate the Xen APIs entirely faithfully;
 * only enough to be used by the Xen backend devices. For example, only one
 * event channel can be bound to each handle, since that's sufficient for
 * the device support (only the true Xen HVM backend uses more). The
 * behaviour of unmask() and pending() also differs, because the device
 * backends don't depend on the exact Xen semantics.
 */

typedef struct xenevtchn_handle xenevtchn_handle;
typedef int xenevtchn_port_or_error_t;
typedef uint32_t evtchn_port_t;
typedef uint16_t domid_t;
typedef uint32_t grant_ref_t;

#define XEN_PAGE_SHIFT       12
#define XEN_PAGE_SIZE        (1UL << XEN_PAGE_SHIFT)
#define XEN_PAGE_MASK        (~(XEN_PAGE_SIZE - 1))
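
/*
 * Illustrative use of the page-size helpers (an example, not part of
 * the API): the usual rounding and alignment arithmetic looks like
 * this:
 *
 *     size_t bytes = 5000;
 *     size_t pages = (bytes + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT; // 2
 *
 *     uintptr_t addr = 0x12345;
 *     uintptr_t base = addr & XEN_PAGE_MASK;     // 0x12000
 *     uintptr_t off  = addr & ~XEN_PAGE_MASK;    // 0x345
 */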

struct evtchn_backend_ops {
    xenevtchn_handle *(*open)(void);
    int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
                            evtchn_port_t guest_port);
    int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
    int (*close)(xenevtchn_handle *xc);
    int (*get_fd)(xenevtchn_handle *xc);
    int (*notify)(xenevtchn_handle *xc, evtchn_port_t port);
    int (*unmask)(xenevtchn_handle *xc, evtchn_port_t port);
    int (*pending)(xenevtchn_handle *xc);
};

extern struct evtchn_backend_ops *xen_evtchn_ops;
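
/*
 * A provider fills in an ops table and points xen_evtchn_ops at it.
 * Minimal sketch with hypothetical function names (the real tables are
 * installed via setup_xen_backend_ops(), declared at the end of this
 * header):
 *
 *     static struct evtchn_backend_ops my_evtchn_ops = {
 *         .open             = my_evtchn_open,
 *         .bind_interdomain = my_evtchn_bind_interdomain,
 *         .unbind           = my_evtchn_unbind,
 *         .close            = my_evtchn_close,
 *         .get_fd           = my_evtchn_get_fd,
 *         .notify           = my_evtchn_notify,
 *         .unmask           = my_evtchn_unmask,
 *         .pending          = my_evtchn_pending,
 *     };
 *
 *     xen_evtchn_ops = &my_evtchn_ops;
 */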

static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
{
    if (!xen_evtchn_ops) {
        return NULL;
    }
    return xen_evtchn_ops->open();
}

static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc,
                                                   uint32_t domid,
                                                   evtchn_port_t guest_port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port);
}

static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc,
                                         evtchn_port_t port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->unbind(xc, port);
}

static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->close(xc);
}

static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->get_fd(xc);
}

static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc,
                                         evtchn_port_t port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->notify(xc, port);
}

static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc,
                                         evtchn_port_t port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->unmask(xc, port);
}

static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;
    }
    return xen_evtchn_ops->pending(xc);
}
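
/*
 * Typical calling sequence for the event-channel wrappers above (a
 * sketch; the domid and guest_port values are hypothetical). The fd
 * returned by qemu_xen_evtchn_fd() is normally watched by the main
 * loop; when it becomes readable, pending() reports the port and
 * unmask() re-arms it:
 *
 *     xenevtchn_handle *xc = qemu_xen_evtchn_open();
 *     if (xc) {
 *         int port = qemu_xen_evtchn_bind_interdomain(xc, domid,
 *                                                     guest_port);
 *         if (port >= 0) {
 *             qemu_xen_evtchn_notify(xc, port);   // signal the guest
 *             // ... later, when qemu_xen_evtchn_fd(xc) is readable:
 *             if (qemu_xen_evtchn_pending(xc) == port) {
 *                 qemu_xen_evtchn_unmask(xc, port);
 *             }
 *             qemu_xen_evtchn_unbind(xc, port);
 *         }
 *         qemu_xen_evtchn_close(xc);
 *     }
 */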

typedef struct xengntdev_handle xengnttab_handle;

typedef struct XenGrantCopySegment {
    union {
        void *virt;
        struct {
            uint32_t ref;
            off_t offset;
        } foreign;
    } source, dest;
    size_t len;
} XenGrantCopySegment;
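
/*
 * Example (illustrative only): a segment describing a copy of one page
 * from a local buffer into a page the guest has granted, where
 * local_buf and gref are hypothetical:
 *
 *     XenGrantCopySegment seg = {
 *         .source.virt  = local_buf,
 *         .dest.foreign = { .ref = gref, .offset = 0 },
 *         .len          = XEN_PAGE_SIZE,
 *     };
 */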

/*
 * Set in gnttab_backend_ops.features when map_refs() can map more than
 * one grant reference in a single call (see
 * qemu_xen_gnttab_can_map_multi() below).
 */
#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE  (1U << 0)

struct gnttab_backend_ops {
    uint32_t features;
    xengnttab_handle *(*open)(void);
    int (*close)(xengnttab_handle *xgt);
    int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid,
                      XenGrantCopySegment *segs, uint32_t nr_segs,
                      Error **errp);
    int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants);
    void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid,
                      uint32_t *refs, int prot);
    int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs,
                 uint32_t count);
};

extern struct gnttab_backend_ops *xen_gnttab_ops;

static inline bool qemu_xen_gnttab_can_map_multi(void)
{
    return xen_gnttab_ops &&
        !!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE);
}

static inline xengnttab_handle *qemu_xen_gnttab_open(void)
{
    if (!xen_gnttab_ops) {
        return NULL;
    }
    return xen_gnttab_ops->open();
}

static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt)
{
    if (!xen_gnttab_ops) {
        return -ENOSYS;
    }
    return xen_gnttab_ops->close(xgt);
}

static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt,
                                             bool to_domain, uint32_t domid,
                                             XenGrantCopySegment *segs,
                                             uint32_t nr_segs, Error **errp)
{
    if (!xen_gnttab_ops) {
        return -ENOSYS;
    }

    return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs,
                                      errp);
}
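
/*
 * Continuing the XenGrantCopySegment example above: copying that
 * segment into the guest (to_domain = true) would look like this,
 * local_err being a caller-declared Error pointer:
 *
 *     int rc = qemu_xen_gnttab_grant_copy(xgt, true, domid, &seg, 1,
 *                                         &local_err);
 */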

static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt,
                                                 uint32_t nr_grants)
{
    if (!xen_gnttab_ops) {
        return -ENOSYS;
    }
    return xen_gnttab_ops->set_max_grants(xgt, nr_grants);
}

static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt,
                                             uint32_t count, uint32_t domid,
                                             uint32_t *refs, int prot)
{
    if (!xen_gnttab_ops) {
        return NULL;
    }
    return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);
}

static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt,
                                        void *start_address, uint32_t *refs,
                                        uint32_t count)
{
    if (!xen_gnttab_ops) {
        return -ENOSYS;
    }
    return xen_gnttab_ops->unmap(xgt, start_address, refs, count);
}
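
/*
 * Putting the grant-mapping wrappers together (a sketch; the gref
 * values and domid are hypothetical, and PROT_* come from
 * <sys/mman.h>). Implementations without
 * XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE can only map one ref per call, so
 * callers check qemu_xen_gnttab_can_map_multi() first:
 *
 *     uint32_t refs[2] = { gref0, gref1 };
 *     xengnttab_handle *xgt = qemu_xen_gnttab_open();
 *
 *     if (xgt && qemu_xen_gnttab_can_map_multi()) {
 *         void *map = qemu_xen_gnttab_map_refs(xgt, 2, domid, refs,
 *                                              PROT_READ | PROT_WRITE);
 *         if (map) {
 *             // two contiguous XEN_PAGE_SIZE pages are now mapped at 'map'
 *             qemu_xen_gnttab_unmap(xgt, map, refs, 2);
 *         }
 *         qemu_xen_gnttab_close(xgt);
 *     }
 */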

void setup_xen_backend_ops(void);

#endif /* QEMU_XEN_BACKEND_OPS_H */