// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

extern crate gpu_buffer;
extern crate gpu_display;
extern crate gpu_renderer;

mod backend;
mod protocol;

use std::cell::RefCell;
use std::collections::VecDeque;
use std::i64;
use std::mem::size_of;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

use data_model::*;

use sys_util::{Error, EventFd, GuestAddress, GuestMemory, PollContext, PollToken};

use self::gpu_buffer::Device;
use self::gpu_display::*;
use self::gpu_renderer::{format_fourcc, Renderer};

use super::{
    resource_bridge::*, AvailIter, Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_GPU,
    VIRTIO_F_VERSION_1,
};

use self::backend::Backend;
use self::protocol::*;

// The first queue is for virtio-gpu commands. The second queue is for cursor commands, of which
// we expect fewer.
const QUEUE_SIZES: &[u16] = &[256, 16];
const FENCE_POLL_MS: u64 = 1;

struct QueueDescriptor {
    index: u16,
    addr: GuestAddress,
    len: u32,
    data: Option<(GuestAddress, u32)>,
    ret: Option<(GuestAddress, u32)>,
}

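// A completed descriptor ready to be added to a queue's used ring: its index and the number of
// bytes written back to the guest.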
struct ReturnDescriptor {
    index: u16,
    len: u32,
}

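// A response that has already been encoded but is held back until the fence with `fence_id`
// signals.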
struct FenceDescriptor {
    fence_id: u32,
    len: u32,
    desc: QueueDescriptor,
}

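// Virtqueue-facing state: descriptors staged from and returned to the control and cursor queues,
// responses waiting on fences, and the rendering backend that executes the commands.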
struct Frontend {
    ctrl_descriptors: VecDeque<QueueDescriptor>,
    cursor_descriptors: VecDeque<QueueDescriptor>,
    return_ctrl_descriptors: VecDeque<ReturnDescriptor>,
    return_cursor_descriptors: VecDeque<ReturnDescriptor>,
    fence_descriptors: Vec<FenceDescriptor>,
    backend: Backend,
}

impl Frontend {
    fn new(backend: Backend) -> Frontend {
        Frontend {
            ctrl_descriptors: Default::default(),
            cursor_descriptors: Default::default(),
            return_ctrl_descriptors: Default::default(),
            return_cursor_descriptors: Default::default(),
            fence_descriptors: Default::default(),
            backend,
        }
    }

    fn display(&self) -> &Rc<RefCell<GpuDisplay>> {
        self.backend.display()
    }

    fn process_display(&mut self) -> bool {
        self.backend.process_display()
    }

    fn process_resource_bridge(&self, resource_bridge: &ResourceResponseSocket) {
        self.backend.process_resource_bridge(resource_bridge);
    }

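    // Dispatches a decoded command to the backend, converting the little-endian wire fields to
    // native byte order along the way, and returns the response to be encoded back to the guest.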
    fn process_gpu_command(
        &mut self,
        mem: &GuestMemory,
        cmd: GpuCommand,
        data: Option<VolatileSlice>,
    ) -> GpuResponse {
        self.backend.force_ctx_0();

        match cmd {
            GpuCommand::GetDisplayInfo(_) => {
                GpuResponse::OkDisplayInfo(self.backend.display_info().to_vec())
            }
            GpuCommand::ResourceCreate2d(info) => {
                let format = info.format.to_native();
                match format_fourcc(format) {
                    Some(fourcc) => self.backend.create_resource_2d(
                        info.resource_id.to_native(),
                        info.width.to_native(),
                        info.height.to_native(),
                        fourcc,
                    ),
                    None => {
                        warn!(
                            "failed to create resource with unrecognized pipe format {}",
                            format
                        );
                        GpuResponse::ErrInvalidParameter
                    }
                }
            }
            GpuCommand::ResourceUnref(info) => {
                self.backend.unref_resource(info.resource_id.to_native())
            }
            GpuCommand::SetScanout(info) => self.backend.set_scanout(info.resource_id.to_native()),
            GpuCommand::ResourceFlush(info) => self.backend.flush_resource(
                info.resource_id.to_native(),
                info.r.x.to_native(),
                info.r.y.to_native(),
                info.r.width.to_native(),
                info.r.height.to_native(),
            ),
            GpuCommand::TransferToHost2d(info) => self.backend.transfer_to_resource_2d(
                info.resource_id.to_native(),
                info.r.x.to_native(),
                info.r.y.to_native(),
                info.r.width.to_native(),
                info.r.height.to_native(),
                info.offset.to_native(),
                mem,
            ),
            GpuCommand::ResourceAttachBacking(info) if data.is_some() => {
                let data = data.unwrap();
                let entry_count = info.nr_entries.to_native() as usize;
                let mut iovecs = Vec::with_capacity(entry_count);
                for i in 0..entry_count {
                    if let Ok(entry_ref) =
                        data.get_ref((i * size_of::<virtio_gpu_mem_entry>()) as u64)
                    {
                        let entry: virtio_gpu_mem_entry = entry_ref.load();
                        let addr = GuestAddress(entry.addr.to_native());
                        let len = entry.length.to_native() as usize;
                        iovecs.push((addr, len))
                    } else {
                        return GpuResponse::ErrUnspec;
                    }
                }
                self.backend
                    .attach_backing(info.resource_id.to_native(), mem, iovecs)
            }
            GpuCommand::ResourceDetachBacking(info) => {
                self.backend.detach_backing(info.resource_id.to_native())
            }
            GpuCommand::UpdateCursor(info) => self.backend.update_cursor(
                info.resource_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::MoveCursor(info) => self
                .backend
                .move_cursor(info.pos.x.into(), info.pos.y.into()),
            GpuCommand::GetCapsetInfo(info) => {
                self.backend.get_capset_info(info.capset_index.to_native())
            }
            GpuCommand::GetCapset(info) => self
                .backend
                .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
            GpuCommand::CtxCreate(info) => self
                .backend
                .create_renderer_context(info.hdr.ctx_id.to_native()),
            GpuCommand::CtxDestroy(info) => self
                .backend
                .destroy_renderer_context(info.hdr.ctx_id.to_native()),
            GpuCommand::CtxAttachResource(info) => self
                .backend
                .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::CtxDetachResource(info) => self
                .backend
                .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::ResourceCreate3d(info) => {
                let id = info.resource_id.to_native();
                let target = info.target.to_native();
                let format = info.format.to_native();
                let bind = info.bind.to_native();
                let width = info.width.to_native();
                let height = info.height.to_native();
                let depth = info.depth.to_native();
                let array_size = info.array_size.to_native();
                let last_level = info.last_level.to_native();
                let nr_samples = info.nr_samples.to_native();
                let flags = info.flags.to_native();
                self.backend.resource_create_3d(
                    id, target, format, bind, width, height, depth, array_size, last_level,
                    nr_samples, flags,
                )
            }
            GpuCommand::TransferToHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let res_id = info.resource_id.to_native();
                let x = info.box_.x.to_native();
                let y = info.box_.y.to_native();
                let z = info.box_.z.to_native();
                let width = info.box_.w.to_native();
                let height = info.box_.h.to_native();
                let depth = info.box_.d.to_native();
                let level = info.level.to_native();
                let stride = info.stride.to_native();
                let layer_stride = info.layer_stride.to_native();
                let offset = info.offset.to_native();
                self.backend.transfer_to_resource_3d(
                    ctx_id,
                    res_id,
                    x,
                    y,
                    z,
                    width,
                    height,
                    depth,
                    level,
                    stride,
                    layer_stride,
                    offset,
                )
            }
            GpuCommand::TransferFromHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let res_id = info.resource_id.to_native();
                let x = info.box_.x.to_native();
                let y = info.box_.y.to_native();
                let z = info.box_.z.to_native();
                let width = info.box_.w.to_native();
                let height = info.box_.h.to_native();
                let depth = info.box_.d.to_native();
                let level = info.level.to_native();
                let stride = info.stride.to_native();
                let layer_stride = info.layer_stride.to_native();
                let offset = info.offset.to_native();
                self.backend.transfer_from_resource_3d(
                    ctx_id,
                    res_id,
                    x,
                    y,
                    z,
                    width,
                    height,
                    depth,
                    level,
                    stride,
                    layer_stride,
                    offset,
                )
            }
            GpuCommand::CmdSubmit3d(info) if data.is_some() => {
                let data = data.unwrap(); // guarded by this match arm
                let cmd_size = info.size.to_native() as usize;
                match data.get_slice(0, cmd_size as u64) {
                    Ok(cmd_slice) => {
                        let mut cmd_buf = vec![0; cmd_size];
                        cmd_slice.copy_to(&mut cmd_buf[..]);
                        self.backend
                            .submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
                    }
                    Err(_) => GpuResponse::ErrInvalidParameter,
                }
            }
            _ => {
                error!("unhandled command {:?}", cmd);
                GpuResponse::ErrUnspec
            }
        }
    }

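    // Walks the available descriptor chains, staging well-formed command chains in `descriptors`
    // and immediately completing malformed ones with a zero-length response via
    // `return_descriptors`.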
    fn take_descriptors(
        mem: &GuestMemory,
        desc_iter: AvailIter,
        descriptors: &mut VecDeque<QueueDescriptor>,
        return_descriptors: &mut VecDeque<ReturnDescriptor>,
    ) {
        for desc in desc_iter {
            if desc.len as usize >= size_of::<virtio_gpu_ctrl_hdr>() && !desc.is_write_only() {
                let mut q_desc = QueueDescriptor {
                    index: desc.index,
                    addr: desc.addr,
                    len: desc.len,
                    data: None,
                    ret: None,
                };
                if let Some(extra_desc) = desc.next_descriptor() {
                    if extra_desc.is_write_only() {
                        q_desc.ret = Some((extra_desc.addr, extra_desc.len));
                    } else {
                        q_desc.data = Some((extra_desc.addr, extra_desc.len));
                    }
                    if let Some(extra_desc) = extra_desc.next_descriptor() {
                        if extra_desc.is_write_only() && q_desc.ret.is_none() {
                            q_desc.ret = Some((extra_desc.addr, extra_desc.len));
                        }
                    }
                }
                descriptors.push_back(q_desc);
            } else {
                let likely_type = mem.read_obj_from_addr(desc.addr).unwrap_or(Le32::from(0));
                debug!(
                    "ctrl queue bad descriptor index = {} len = {} write = {} type = {}",
                    desc.index,
                    desc.len,
                    desc.is_write_only(),
                    virtio_gpu_cmd_str(likely_type.to_native())
                );
                return_descriptors.push_back(ReturnDescriptor {
                    index: desc.index,
                    len: 0,
                });
            }
        }
    }

    fn take_ctrl_descriptors(&mut self, mem: &GuestMemory, desc_iter: AvailIter) {
        Frontend::take_descriptors(
            mem,
            desc_iter,
            &mut self.ctrl_descriptors,
            &mut self.return_ctrl_descriptors,
        );
    }

    fn take_cursor_descriptors(&mut self, mem: &GuestMemory, desc_iter: AvailIter) {
        Frontend::take_descriptors(
            mem,
            desc_iter,
            &mut self.cursor_descriptors,
            &mut self.return_cursor_descriptors,
        );
    }

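    // Decodes and executes one queued command, encoding the response into the chain's write-only
    // descriptor. Returns None when the response must wait on a fence; fence_poll() completes it
    // later.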
    fn process_descriptor(
        &mut self,
        mem: &GuestMemory,
        desc: QueueDescriptor,
    ) -> Option<ReturnDescriptor> {
        let mut resp = GpuResponse::ErrUnspec;
        let mut gpu_cmd = None;
        let mut len = 0;
        if let Ok(desc_mem) = mem.get_slice(desc.addr.offset(), desc.len as u64) {
            match GpuCommand::decode(desc_mem) {
                Ok(cmd) => {
                    match desc.data {
                        Some(data_desc) => {
                            match mem.get_slice(data_desc.0.offset(), data_desc.1 as u64) {
                                Ok(data_mem) => {
                                    resp = self.process_gpu_command(mem, cmd, Some(data_mem))
                                }
                                Err(e) => debug!("ctrl queue invalid data descriptor: {}", e),
                            }
                        }
                        None => resp = self.process_gpu_command(mem, cmd, None),
                    }
                    gpu_cmd = Some(cmd);
                }
                Err(e) => debug!("ctrl queue decode error: {}", e),
            }
        }
        if resp.is_err() {
            debug!("{:?} -> {:?}", gpu_cmd, resp);
        }
        if let Some(ret_desc) = desc.ret {
            if let Ok(ret_desc_mem) = mem.get_slice(ret_desc.0.offset(), ret_desc.1 as u64) {
                let mut fence_id = 0;
                let mut ctx_id = 0;
                let mut flags = 0;
                if let Some(cmd) = gpu_cmd {
                    let ctrl_hdr = cmd.ctrl_hdr();
                    if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
                        fence_id = ctrl_hdr.fence_id.to_native();
                        ctx_id = ctrl_hdr.ctx_id.to_native();
                        flags = VIRTIO_GPU_FLAG_FENCE;

                        let fence_resp = self.backend.create_fence(ctx_id, fence_id as u32);
                        if fence_resp.is_err() {
                            warn!("create_fence {} -> {:?}", fence_id, fence_resp);
                            resp = fence_resp;
                        }
                    }
                }

                // Prepare the response now, even if it will not be returned until the fence is
                // complete.
                match resp.encode(flags, fence_id, ctx_id, ret_desc_mem) {
                    Ok(l) => len = l,
                    Err(e) => debug!("ctrl queue response encode error: {}", e),
                }

                if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
                    self.fence_descriptors.push(FenceDescriptor {
                        fence_id: fence_id as u32,
                        len,
                        desc,
                    });

                    return None;
                }

                // No fence, respond now.
            }
        }
        Some(ReturnDescriptor {
            index: desc.index,
            len,
        })
    }

    fn process_ctrl(&mut self, mem: &GuestMemory) -> Option<ReturnDescriptor> {
        self.return_ctrl_descriptors.pop_front().or_else(|| {
            self.ctrl_descriptors
                .pop_front()
                .and_then(|desc| self.process_descriptor(mem, desc))
        })
    }

    fn process_cursor(&mut self, mem: &GuestMemory) -> Option<ReturnDescriptor> {
        self.return_cursor_descriptors.pop_front().or_else(|| {
            self.cursor_descriptors
                .pop_front()
                .and_then(|desc| self.process_descriptor(mem, desc))
        })
    }

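    // Completes fenced responses whose fences have signaled by moving them onto the control
    // queue's return list.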
    fn fence_poll(&mut self) {
        let fence_id = self.backend.fence_poll();
        let return_descs = &mut self.return_ctrl_descriptors;
        self.fence_descriptors.retain(|f_desc| {
            if f_desc.fence_id > fence_id {
                true
            } else {
                return_descs.push_back(ReturnDescriptor {
                    index: f_desc.desc.index,
                    len: f_desc.len,
                });
                false
            }
        })
    }
}

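// Per-device state owned by the worker thread spawned in activate(): the virtqueues and their
// eventfds, interrupt plumbing, and the command-processing Frontend.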
struct Worker {
    exit_evt: EventFd,
    mem: GuestMemory,
    interrupt_evt: EventFd,
    interrupt_resample_evt: EventFd,
    interrupt_status: Arc<AtomicUsize>,
    ctrl_queue: Queue,
    ctrl_evt: EventFd,
    cursor_queue: Queue,
    cursor_evt: EventFd,
    resource_bridge: Option<ResourceResponseSocket>,
    kill_evt: EventFd,
    state: Frontend,
}

impl Worker {
    fn signal_used_queue(&self) {
        self.interrupt_status
            .fetch_or(INTERRUPT_STATUS_USED_RING as usize, Ordering::SeqCst);
        let _ = self.interrupt_evt.write(1);
    }

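    // The worker's event loop: waits for queue, display, resource bridge, resample, and kill
    // events, then drains the cursor and control queues and signals the guest when descriptors
    // have been returned.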
    fn run(&mut self) {
        #[derive(PollToken)]
        enum Token {
            CtrlQueue,
            CursorQueue,
            Display,
            ResourceBridge,
            InterruptResample,
            Kill,
        }

        let poll_ctx: PollContext<Token> = match PollContext::new()
            .and_then(|pc| pc.add(&self.ctrl_evt, Token::CtrlQueue).and(Ok(pc)))
            .and_then(|pc| pc.add(&self.cursor_evt, Token::CursorQueue).and(Ok(pc)))
            .and_then(|pc| {
                pc.add(&*self.state.display().borrow(), Token::Display)
                    .and(Ok(pc))
            })
            .and_then(|pc| {
                pc.add(&self.interrupt_resample_evt, Token::InterruptResample)
                    .and(Ok(pc))
            })
            .and_then(|pc| pc.add(&self.kill_evt, Token::Kill).and(Ok(pc)))
        {
            Ok(pc) => pc,
            Err(e) => {
                error!("failed creating PollContext: {}", e);
                return;
            }
        };

        if let Some(ref resource_bridge) = self.resource_bridge {
            if let Err(e) = poll_ctx.add(resource_bridge, Token::ResourceBridge) {
                error!("failed to add resource bridge to PollContext: {}", e);
            }
        }

        'poll: loop {
            // If there are outstanding fences, wake up early to poll them.
            let duration = if !self.state.fence_descriptors.is_empty() {
                Duration::from_millis(FENCE_POLL_MS)
            } else {
                Duration::new(i64::MAX as u64, 0)
            };

            let events = match poll_ctx.wait_timeout(duration) {
                Ok(v) => v,
                Err(e) => {
                    error!("failed polling for events: {}", e);
                    break;
                }
            };
            let mut signal_used = false;
            let mut process_resource_bridge = false;
            for event in events.iter_readable() {
                match event.token() {
                    Token::CtrlQueue => {
                        let _ = self.ctrl_evt.read();
                        self.state
                            .take_ctrl_descriptors(&self.mem, self.ctrl_queue.iter(&self.mem));
                    }
                    Token::CursorQueue => {
                        let _ = self.cursor_evt.read();
                        self.state
                            .take_cursor_descriptors(&self.mem, self.cursor_queue.iter(&self.mem));
                    }
                    Token::Display => {
                        let close_requested = self.state.process_display();
                        if close_requested {
                            let _ = self.exit_evt.write(1);
                        }
                    }
                    Token::ResourceBridge => process_resource_bridge = true,
                    Token::InterruptResample => {
                        let _ = self.interrupt_resample_evt.read();
                        if self.interrupt_status.load(Ordering::SeqCst) != 0 {
                            self.interrupt_evt.write(1).unwrap();
                        }
                    }
                    Token::Kill => {
                        break 'poll;
                    }
                }
            }

            // All cursor commands go first because they have higher priority.
            loop {
                match self.state.process_cursor(&self.mem) {
                    Some(ReturnDescriptor { index, len }) => {
                        self.cursor_queue.add_used(&self.mem, index, len);
                        signal_used = true;
                    }
                    None => break,
                }
            }

            self.state.fence_poll();

            loop {
                match self.state.process_ctrl(&self.mem) {
                    Some(ReturnDescriptor { index, len }) => {
                        self.ctrl_queue.add_used(&self.mem, index, len);
                        signal_used = true;
                    }
                    None => break,
                }
            }

            // Process the entire control queue before the resource bridge in case a resource is
            // created or destroyed by the control queue. Processing the resource bridge first may
            // lead to a race condition.
            if process_resource_bridge {
                if let Some(ref resource_bridge) = self.resource_bridge {
                    self.state.process_resource_bridge(resource_bridge);
                }
            }

            if signal_used {
                self.signal_used_queue();
            }
        }
    }
}

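// The virtio-gpu device. Holds the pieces needed to start the worker: the exit event, the
// optional resource bridge, and the path to the Wayland socket used for the display.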
pub struct Gpu {
    config_event: bool,
    exit_evt: EventFd,
    resource_bridge: Option<ResourceResponseSocket>,
    kill_evt: Option<EventFd>,
    wayland_socket_path: PathBuf,
}

impl Gpu {
    pub fn new<P: AsRef<Path>>(
        exit_evt: EventFd,
        resource_bridge: Option<ResourceResponseSocket>,
        wayland_socket_path: P,
    ) -> Gpu {
        Gpu {
            config_event: false,
            exit_evt,
            resource_bridge,
            kill_evt: None,
            wayland_socket_path: wayland_socket_path.as_ref().to_path_buf(),
        }
    }

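    // Builds the virtio-gpu config space: pending display events, one scanout, and two capability
    // sets.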
    fn get_config(&self) -> virtio_gpu_config {
        let mut events_read = 0;
        if self.config_event {
            events_read |= VIRTIO_GPU_EVENT_DISPLAY;
        }
        virtio_gpu_config {
            events_read: Le32::from(events_read),
            events_clear: Le32::from(0),
            num_scanouts: Le32::from(1),
            num_capsets: Le32::from(2),
        }
    }
}

impl Drop for Gpu {
    fn drop(&mut self) {
        if let Some(kill_evt) = self.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
    }
}

impl VirtioDevice for Gpu {
    fn keep_fds(&self) -> Vec<RawFd> {
        let mut keep_fds = Vec::new();
        // TODO(davidriley): Remove once virgl has another path to include
        // debugging logs.
        if cfg!(debug_assertions) {
            keep_fds.push(libc::STDOUT_FILENO);
            keep_fds.push(libc::STDERR_FILENO);
        }
        keep_fds.push(self.exit_evt.as_raw_fd());
        if let Some(ref resource_bridge) = self.resource_bridge {
            keep_fds.push(resource_bridge.as_raw_fd());
        }
        keep_fds
    }

    fn device_type(&self) -> u32 {
        TYPE_GPU
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    fn features(&self) -> u64 {
        1 << VIRTIO_GPU_F_VIRGL | 1 << VIRTIO_F_VERSION_1
    }

    fn ack_features(&mut self, value: u64) {
        let _ = value;
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        let offset = offset as usize;
        let len = data.len();
        let cfg = self.get_config();
        let cfg_slice = cfg.as_slice();
        if offset + len <= cfg_slice.len() {
            data.copy_from_slice(&cfg_slice[offset..offset + len]);
        }
    }

    fn write_config(&mut self, offset: u64, data: &[u8]) {
        let offset = offset as usize;
        let len = data.len();
        let mut cfg = self.get_config();
        {
            let cfg_slice = cfg.as_mut_slice();
            if offset + len <= cfg_slice.len() {
                cfg_slice[offset..offset + len].copy_from_slice(data);
            }
        }
        if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
            self.config_event = false;
        }
    }

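    // Takes ownership of the queues and their eventfds and spawns the worker thread that
    // services them.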
    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt_evt: EventFd,
        interrupt_resample_evt: EventFd,
        interrupt_status: Arc<AtomicUsize>,
        mut queues: Vec<Queue>,
        mut queue_evts: Vec<EventFd>,
    ) {
        if queues.len() != QUEUE_SIZES.len() || queue_evts.len() != QUEUE_SIZES.len() {
            return;
        }

        let exit_evt = match self.exit_evt.try_clone() {
            Ok(e) => e,
            Err(e) => {
                error!("error cloning exit eventfd: {}", e);
                return;
            }
        };

        let (self_kill_evt, kill_evt) = match EventFd::new().and_then(|e| Ok((e.try_clone()?, e))) {
            Ok(v) => v,
            Err(e) => {
                error!("error creating kill EventFd pair: {}", e);
                return;
            }
        };
        self.kill_evt = Some(self_kill_evt);

        let resource_bridge = self.resource_bridge.take();

        let ctrl_queue = queues.remove(0);
        let ctrl_evt = queue_evts.remove(0);
        let cursor_queue = queues.remove(0);
        let cursor_evt = queue_evts.remove(0);
        let socket_path = self.wayland_socket_path.clone();
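        // GPU setup (DRM render node, display connection, renderer) happens on the worker
        // thread; any failure below is logged and the worker exits without servicing the queues.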
        let worker_result =
            thread::Builder::new()
                .name("virtio_gpu".to_string())
                .spawn(move || {
                    const UNDESIRED_CARDS: &[&str] = &["vgem", "pvr"];
                    let drm_card = match gpu_buffer::rendernode::open_device(UNDESIRED_CARDS) {
                        Ok(f) => f,
                        Err(()) => {
                            error!("failed to open card");
                            return;
                        }
                    };

                    let device = match Device::new(drm_card) {
                        Ok(d) => d,
                        Err(()) => {
                            error!("failed to open device");
                            return;
                        }
                    };

                    let display = match GpuDisplay::new(socket_path) {
                        Ok(c) => c,
                        Err(e) => {
                            error!("failed to open display: {}", e);
                            return;
                        }
                    };

                    if cfg!(debug_assertions) {
                        let ret = unsafe { libc::dup2(libc::STDOUT_FILENO, libc::STDERR_FILENO) };
                        if ret == -1 {
                            warn!("unable to dup2 stdout to stderr: {}", Error::last());
                        }
                    }

                    let renderer = match Renderer::init() {
                        Ok(r) => r,
                        Err(e) => {
                            error!("failed to initialize gpu renderer: {}", e);
                            return;
                        }
                    };

                    Worker {
                        exit_evt,
                        mem,
                        interrupt_evt,
                        interrupt_resample_evt,
                        interrupt_status,
                        ctrl_queue,
                        ctrl_evt,
                        cursor_queue,
                        cursor_evt,
                        resource_bridge,
                        kill_evt,
                        state: Frontend::new(Backend::new(device, display, renderer)),
                    }
                    .run()
                });

        if let Err(e) = worker_result {
            error!("failed to spawn virtio_gpu worker: {}", e);
            return;
        }
    }
}