Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 1 | // Copyright 2018 The Chromium OS Authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | extern crate gpu_buffer; |
| 6 | extern crate gpu_display; |
| 7 | extern crate gpu_renderer; |
| 8 | |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 9 | mod backend; |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 10 | mod protocol; |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 11 | |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 12 | use std::cell::RefCell; |
| 13 | use std::collections::VecDeque; |
David Riley | f89e0b5 | 2018-05-17 17:14:42 -0700 | [diff] [blame] | 14 | use std::i64; |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 15 | use std::mem::size_of; |
David Riley | b22b613 | 2018-08-20 08:11:42 -0700 | [diff] [blame] | 16 | use std::os::unix::io::{AsRawFd, RawFd}; |
| 17 | use std::path::{Path, PathBuf}; |
David Riley | f89e0b5 | 2018-05-17 17:14:42 -0700 | [diff] [blame] | 18 | use std::rc::Rc; |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 19 | use std::sync::atomic::{AtomicUsize, Ordering}; |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 20 | use std::sync::Arc; |
David Riley | d48445e | 2019-01-28 11:25:10 -0800 | [diff] [blame] | 21 | use std::thread; |
David Riley | f89e0b5 | 2018-05-17 17:14:42 -0700 | [diff] [blame] | 22 | use std::time::Duration; |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 23 | |
| 24 | use data_model::*; |
| 25 | |
David Riley | fccfc05 | 2019-01-28 16:59:18 -0800 | [diff] [blame] | 26 | use sys_util::{Error, EventFd, GuestAddress, GuestMemory, PollContext, PollToken}; |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 27 | |
| 28 | use self::gpu_buffer::Device; |
| 29 | use self::gpu_display::*; |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 30 | use self::gpu_renderer::{format_fourcc, Renderer}; |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 31 | |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 32 | use super::{ |
Zach Reizner | aa57566 | 2018-08-15 10:46:32 -0700 | [diff] [blame] | 33 | resource_bridge::*, AvailIter, Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_GPU, |
| 34 | VIRTIO_F_VERSION_1, |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 35 | }; |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 36 | |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 37 | use self::backend::Backend; |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 38 | use self::protocol::*; |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 39 | |
// First queue is for virtio gpu commands. Second queue is for cursor commands, which we expect
// there to be fewer of.
const QUEUE_SIZES: &[u16] = &[256, 16];
// How often the worker wakes up to poll for completed fences while any fenced
// responses are outstanding (see the timeout computation in `Worker::run`).
const FENCE_POLL_MS: u64 = 1;
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 44 | |
// A virtqueue descriptor chain decomposed into the pieces the GPU command
// processor needs (see `Frontend::take_descriptors` for how it is built).
struct QueueDescriptor {
    // Index of the chain's head descriptor, used when returning it to the guest.
    index: u16,
    // Guest address of the read-only buffer holding the command header.
    addr: GuestAddress,
    // Length in bytes of the command buffer at `addr`.
    len: u32,
    // Optional read-only payload buffer (guest address, length), e.g. the
    // mem entries of ResourceAttachBacking or the commands of CmdSubmit3d.
    data: Option<(GuestAddress, u32)>,
    // Optional write-only buffer (guest address, length) into which the
    // response is encoded.
    ret: Option<(GuestAddress, u32)>,
}
| 52 | |
// A finished descriptor chain, ready to be placed on the used ring.
struct ReturnDescriptor {
    // Index of the chain's head descriptor.
    index: u16,
    // Number of bytes written into the chain's write-only buffer.
    len: u32,
}
| 57 | |
// A descriptor whose (already encoded) response is withheld from the guest
// until its fence completes; drained by `Frontend::fence_poll`.
struct FenceDescriptor {
    // Fence id that must complete before the response may be returned.
    fence_id: u32,
    // Response length to report when the descriptor is finally returned.
    len: u32,
    // The original descriptor chain being held back.
    desc: QueueDescriptor,
}
| 63 | |
// Frontend of the virtio-gpu device: tracks descriptor chains taken off the
// control and cursor queues and dispatches decoded GPU commands to the backend.
struct Frontend {
    // Control-queue chains waiting to be processed.
    ctrl_descriptors: VecDeque<QueueDescriptor>,
    // Cursor-queue chains waiting to be processed.
    cursor_descriptors: VecDeque<QueueDescriptor>,
    // Finished control-queue chains ready to return to the guest.
    return_ctrl_descriptors: VecDeque<ReturnDescriptor>,
    // Finished cursor-queue chains ready to return to the guest.
    return_cursor_descriptors: VecDeque<ReturnDescriptor>,
    // Chains whose responses are deferred until their fences complete.
    fence_descriptors: Vec<FenceDescriptor>,
    // Rendering/display backend that actually executes GPU commands.
    backend: Backend,
}
| 72 | |
| 73 | impl Frontend { |
| 74 | fn new(backend: Backend) -> Frontend { |
| 75 | Frontend { |
| 76 | ctrl_descriptors: Default::default(), |
| 77 | cursor_descriptors: Default::default(), |
| 78 | return_ctrl_descriptors: Default::default(), |
| 79 | return_cursor_descriptors: Default::default(), |
David Riley | f89e0b5 | 2018-05-17 17:14:42 -0700 | [diff] [blame] | 80 | fence_descriptors: Default::default(), |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 81 | backend, |
| 82 | } |
| 83 | } |
| 84 | |
    // Returns the shared display handle so callers (e.g. the worker's poll
    // loop) can register it for event polling.
    fn display(&self) -> &Rc<RefCell<GpuDisplay>> {
        self.backend.display()
    }
| 88 | |
    // Pumps pending display events via the backend; returns true when the
    // display requested close (the caller then signals VM exit).
    fn process_display(&mut self) -> bool {
        self.backend.process_display()
    }
| 92 | |
    // Forwards a pending request on the resource bridge socket to the backend
    // for servicing.
    fn process_resource_bridge(&self, resource_bridge: &ResourceResponseSocket) {
        self.backend.process_resource_bridge(resource_bridge);
    }
| 96 | |
    // Executes one decoded GPU command against the backend and returns the
    // response to encode for the guest. `data` is the command's optional
    // payload buffer; commands that require it (ResourceAttachBacking,
    // CmdSubmit3d) answer ErrUnspec via the catch-all arm when it is absent.
    fn process_gpu_command(
        &mut self,
        mem: &GuestMemory,
        cmd: GpuCommand,
        data: Option<VolatileSlice>,
    ) -> GpuResponse {
        // Make context 0 current before touching renderer state.
        self.backend.force_ctx_0();

        match cmd {
            GpuCommand::GetDisplayInfo(_) => {
                GpuResponse::OkDisplayInfo(self.backend.display_info().to_vec())
            }
            GpuCommand::ResourceCreate2d(info) => {
                let format = info.format.to_native();
                // The virtio pipe format must map to a fourcc the buffer
                // allocator understands before a 2D resource can be created.
                match format_fourcc(format) {
                    Some(fourcc) => self.backend.create_resource_2d(
                        info.resource_id.to_native(),
                        info.width.to_native(),
                        info.height.to_native(),
                        fourcc,
                    ),
                    None => {
                        warn!(
                            "failed to create resource with unrecognized pipe format {}",
                            format
                        );
                        GpuResponse::ErrInvalidParameter
                    }
                }
            }
            GpuCommand::ResourceUnref(info) => {
                self.backend.unref_resource(info.resource_id.to_native())
            }
            GpuCommand::SetScanout(info) => self.backend.set_scanout(info.resource_id.to_native()),
            GpuCommand::ResourceFlush(info) => self.backend.flush_resource(
                info.resource_id.to_native(),
                info.r.x.to_native(),
                info.r.y.to_native(),
                info.r.width.to_native(),
                info.r.height.to_native(),
            ),
            GpuCommand::TransferToHost2d(info) => self.backend.transfer_to_resource_2d(
                info.resource_id.to_native(),
                info.r.x.to_native(),
                info.r.y.to_native(),
                info.r.width.to_native(),
                info.r.height.to_native(),
                info.offset.to_native(),
                mem,
            ),
            GpuCommand::ResourceAttachBacking(info) if data.is_some() => {
                let data = data.unwrap(); // guarded by this match arm
                // The payload buffer holds `nr_entries` virtio_gpu_mem_entry
                // structs describing the guest pages that back the resource.
                let entry_count = info.nr_entries.to_native() as usize;
                let mut iovecs = Vec::with_capacity(entry_count);
                for i in 0..entry_count {
                    if let Ok(entry_ref) =
                        data.get_ref((i * size_of::<virtio_gpu_mem_entry>()) as u64)
                    {
                        let entry: virtio_gpu_mem_entry = entry_ref.load();
                        let addr = GuestAddress(entry.addr.to_native());
                        let len = entry.length.to_native() as usize;
                        iovecs.push((addr, len))
                    } else {
                        // Entry index past the end of the payload buffer.
                        return GpuResponse::ErrUnspec;
                    }
                }
                self.backend
                    .attach_backing(info.resource_id.to_native(), mem, iovecs)
            }
            GpuCommand::ResourceDetachBacking(info) => {
                self.backend.detach_backing(info.resource_id.to_native())
            }
            GpuCommand::UpdateCursor(info) => self.backend.update_cursor(
                info.resource_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::MoveCursor(info) => self
                .backend
                .move_cursor(info.pos.x.into(), info.pos.y.into()),
            GpuCommand::GetCapsetInfo(info) => {
                self.backend.get_capset_info(info.capset_index.to_native())
            }
            GpuCommand::GetCapset(info) => self
                .backend
                .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
            GpuCommand::CtxCreate(info) => self
                .backend
                .create_renderer_context(info.hdr.ctx_id.to_native()),
            GpuCommand::CtxDestroy(info) => self
                .backend
                .destroy_renderer_context(info.hdr.ctx_id.to_native()),
            GpuCommand::CtxAttachResource(info) => self
                .backend
                .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::CtxDetachResource(info) => self
                .backend
                .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::ResourceCreate3d(info) => {
                // Unpack all little-endian fields up front, then hand the
                // whole description to the backend.
                let id = info.resource_id.to_native();
                let target = info.target.to_native();
                let format = info.format.to_native();
                let bind = info.bind.to_native();
                let width = info.width.to_native();
                let height = info.height.to_native();
                let depth = info.depth.to_native();
                let array_size = info.array_size.to_native();
                let last_level = info.last_level.to_native();
                let nr_samples = info.nr_samples.to_native();
                let flags = info.flags.to_native();
                self.backend.resource_create_3d(
                    id, target, format, bind, width, height, depth, array_size, last_level,
                    nr_samples, flags,
                )
            }
            GpuCommand::TransferToHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let res_id = info.resource_id.to_native();
                let x = info.box_.x.to_native();
                let y = info.box_.y.to_native();
                let z = info.box_.z.to_native();
                let width = info.box_.w.to_native();
                let height = info.box_.h.to_native();
                let depth = info.box_.d.to_native();
                let level = info.level.to_native();
                let stride = info.stride.to_native();
                let layer_stride = info.layer_stride.to_native();
                let offset = info.offset.to_native();
                self.backend.transfer_to_resource_3d(
                    ctx_id,
                    res_id,
                    x,
                    y,
                    z,
                    width,
                    height,
                    depth,
                    level,
                    stride,
                    layer_stride,
                    offset,
                )
            }
            GpuCommand::TransferFromHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let res_id = info.resource_id.to_native();
                let x = info.box_.x.to_native();
                let y = info.box_.y.to_native();
                let z = info.box_.z.to_native();
                let width = info.box_.w.to_native();
                let height = info.box_.h.to_native();
                let depth = info.box_.d.to_native();
                let level = info.level.to_native();
                let stride = info.stride.to_native();
                let layer_stride = info.layer_stride.to_native();
                let offset = info.offset.to_native();
                self.backend.transfer_from_resource_3d(
                    ctx_id,
                    res_id,
                    x,
                    y,
                    z,
                    width,
                    height,
                    depth,
                    level,
                    stride,
                    layer_stride,
                    offset,
                )
            }
            GpuCommand::CmdSubmit3d(info) if data.is_some() => {
                let data = data.unwrap(); // guarded by this match arm
                let cmd_size = info.size.to_native() as usize;
                match data.get_slice(0, cmd_size as u64) {
                    Ok(cmd_slice) => {
                        // Copy the command stream out of guest memory before
                        // handing it to the renderer.
                        let mut cmd_buf = vec![0; cmd_size];
                        cmd_slice.copy_to(&mut cmd_buf[..]);
                        self.backend
                            .submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
                    }
                    Err(_) => GpuResponse::ErrInvalidParameter,
                }
            }
            _ => {
                error!("unhandled command {:?}", cmd);
                GpuResponse::ErrUnspec
            }
        }
    }
| 287 | |
    // Walks the available chains in `desc_iter`, turning each valid one into a
    // `QueueDescriptor` on `descriptors`. A valid chain begins with a
    // read-only buffer at least as large as a command header; an optional
    // second buffer supplies payload data (if read-only) or the response area
    // (if write-only), and an optional third write-only buffer supplies the
    // response area when the second did not. Malformed chains are immediately
    // queued on `return_descriptors` with zero written length.
    fn take_descriptors(
        mem: &GuestMemory,
        desc_iter: AvailIter,
        descriptors: &mut VecDeque<QueueDescriptor>,
        return_descriptors: &mut VecDeque<ReturnDescriptor>,
    ) {
        for desc in desc_iter {
            if desc.len as usize >= size_of::<virtio_gpu_ctrl_hdr>() && !desc.is_write_only() {
                let mut q_desc = QueueDescriptor {
                    index: desc.index,
                    addr: desc.addr,
                    len: desc.len,
                    data: None,
                    ret: None,
                };
                if let Some(extra_desc) = desc.next_descriptor() {
                    // Write-only second buffer is the response area; otherwise
                    // it carries command payload data.
                    if extra_desc.is_write_only() {
                        q_desc.ret = Some((extra_desc.addr, extra_desc.len));
                    } else {
                        q_desc.data = Some((extra_desc.addr, extra_desc.len));
                    }
                    if let Some(extra_desc) = extra_desc.next_descriptor() {
                        // Only fill in the response area from the third buffer
                        // if the second buffer didn't already provide one.
                        if extra_desc.is_write_only() && q_desc.ret.is_none() {
                            q_desc.ret = Some((extra_desc.addr, extra_desc.len));
                        }
                    }
                }
                descriptors.push_back(q_desc);
            } else {
                // Chain can't hold a command header (or is write-only): log
                // which command it probably contained, then return it unused.
                let likely_type = mem.read_obj_from_addr(desc.addr).unwrap_or(Le32::from(0));
                debug!(
                    "ctrl queue bad descriptor index = {} len = {} write = {} type = {}",
                    desc.index,
                    desc.len,
                    desc.is_write_only(),
                    virtio_gpu_cmd_str(likely_type.to_native())
                );
                return_descriptors.push_back(ReturnDescriptor {
                    index: desc.index,
                    len: 0,
                });
            }
        }
    }
| 332 | |
    // Drains newly-available control-queue chains into the control pending and
    // return queues (see `take_descriptors`).
    fn take_ctrl_descriptors(&mut self, mem: &GuestMemory, desc_iter: AvailIter) {
        Frontend::take_descriptors(
            mem,
            desc_iter,
            &mut self.ctrl_descriptors,
            &mut self.return_ctrl_descriptors,
        );
    }
| 341 | |
    // Drains newly-available cursor-queue chains into the cursor pending and
    // return queues (see `take_descriptors`).
    fn take_cursor_descriptors(&mut self, mem: &GuestMemory, desc_iter: AvailIter) {
        Frontend::take_descriptors(
            mem,
            desc_iter,
            &mut self.cursor_descriptors,
            &mut self.return_cursor_descriptors,
        );
    }
| 350 | |
    // Fully processes one queued chain: decodes the command from guest memory,
    // executes it, and encodes the response into the chain's write-only
    // buffer. Returns the chain ready for the used ring, or `None` when the
    // command carried VIRTIO_GPU_FLAG_FENCE — in that case the chain is parked
    // on `fence_descriptors` until `fence_poll` observes the fence completing.
    fn process_descriptor(
        &mut self,
        mem: &GuestMemory,
        desc: QueueDescriptor,
    ) -> Option<ReturnDescriptor> {
        // Default response if anything below fails to decode or execute.
        let mut resp = GpuResponse::ErrUnspec;
        let mut gpu_cmd = None;
        let mut len = 0;
        if let Ok(desc_mem) = mem.get_slice(desc.addr.offset(), desc.len as u64) {
            match GpuCommand::decode(desc_mem) {
                Ok(cmd) => {
                    match desc.data {
                        Some(data_desc) => {
                            match mem.get_slice(data_desc.0.offset(), data_desc.1 as u64) {
                                Ok(data_mem) => {
                                    resp = self.process_gpu_command(mem, cmd, Some(data_mem))
                                }
                                Err(e) => debug!("ctrl queue invalid data descriptor: {}", e),
                            }
                        }
                        None => resp = self.process_gpu_command(mem, cmd, None),
                    }
                    gpu_cmd = Some(cmd);
                }
                Err(e) => debug!("ctrl queue decode error: {}", e),
            }
        }
        if resp.is_err() {
            debug!("{:?} -> {:?}", gpu_cmd, resp);
        }
        if let Some(ret_desc) = desc.ret {
            if let Ok(ret_desc_mem) = mem.get_slice(ret_desc.0.offset(), ret_desc.1 as u64) {
                let mut fence_id = 0;
                let mut ctx_id = 0;
                let mut flags = 0;
                if let Some(cmd) = gpu_cmd {
                    let ctrl_hdr = cmd.ctrl_hdr();
                    if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
                        fence_id = ctrl_hdr.fence_id.to_native();
                        ctx_id = ctrl_hdr.ctx_id.to_native();
                        flags = VIRTIO_GPU_FLAG_FENCE;

                        // Create the fence the guest asked for; on failure the
                        // error replaces the command's response.
                        let fence_resp = self.backend.create_fence(ctx_id, fence_id as u32);
                        if fence_resp.is_err() {
                            warn!("create_fence {} -> {:?}", fence_id, fence_resp);
                            resp = fence_resp;
                        }
                    }
                }

                // Prepare the response now, even if it is going to wait until
                // fence is complete.
                match resp.encode(flags, fence_id, ctx_id, ret_desc_mem) {
                    Ok(l) => len = l,
                    Err(e) => debug!("ctrl queue response encode error: {}", e),
                }

                if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
                    // Park the chain until the fence completes; `fence_poll`
                    // will move it onto the return queue.
                    self.fence_descriptors.push(FenceDescriptor {
                        fence_id: fence_id as u32,
                        len,
                        desc,
                    });

                    return None;
                }

                // No fence, respond now.
            }
        }
        Some(ReturnDescriptor {
            index: desc.index,
            len,
        })
    }
| 426 | |
| 427 | fn process_ctrl(&mut self, mem: &GuestMemory) -> Option<ReturnDescriptor> { |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 428 | self.return_ctrl_descriptors.pop_front().or_else(|| { |
| 429 | self.ctrl_descriptors |
| 430 | .pop_front() |
| 431 | .and_then(|desc| self.process_descriptor(mem, desc)) |
| 432 | }) |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 433 | } |
| 434 | |
| 435 | fn process_cursor(&mut self, mem: &GuestMemory) -> Option<ReturnDescriptor> { |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 436 | self.return_cursor_descriptors.pop_front().or_else(|| { |
| 437 | self.cursor_descriptors |
| 438 | .pop_front() |
| 439 | .and_then(|desc| self.process_descriptor(mem, desc)) |
| 440 | }) |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 441 | } |
David Riley | f89e0b5 | 2018-05-17 17:14:42 -0700 | [diff] [blame] | 442 | |
| 443 | fn fence_poll(&mut self) { |
| 444 | let fence_id = self.backend.fence_poll(); |
| 445 | let return_descs = &mut self.return_ctrl_descriptors; |
| 446 | self.fence_descriptors.retain(|f_desc| { |
| 447 | if f_desc.fence_id > fence_id { |
| 448 | true |
| 449 | } else { |
| 450 | return_descs.push_back(ReturnDescriptor { |
Zach Reizner | 55a9e50 | 2018-10-03 10:22:32 -0700 | [diff] [blame] | 451 | index: f_desc.desc.index, |
| 452 | len: f_desc.len, |
| 453 | }); |
David Riley | f89e0b5 | 2018-05-17 17:14:42 -0700 | [diff] [blame] | 454 | false |
| 455 | } |
| 456 | }) |
| 457 | } |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 458 | } |
| 459 | |
// State owned by the device's worker thread: the virtqueues, interrupt
// plumbing, and the `Frontend` command processor.
struct Worker {
    // Written to request VM exit (e.g. when the display window is closed).
    exit_evt: EventFd,
    mem: GuestMemory,
    // Written to raise the guest interrupt.
    interrupt_evt: EventFd,
    // Readable when the interrupt should be re-checked; the interrupt is
    // re-raised if any status bits are still set.
    interrupt_resample_evt: EventFd,
    // INTERRUPT_STATUS_* bits shared with the interrupt delivery path.
    interrupt_status: Arc<AtomicUsize>,
    ctrl_queue: Queue,
    // Signaled by the guest when the control queue has new descriptors.
    ctrl_evt: EventFd,
    cursor_queue: Queue,
    // Signaled by the guest when the cursor queue has new descriptors.
    cursor_evt: EventFd,
    // Optional socket over which resource requests from another device are
    // answered (see `Frontend::process_resource_bridge`).
    resource_bridge: Option<ResourceResponseSocket>,
    // Readable when the worker should shut down.
    kill_evt: EventFd,
    // Frontend that decodes and executes GPU commands.
    state: Frontend,
}
| 474 | |
| 475 | impl Worker { |
    // Sets the used-ring bit in the interrupt status and signals the guest
    // interrupt so it will scan the used rings.
    fn signal_used_queue(&self) {
        self.interrupt_status
            .fetch_or(INTERRUPT_STATUS_USED_RING as usize, Ordering::SeqCst);
        // A failed eventfd write is ignored; nothing can be done about it.
        let _ = self.interrupt_evt.write(1);
    }
| 481 | |
    // Worker event loop: polls the queue eventfds, the display, the resource
    // bridge, the interrupt-resample event, and the kill event, processing GPU
    // commands until told to stop.
    fn run(&mut self) {
        #[derive(PollToken)]
        enum Token {
            CtrlQueue,
            CursorQueue,
            Display,
            ResourceBridge,
            InterruptResample,
            Kill,
        }

        let poll_ctx: PollContext<Token> = match PollContext::new()
            .and_then(|pc| pc.add(&self.ctrl_evt, Token::CtrlQueue).and(Ok(pc)))
            .and_then(|pc| pc.add(&self.cursor_evt, Token::CursorQueue).and(Ok(pc)))
            .and_then(|pc| {
                pc.add(&*self.state.display().borrow(), Token::Display)
                    .and(Ok(pc))
            })
            .and_then(|pc| {
                pc.add(&self.interrupt_resample_evt, Token::InterruptResample)
                    .and(Ok(pc))
            })
            .and_then(|pc| pc.add(&self.kill_evt, Token::Kill).and(Ok(pc)))
        {
            Ok(pc) => pc,
            Err(e) => {
                error!("failed creating PollContext: {}", e);
                return;
            }
        };

        // The resource bridge is optional, so it is registered separately and
        // a failure to add it is logged but not fatal.
        if let Some(ref resource_bridge) = self.resource_bridge {
            if let Err(e) = poll_ctx.add(resource_bridge, Token::ResourceBridge) {
                error!("failed to add resource bridge to PollContext: {}", e);
            }
        }

        'poll: loop {
            // If there are outstanding fences, wake up early to poll them.
            let duration = if !self.state.fence_descriptors.is_empty() {
                Duration::from_millis(FENCE_POLL_MS)
            } else {
                Duration::new(i64::MAX as u64, 0)
            };

            let events = match poll_ctx.wait_timeout(duration) {
                Ok(v) => v,
                Err(e) => {
                    error!("failed polling for events: {}", e);
                    break;
                }
            };
            let mut signal_used = false;
            let mut process_resource_bridge = false;
            for event in events.iter_readable() {
                match event.token() {
                    Token::CtrlQueue => {
                        // Consume the notification, then pull all available
                        // control-queue chains into the frontend.
                        let _ = self.ctrl_evt.read();
                        self.state
                            .take_ctrl_descriptors(&self.mem, self.ctrl_queue.iter(&self.mem));
                    }
                    Token::CursorQueue => {
                        let _ = self.cursor_evt.read();
                        self.state
                            .take_cursor_descriptors(&self.mem, self.cursor_queue.iter(&self.mem));
                    }
                    Token::Display => {
                        // Close requests from the display trigger a VM exit.
                        let close_requested = self.state.process_display();
                        if close_requested {
                            let _ = self.exit_evt.write(1);
                        }
                    }
                    // Deferred until after the control queue is drained below.
                    Token::ResourceBridge => process_resource_bridge = true,
                    Token::InterruptResample => {
                        let _ = self.interrupt_resample_evt.read();
                        // Re-raise the interrupt if any status bits remain set.
                        if self.interrupt_status.load(Ordering::SeqCst) != 0 {
                            self.interrupt_evt.write(1).unwrap();
                        }
                    }
                    Token::Kill => {
                        break 'poll;
                    }
                }
            }

            // All cursor commands go first because they have higher priority.
            loop {
                match self.state.process_cursor(&self.mem) {
                    Some(ReturnDescriptor { index, len }) => {
                        self.cursor_queue.add_used(&self.mem, index, len);
                        signal_used = true;
                    }
                    None => break,
                }
            }

            // Release any descriptors whose fences have completed so they are
            // returned by the control-queue drain below.
            self.state.fence_poll();

            loop {
                match self.state.process_ctrl(&self.mem) {
                    Some(ReturnDescriptor { index, len }) => {
                        self.ctrl_queue.add_used(&self.mem, index, len);
                        signal_used = true;
                    }
                    None => break,
                }
            }

            // Process the entire control queue before the resource bridge in case a resource is
            // created or destroyed by the control queue. Processing the resource bridge first may
            // lead to a race condition.
            if process_resource_bridge {
                if let Some(ref resource_bridge) = self.resource_bridge {
                    self.state.process_resource_bridge(resource_bridge);
                }
            }

            if signal_used {
                self.signal_used_queue();
            }
        }
    }
| 604 | } |
| 605 | |
// The virtio-gpu device.
pub struct Gpu {
    // When true, `get_config` reports VIRTIO_GPU_EVENT_DISPLAY in events_read.
    config_event: bool,
    // Written to request VM exit; handed to the worker thread.
    exit_evt: EventFd,
    // Optional socket for answering resource requests from another device.
    resource_bridge: Option<ResourceResponseSocket>,
    // Written in `drop` to stop the worker; `None` until a worker exists.
    kill_evt: Option<EventFd>,
    // Wayland socket path captured at construction; its consumer is outside
    // this chunk (presumably passed to the backend on activation — confirm).
    wayland_socket_path: PathBuf,
}
| 613 | |
| 614 | impl Gpu { |
Zach Reizner | aa57566 | 2018-08-15 10:46:32 -0700 | [diff] [blame] | 615 | pub fn new<P: AsRef<Path>>( |
| 616 | exit_evt: EventFd, |
| 617 | resource_bridge: Option<ResourceResponseSocket>, |
| 618 | wayland_socket_path: P, |
| 619 | ) -> Gpu { |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 620 | Gpu { |
| 621 | config_event: false, |
| 622 | exit_evt, |
Zach Reizner | aa57566 | 2018-08-15 10:46:32 -0700 | [diff] [blame] | 623 | resource_bridge, |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 624 | kill_evt: None, |
David Riley | b22b613 | 2018-08-20 08:11:42 -0700 | [diff] [blame] | 625 | wayland_socket_path: wayland_socket_path.as_ref().to_path_buf(), |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 626 | } |
| 627 | } |
| 628 | |
| 629 | fn get_config(&self) -> virtio_gpu_config { |
| 630 | let mut events_read = 0; |
| 631 | if self.config_event { |
| 632 | events_read |= VIRTIO_GPU_EVENT_DISPLAY; |
| 633 | } |
| 634 | virtio_gpu_config { |
| 635 | events_read: Le32::from(events_read), |
| 636 | events_clear: Le32::from(0), |
| 637 | num_scanouts: Le32::from(1), |
Gurchetan Singh | 046df60 | 2018-10-02 16:07:26 -0700 | [diff] [blame] | 638 | num_capsets: Le32::from(2), |
Zach Reizner | 3a8100a | 2017-09-13 19:15:43 -0700 | [diff] [blame] | 639 | } |
| 640 | } |
| 641 | } |
| 642 | |
| 643 | impl Drop for Gpu { |
| 644 | fn drop(&mut self) { |
| 645 | if let Some(kill_evt) = self.kill_evt.take() { |
| 646 | // Ignore the result because there is nothing we can do about it. |
| 647 | let _ = kill_evt.write(1); |
| 648 | } |
| 649 | } |
| 650 | } |
| 651 | |
impl VirtioDevice for Gpu {
    /// Returns the file descriptors that must stay valid after activation
    /// (e.g. across sandboxing) — NOTE(review): exact contract defined by the
    /// `VirtioDevice` trait elsewhere in the crate.
    fn keep_fds(&self) -> Vec<RawFd> {
        let mut keep_fds = Vec::new();
        // TODO(davidriley): Remove once virgl has another path to include
        // debugging logs.
        if cfg!(debug_assertions) {
            keep_fds.push(libc::STDOUT_FILENO);
            keep_fds.push(libc::STDERR_FILENO);
        }
        keep_fds.push(self.exit_evt.as_raw_fd());
        if let Some(ref resource_bridge) = self.resource_bridge {
            keep_fds.push(resource_bridge.as_raw_fd());
        }
        keep_fds
    }

    fn device_type(&self) -> u32 {
        TYPE_GPU
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    /// Advertises virgl 3D support and virtio 1.0 compliance.
    fn features(&self) -> u64 {
        1 << VIRTIO_GPU_F_VIRGL | 1 << VIRTIO_F_VERSION_1
    }

    /// Feature acknowledgement is intentionally a no-op; no optional behavior
    /// here depends on which features the guest accepted.
    fn ack_features(&mut self, value: u64) {
        let _ = value;
    }

    /// Copies `data.len()` bytes of the config space starting at `offset`
    /// into `data`. Out-of-bounds reads are silently ignored.
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        let offset = offset as usize;
        let len = data.len();
        let cfg = self.get_config();
        let cfg_slice = cfg.as_slice();
        if offset + len <= cfg_slice.len() {
            data.copy_from_slice(&cfg_slice[offset..offset + len]);
        }
    }

    /// Applies a guest write to the config space. The only write with a side
    /// effect is setting `VIRTIO_GPU_EVENT_DISPLAY` in `events_clear`, which
    /// acknowledges (clears) a pending display event. Out-of-bounds writes
    /// are silently ignored.
    fn write_config(&mut self, offset: u64, data: &[u8]) {
        let offset = offset as usize;
        let len = data.len();
        // Write into a scratch copy of the config, then inspect events_clear;
        // the config itself is regenerated from device state on each read.
        let mut cfg = self.get_config();
        {
            let cfg_slice = cfg.as_mut_slice();
            if offset + len <= cfg_slice.len() {
                cfg_slice[offset..offset + len].copy_from_slice(data);
            }
        }
        if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
            self.config_event = false;
        }
    }

    /// Starts the device: takes ownership of the queues/eventfds and spawns
    /// the "virtio_gpu" worker thread, which opens the DRM card, display, and
    /// renderer before entering its event loop. Errors are logged and cause
    /// activation to be abandoned (the device simply never runs).
    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt_evt: EventFd,
        interrupt_resample_evt: EventFd,
        interrupt_status: Arc<AtomicUsize>,
        mut queues: Vec<Queue>,
        mut queue_evts: Vec<EventFd>,
    ) {
        // Exactly one queue/eventfd per entry of QUEUE_SIZES is expected.
        if queues.len() != QUEUE_SIZES.len() || queue_evts.len() != QUEUE_SIZES.len() {
            return;
        }

        let exit_evt = match self.exit_evt.try_clone() {
            Ok(e) => e,
            Err(e) => {
                error!("error cloning exit eventfd: {}", e);
                return;
            }
        };

        // Two handles to the same eventfd: one kept here (written on drop to
        // stop the worker), one moved into the worker to wait on.
        let (self_kill_evt, kill_evt) = match EventFd::new().and_then(|e| Ok((e.try_clone()?, e))) {
            Ok(v) => v,
            Err(e) => {
                error!("error creating kill EventFd pair: {}", e);
                return;
            }
        };
        self.kill_evt = Some(self_kill_evt);

        let resource_bridge = self.resource_bridge.take();

        // Queue order is fixed by the virtio-gpu spec: control first, cursor
        // second; removal order below must match.
        let ctrl_queue = queues.remove(0);
        let ctrl_evt = queue_evts.remove(0);
        let cursor_queue = queues.remove(0);
        let cursor_evt = queue_evts.remove(0);
        let socket_path = self.wayland_socket_path.clone();
        let worker_result =
            thread::Builder::new()
                .name("virtio_gpu".to_string())
                .spawn(move || {
                    // Skip render nodes belonging to drivers we can't use.
                    const UNDESIRED_CARDS: &[&str] = &["vgem", "pvr"];
                    let drm_card = match gpu_buffer::rendernode::open_device(UNDESIRED_CARDS) {
                        Ok(f) => f,
                        Err(()) => {
                            error!("failed to open card");
                            return;
                        }
                    };

                    let device = match Device::new(drm_card) {
                        Ok(d) => d,
                        Err(()) => {
                            error!("failed to open device");
                            return;
                        }
                    };

                    let display = match GpuDisplay::new(socket_path) {
                        Ok(c) => c,
                        Err(e) => {
                            error!("failed to open display: {}", e);
                            return;
                        }
                    };

                    // Debug builds only: make stderr an alias of stdout so
                    // renderer output on either stream lands in one place.
                    if cfg!(debug_assertions) {
                        let ret = unsafe { libc::dup2(libc::STDOUT_FILENO, libc::STDERR_FILENO) };
                        if ret == -1 {
                            warn!("unable to dup2 stdout to stderr: {}", Error::last());
                        }
                    }

                    let renderer = match Renderer::init() {
                        Ok(r) => r,
                        Err(e) => {
                            error!("failed to initialize gpu renderer: {}", e);
                            return;
                        }
                    };

                    // Hand everything to the worker; run() blocks until the
                    // kill or exit event fires.
                    Worker {
                        exit_evt,
                        mem,
                        interrupt_evt,
                        interrupt_resample_evt,
                        interrupt_status,
                        ctrl_queue,
                        ctrl_evt,
                        cursor_queue,
                        cursor_evt,
                        resource_bridge,
                        kill_evt,
                        state: Frontend::new(Backend::new(device, display, renderer)),
                    }
                    .run()
                });

        if let Err(e) = worker_result {
            error!("failed to spawn virtio_gpu worker: {}", e);
            return;
        }
    }
}