Torne (Richard Coles) | a36e592 | 2013-08-05 13:57:33 +0100 | [diff] [blame] | 1 | // Copyright 2013 The Chromium Authors. All rights reserved. |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | #include "gpu/command_buffer/service/in_process_command_buffer.h" |
| 6 | |
| 7 | #include <queue> |
| 8 | #include <utility> |
| 9 | |
| 10 | #include <GLES2/gl2.h> |
| 11 | #ifndef GL_GLEXT_PROTOTYPES |
| 12 | #define GL_GLEXT_PROTOTYPES 1 |
| 13 | #endif |
| 14 | #include <GLES2/gl2ext.h> |
| 15 | #include <GLES2/gl2extchromium.h> |
| 16 | |
| 17 | #include "base/bind.h" |
| 18 | #include "base/bind_helpers.h" |
| 19 | #include "base/lazy_instance.h" |
| 20 | #include "base/logging.h" |
| 21 | #include "base/memory/weak_ptr.h" |
| 22 | #include "base/message_loop/message_loop_proxy.h" |
Ben Murdoch | bb1529c | 2013-08-08 10:24:53 +0100 | [diff] [blame^] | 23 | #include "base/sequence_checker.h" |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 24 | #include "base/threading/thread.h" |
| 25 | #include "gpu/command_buffer/common/id_allocator.h" |
| 26 | #include "gpu/command_buffer/service/command_buffer_service.h" |
| 27 | #include "gpu/command_buffer/service/context_group.h" |
| 28 | #include "gpu/command_buffer/service/gl_context_virtual.h" |
| 29 | #include "gpu/command_buffer/service/gpu_scheduler.h" |
| 30 | #include "gpu/command_buffer/service/image_manager.h" |
| 31 | #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
| 32 | #include "ui/gfx/size.h" |
| 33 | #include "ui/gl/gl_context.h" |
| 34 | #include "ui/gl/gl_image.h" |
| 35 | #include "ui/gl/gl_share_group.h" |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 36 | |
| 37 | namespace gpu { |
| 38 | |
| 39 | namespace { |
| 40 | |
| 41 | static base::LazyInstance<std::set<InProcessCommandBuffer*> > |
| 42 | g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER; |
| 43 | |
| 44 | static bool g_use_virtualized_gl_context = false; |
| 45 | static bool g_uses_explicit_scheduling = false; |
| 46 | |
| 47 | template <typename T> |
| 48 | static void RunTaskWithResult(base::Callback<T(void)> task, |
| 49 | T* result, |
| 50 | base::WaitableEvent* completion) { |
| 51 | *result = task.Run(); |
| 52 | completion->Signal(); |
| 53 | } |
| 54 | |
| 55 | class GpuInProcessThread |
| 56 | : public base::Thread, |
| 57 | public base::RefCountedThreadSafe<GpuInProcessThread> { |
| 58 | public: |
| 59 | GpuInProcessThread(); |
| 60 | |
| 61 | private: |
| 62 | friend class base::RefCountedThreadSafe<GpuInProcessThread>; |
| 63 | virtual ~GpuInProcessThread(); |
| 64 | |
| 65 | DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread); |
| 66 | }; |
| 67 | |
| 68 | GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") { |
| 69 | Start(); |
| 70 | } |
| 71 | |
| 72 | GpuInProcessThread::~GpuInProcessThread() { |
| 73 | Stop(); |
| 74 | } |
| 75 | |
| 76 | // Used with explicit scheduling when there is no dedicated GPU thread. |
| 77 | class GpuCommandQueue { |
| 78 | public: |
| 79 | GpuCommandQueue(); |
| 80 | ~GpuCommandQueue(); |
| 81 | |
| 82 | void QueueTask(const base::Closure& task); |
| 83 | void RunTasks(); |
| 84 | void SetScheduleCallback(const base::Closure& callback); |
| 85 | |
| 86 | private: |
| 87 | base::Lock tasks_lock_; |
| 88 | std::queue<base::Closure> tasks_; |
| 89 | base::Closure schedule_callback_; |
| 90 | |
| 91 | DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue); |
| 92 | }; |
| 93 | |
| 94 | GpuCommandQueue::GpuCommandQueue() {} |
| 95 | |
| 96 | GpuCommandQueue::~GpuCommandQueue() { |
| 97 | base::AutoLock lock(tasks_lock_); |
| 98 | DCHECK(tasks_.empty()); |
| 99 | } |
| 100 | |
| 101 | void GpuCommandQueue::QueueTask(const base::Closure& task) { |
| 102 | { |
| 103 | base::AutoLock lock(tasks_lock_); |
| 104 | tasks_.push(task); |
| 105 | } |
| 106 | |
| 107 | DCHECK(!schedule_callback_.is_null()); |
| 108 | schedule_callback_.Run(); |
| 109 | } |
| 110 | |
| 111 | void GpuCommandQueue::RunTasks() { |
| 112 | size_t num_tasks; |
| 113 | { |
| 114 | base::AutoLock lock(tasks_lock_); |
| 115 | num_tasks = tasks_.size(); |
| 116 | } |
| 117 | |
| 118 | while (num_tasks) { |
| 119 | base::Closure task; |
| 120 | { |
| 121 | base::AutoLock lock(tasks_lock_); |
| 122 | task = tasks_.front(); |
| 123 | tasks_.pop(); |
| 124 | num_tasks = tasks_.size(); |
| 125 | } |
| 126 | |
| 127 | task.Run(); |
| 128 | } |
| 129 | } |
| 130 | |
| 131 | void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) { |
| 132 | DCHECK(schedule_callback_.is_null()); |
| 133 | schedule_callback_ = callback; |
| 134 | } |
| 135 | |
| 136 | static base::LazyInstance<GpuCommandQueue> g_gpu_queue = |
| 137 | LAZY_INSTANCE_INITIALIZER; |
| 138 | |
| 139 | class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient { |
| 140 | public: |
| 141 | explicit SchedulerClientBase(bool need_thread); |
| 142 | virtual ~SchedulerClientBase(); |
| 143 | |
| 144 | static bool HasClients(); |
| 145 | |
| 146 | protected: |
| 147 | scoped_refptr<GpuInProcessThread> thread_; |
| 148 | |
| 149 | private: |
| 150 | static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_; |
| 151 | static base::LazyInstance<base::Lock> all_clients_lock_; |
| 152 | }; |
| 153 | |
| 154 | base::LazyInstance<std::set<SchedulerClientBase*> > |
| 155 | SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER; |
| 156 | base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ = |
| 157 | LAZY_INSTANCE_INITIALIZER; |
| 158 | |
| 159 | SchedulerClientBase::SchedulerClientBase(bool need_thread) { |
| 160 | base::AutoLock(all_clients_lock_.Get()); |
| 161 | if (need_thread) { |
| 162 | if (!all_clients_.Get().empty()) { |
| 163 | SchedulerClientBase* other = *all_clients_.Get().begin(); |
| 164 | thread_ = other->thread_; |
| 165 | DCHECK(thread_.get()); |
| 166 | } else { |
| 167 | thread_ = new GpuInProcessThread; |
| 168 | } |
| 169 | } |
| 170 | all_clients_.Get().insert(this); |
| 171 | } |
| 172 | |
| 173 | SchedulerClientBase::~SchedulerClientBase() { |
| 174 | base::AutoLock(all_clients_lock_.Get()); |
| 175 | all_clients_.Get().erase(this); |
| 176 | } |
| 177 | |
| 178 | bool SchedulerClientBase::HasClients() { |
| 179 | base::AutoLock(all_clients_lock_.Get()); |
| 180 | return !all_clients_.Get().empty(); |
| 181 | } |
| 182 | |
| 183 | // A client that talks to the GPU thread |
| 184 | class ThreadClient : public SchedulerClientBase { |
| 185 | public: |
| 186 | ThreadClient(); |
| 187 | virtual void QueueTask(const base::Closure& task) OVERRIDE; |
| 188 | }; |
| 189 | |
| 190 | ThreadClient::ThreadClient() : SchedulerClientBase(true) { |
| 191 | DCHECK(thread_.get()); |
| 192 | } |
| 193 | |
| 194 | void ThreadClient::QueueTask(const base::Closure& task) { |
| 195 | thread_->message_loop()->PostTask(FROM_HERE, task); |
| 196 | } |
| 197 | |
| 198 | // A client that talks to the GpuCommandQueue |
| 199 | class QueueClient : public SchedulerClientBase { |
| 200 | public: |
| 201 | QueueClient(); |
| 202 | virtual void QueueTask(const base::Closure& task) OVERRIDE; |
| 203 | }; |
| 204 | |
| 205 | QueueClient::QueueClient() : SchedulerClientBase(false) { |
| 206 | DCHECK(!thread_.get()); |
| 207 | } |
| 208 | |
| 209 | void QueueClient::QueueTask(const base::Closure& task) { |
| 210 | g_gpu_queue.Get().QueueTask(task); |
| 211 | } |
| 212 | |
| 213 | static scoped_ptr<InProcessCommandBuffer::SchedulerClient> |
| 214 | CreateSchedulerClient() { |
| 215 | scoped_ptr<InProcessCommandBuffer::SchedulerClient> client; |
| 216 | if (g_uses_explicit_scheduling) |
| 217 | client.reset(new QueueClient); |
| 218 | else |
| 219 | client.reset(new ThreadClient); |
| 220 | |
| 221 | return client.Pass(); |
| 222 | } |
| 223 | |
| 224 | class ScopedEvent { |
| 225 | public: |
| 226 | ScopedEvent(base::WaitableEvent* event) : event_(event) {} |
| 227 | ~ScopedEvent() { event_->Signal(); } |
| 228 | |
| 229 | private: |
| 230 | base::WaitableEvent* event_; |
| 231 | }; |
| 232 | |
| 233 | } // anonyous namespace |
| 234 | |
// Sets up an auto-reset, initially-unsignaled |flush_event_| (see FlushSync)
// and picks a scheduler client (dedicated GPU thread or explicit queue)
// based on the process-wide scheduling mode.
InProcessCommandBuffer::InProcessCommandBuffer()
    : context_lost_(false),
      share_group_id_(0),
      last_put_offset_(-1),
      flush_event_(false, false),
      queue_(CreateSchedulerClient()) {}
| 241 | |
InProcessCommandBuffer::~InProcessCommandBuffer() {
  // Synchronously tears down GL state on the GPU thread before the members
  // the queued tasks reference are destroyed.
  Destroy();
}
| 245 | |
| 246 | bool InProcessCommandBuffer::IsContextLost() { |
Ben Murdoch | bb1529c | 2013-08-08 10:24:53 +0100 | [diff] [blame^] | 247 | CheckSequencedThread(); |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 248 | if (context_lost_ || !command_buffer_) { |
| 249 | return true; |
| 250 | } |
| 251 | CommandBuffer::State state = GetState(); |
| 252 | return error::IsError(state.error); |
| 253 | } |
| 254 | |
// Resize callback installed on the decoder for onscreen contexts; forwards
// the new size to the surface.
// NOTE(review): |scale_factor| is ignored here — presumably handled at the
// platform-surface level; confirm.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}
| 260 | |
// Makes the decoder's GL context current. On failure the command buffer is
// put into the lost-context error state so clients observe the loss.
// Must be called with |command_buffer_lock_| held.
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}
| 272 | |
// Put-offset-changed callback from the CommandBufferService: processes
// pending commands via the scheduler. Must hold |command_buffer_lock_|.
void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  // Nothing to do if the context cannot be made current (it is now lost).
  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}
| 282 | |
// Scheduler callback invoked when the get buffer changes; forwards the new
// buffer id to the command buffer. Must hold |command_buffer_lock_|.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
| 289 | |
| 290 | bool InProcessCommandBuffer::Initialize( |
Ben Murdoch | bb1529c | 2013-08-08 10:24:53 +0100 | [diff] [blame^] | 291 | scoped_refptr<gfx::GLSurface> surface, |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 292 | bool is_offscreen, |
| 293 | bool share_resources, |
| 294 | gfx::AcceleratedWidget window, |
| 295 | const gfx::Size& size, |
| 296 | const char* allowed_extensions, |
| 297 | const std::vector<int32>& attribs, |
| 298 | gfx::GpuPreference gpu_preference, |
| 299 | const base::Closure& context_lost_callback, |
| 300 | unsigned int share_group_id) { |
| 301 | |
| 302 | share_resources_ = share_resources; |
| 303 | context_lost_callback_ = WrapCallback(context_lost_callback); |
| 304 | share_group_id_ = share_group_id; |
| 305 | |
Ben Murdoch | bb1529c | 2013-08-08 10:24:53 +0100 | [diff] [blame^] | 306 | if (surface) { |
| 307 | // GPU thread must be the same as client thread due to GLSurface not being |
| 308 | // thread safe. |
| 309 | sequence_checker_.reset(new base::SequenceChecker); |
| 310 | surface_ = surface; |
| 311 | } |
| 312 | |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 313 | base::Callback<bool(void)> init_task = |
| 314 | base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, |
| 315 | base::Unretained(this), |
| 316 | is_offscreen, |
| 317 | window, |
| 318 | size, |
| 319 | allowed_extensions, |
| 320 | attribs, |
| 321 | gpu_preference); |
Ben Murdoch | bb1529c | 2013-08-08 10:24:53 +0100 | [diff] [blame^] | 322 | |
| 323 | base::WaitableEvent completion(true, false); |
| 324 | bool result = false; |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 325 | QueueTask( |
| 326 | base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); |
| 327 | completion.Wait(); |
| 328 | return result; |
| 329 | } |
| 330 | |
// Creates the service-side objects (transfer buffer manager, command buffer
// service, decoder, scheduler, surface, context) on the GPU thread.
// Returns false — after tearing down whatever was built — on any failure.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference) {
  CheckSequencedThread();
  // Use one share group for all contexts.
  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
                         (new gfx::GLShareGroup));

  DCHECK(size.width() >= 0 && size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, base::Unretained(this)));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, base::Unretained(this)));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  // When sharing resources, look for an existing context in the same share
  // group so its ContextGroup (and lost state) can be reused.
  InProcessCommandBuffer* context_group = NULL;

  if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
    DCHECK(share_group_id_);
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      if ((*it)->share_group_id_ == share_group_id_) {
        context_group = *it;
        DCHECK(context_group->share_resources_);
        context_lost_ = context_group->IsContextLost();
        break;
      }
    }
    // No peer found in this share group: start a fresh GL share group.
    if (!context_group)
      share_group = new gfx::GLShareGroup;
  }

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      context_group ? context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(
                          NULL, NULL, NULL, NULL, bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  // |surface_| may already be set if the client passed one to Initialize().
  if (!surface_) {
    if (is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (g_use_virtualized_gl_context) {
    // One real context is shared per share group; each command buffer gets
    // a virtual context wrapping it.
    context_ = share_group->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          share_group.get(), surface_.get(), gpu_preference);
      share_group->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        share_group.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        share_group.get(), surface_.get(), gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.swap_buffer_complete_callback = true;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            is_offscreen,
                            size,
                            disallowed_features,
                            allowed_extensions,
                            attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }

  // Onscreen surfaces must react to resize requests from the decoder.
  if (!is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, base::Unretained(this)));
  }

  if (share_resources_) {
    g_all_shared_contexts.Pointer()->insert(this);
  }

  return true;
}
| 466 | |
// Synchronously destroys the service objects: queues DestroyOnGpuThread()
// and blocks until it has finished running.
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();
  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}
| 477 | |
// GPU-thread half of Destroy(). Always returns true (the result only feeds
// RunTaskWithResult's out-parameter, which Destroy() ignores).
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;

  g_all_shared_contexts.Pointer()->erase(this);
  return true;
}
| 493 | |
// When a client-supplied surface is in use, every call (client and "GPU"
// side) must stay on one sequence; no-op when |sequence_checker_| is unset.
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}
| 498 | |
// Allocates an image id on the calling thread, then asynchronously binds
// the GpuMemoryBuffer to a GLImage on the GPU thread. Returns the new id.
unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size) {
  CheckSequencedThread();
  unsigned int image_id;
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    image_id =
        group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
  }
  base::Closure image_task =
      base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                 base::Unretained(this), buffer, size, image_id);
  QueueTask(image_task);
  return image_id;
}
| 517 | |
// GPU-thread half of CreateImageForGpuMemoryBuffer(): creates the GLImage
// and registers it with the context group's image manager.
void InProcessCommandBuffer::CreateImageOnGpuThread(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size,
    unsigned int image_id) {
  CheckSequencedThread();
  scoped_refptr<gfx::GLImage> gl_image =
      gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
  decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
}
| 527 | |
// Frees |image_id| on the calling thread, then asynchronously removes the
// image from the image manager on the GPU thread.
void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
  CheckSequencedThread();
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
  }
  base::Closure image_manager_task =
      base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
                 base::Unretained(this),
                 image_id);
  QueueTask(image_manager_task);
}
| 542 | |
// GPU-thread half of RemoveImage().
void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
  CheckSequencedThread();
  decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
}
| 547 | |
// Parse-error callback from the service: notifies the client (at most once)
// and marks this context — and, when sharing, every shared context — lost.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // Reset so the client is never notified twice.
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
  if (share_resources_) {
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      (*it)->context_lost_ = true;
    }
  }
}
| 565 | |
// Refreshes |last_state_| from the snapshot taken after the last flush,
// unless that snapshot's generation is behind ours, and returns it.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  // Unsigned-difference comparison so generation wraparound is handled:
  // only accept snapshots at or ahead of the cached generation.
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}
| 573 | |
// Returns the freshest available state (see GetStateFast()).
CommandBuffer::State InProcessCommandBuffer::GetState() {
  CheckSequencedThread();
  return GetStateFast();
}
| 578 | |
// Returns the cached state without refreshing from the GPU-side snapshot.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}
| 583 | |
// Returns the latest token, refreshing the cached state first.
int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 589 | |
| 590 | void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) { |
Ben Murdoch | bb1529c | 2013-08-08 10:24:53 +0100 | [diff] [blame^] | 591 | CheckSequencedThread(); |
Ben Murdoch | 3240926 | 2013-08-07 11:04:47 +0100 | [diff] [blame] | 592 | ScopedEvent handle_flush(&flush_event_); |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 593 | base::AutoLock lock(command_buffer_lock_); |
| 594 | command_buffer_->Flush(put_offset); |
Ben Murdoch | 3240926 | 2013-08-07 11:04:47 +0100 | [diff] [blame] | 595 | { |
| 596 | // Update state before signaling the flush event. |
| 597 | base::AutoLock lock(state_after_last_flush_lock_); |
| 598 | state_after_last_flush_ = command_buffer_->GetState(); |
| 599 | } |
| 600 | DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || |
| 601 | (error::IsError(state_after_last_flush_.error) && context_lost_)); |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 602 | } |
| 603 | |
// Client-side flush: records |put_offset| and queues FlushOnGpuThread().
// No-ops when already in error or when the put offset has not advanced.
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  base::Unretained(this),
                                  put_offset);
  QueueTask(task);
}
| 618 | |
// Flushes and blocks until the service's get offset moves past
// |last_known_get| or an error occurs; returns the updated state.
CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  CheckSequencedThread();
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
    return last_state_;

  Flush(put_offset);
  GetStateFast();
  // |flush_event_| is signaled at the end of every FlushOnGpuThread().
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }

  return last_state_;
}
| 635 | |
// Switches the ring buffer to |shm_id|, resets the put offset, and
// refreshes the published post-flush state snapshot to match.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
}
| 651 | |
// Allocates a transfer buffer of |size| bytes; the new id is returned via
// |id|. Runs synchronously under |command_buffer_lock_|.
gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}
| 658 | |
// Queues destruction of transfer buffer |id| on the GPU thread.
// NOTE(review): |command_buffer_| is bound Unretained — presumably safe
// because Destroy() synchronously drains queued work before the command
// buffer is reset; confirm against the queue's ordering guarantees.
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);

  QueueTask(task);
}
| 667 | |
// Not supported on the client side of the in-process command buffer.
gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return gpu::Buffer();
}
| 672 | |
// Sync points are not supported in-process; see SignalSyncPoint().
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  NOTREACHED();
  return 0;
}
// |sync_point| is ignored: queuing |callback| behind all previously queued
// work stands in for waiting on a real sync point. The callback is wrapped
// so it runs back on the calling thread.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(WrapCallback(callback));
}
| 682 | |
// Returns the error from the cached state without refreshing it.
gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}
| 687 | |
// CommandBuffer interface stub; use the parameterized Initialize() overload.
bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}
| 692 | |
// Service-side API; never called on the client side of this class.
void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }
| 694 | |
// Service-side API; never called on the client side of this class.
void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }
| 696 | |
// Service-side API; never called on the client side of this class.
void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}
| 700 | |
// Service-side API; never called on the client side of this class.
void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}
| 705 | |
| 706 | namespace { |
| 707 | |
Torne (Richard Coles) | a36e592 | 2013-08-05 13:57:33 +0100 | [diff] [blame] | 708 | void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop, |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 709 | const base::Closure& callback) { |
Torne (Richard Coles) | a36e592 | 2013-08-05 13:57:33 +0100 | [diff] [blame] | 710 | if (!loop->BelongsToCurrentThread()) { |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 711 | loop->PostTask(FROM_HERE, callback); |
Torne (Richard Coles) | a36e592 | 2013-08-05 13:57:33 +0100 | [diff] [blame] | 712 | } else { |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 713 | callback.Run(); |
Torne (Richard Coles) | a36e592 | 2013-08-05 13:57:33 +0100 | [diff] [blame] | 714 | } |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 715 | } |
| 716 | |
Torne (Richard Coles) | a36e592 | 2013-08-05 13:57:33 +0100 | [diff] [blame] | 717 | void RunOnTargetThread(scoped_ptr<base::Closure> callback) { |
Ben Murdoch | 28390f6 | 2013-08-01 12:44:22 +0100 | [diff] [blame] | 718 | DCHECK(callback.get()); |
| 719 | callback->Run(); |
| 720 | } |
| 721 | |
| 722 | } // anonymous namespace |
| 723 | |
// Wraps |callback| so that, no matter which thread invokes the wrapper, the
// callback runs — and is destroyed — on the current (client) thread.
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}
| 736 | |
// static
// Process-wide switch: future contexts wrap a shared real context in a
// GLContextVirtual (see InitializeOnGpuThread).
void InProcessCommandBuffer::EnableVirtualizedContext() {
  g_use_virtualized_gl_context = true;
}
| 741 | |
// static
// Switches the process to explicit scheduling: GPU work is queued to
// GpuCommandQueue and |callback| notifies the embedder to call
// ProcessGpuWorkOnCurrentThread(). Must be set before any client exists.
void InProcessCommandBuffer::SetScheduleCallback(
    const base::Closure& callback) {
  DCHECK(!g_uses_explicit_scheduling);
  DCHECK(!SchedulerClientBase::HasClients());
  g_uses_explicit_scheduling = true;
  g_gpu_queue.Get().SetScheduleCallback(callback);
}
| 750 | |
// static
// Drains the explicit-scheduling queue on the calling thread.
void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
  g_gpu_queue.Get().RunTasks();
}
| 755 | |
| 756 | } // namespace gpu |