// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include <queue>
#include <utility>

#include <GLES2/gl2.h>
#ifndef GL_GLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES 1
#endif
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/sequence_checker.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/common/id_allocator.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "ui/gfx/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_share_group.h"

namespace gpu {

namespace {

static base::LazyInstance<std::set<InProcessCommandBuffer*> >
    g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;

static bool g_use_virtualized_gl_context = false;
static bool g_uses_explicit_scheduling = false;

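// Runs |task| on the current (GPU) thread, stores its return value in
// |result|, and signals |completion| so that the client thread blocked on
// the corresponding WaitableEvent can resume.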
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}

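// A reference-counted base::Thread that services GPU tasks when no explicit
// scheduling is in use. The thread is started on construction, stopped on
// destruction, and shared between all scheduler clients that need a thread.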
class GpuInProcessThread
    : public base::Thread,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

 private:
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;
  virtual ~GpuInProcessThread();

  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

// Used with explicit scheduling when there is no dedicated GPU thread.
class GpuCommandQueue {
 public:
  GpuCommandQueue();
  ~GpuCommandQueue();

  void QueueTask(const base::Closure& task);
  void RunTasks();
  void SetScheduleCallback(const base::Closure& callback);

 private:
  base::Lock tasks_lock_;
  std::queue<base::Closure> tasks_;
  base::Closure schedule_callback_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
};

GpuCommandQueue::GpuCommandQueue() {}

GpuCommandQueue::~GpuCommandQueue() {
  base::AutoLock lock(tasks_lock_);
  DCHECK(tasks_.empty());
}

void GpuCommandQueue::QueueTask(const base::Closure& task) {
  {
    base::AutoLock lock(tasks_lock_);
    tasks_.push(task);
  }

  DCHECK(!schedule_callback_.is_null());
  schedule_callback_.Run();
}

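// Drains the queue on the calling thread. The lock is released while each
// task runs, and the remaining task count is re-read after every pop, so
// work queued during the drain is also executed.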
void GpuCommandQueue::RunTasks() {
  size_t num_tasks;
  {
    base::AutoLock lock(tasks_lock_);
    num_tasks = tasks_.size();
  }

  while (num_tasks) {
    base::Closure task;
    {
      base::AutoLock lock(tasks_lock_);
      task = tasks_.front();
      tasks_.pop();
      num_tasks = tasks_.size();
    }

    task.Run();
  }
}

void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
  DCHECK(schedule_callback_.is_null());
  schedule_callback_ = callback;
}

static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
    LAZY_INSTANCE_INITIALIZER;

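// Common base for scheduler clients. Tracks all live clients so that the
// single GpuInProcessThread can be shared between clients that need one.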
class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
 public:
  explicit SchedulerClientBase(bool need_thread);
  virtual ~SchedulerClientBase();

  static bool HasClients();

 protected:
  scoped_refptr<GpuInProcessThread> thread_;

 private:
  static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
  static base::LazyInstance<base::Lock> all_clients_lock_;
};

base::LazyInstance<std::set<SchedulerClientBase*> >
    SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;

SchedulerClientBase::SchedulerClientBase(bool need_thread) {
  base::AutoLock lock(all_clients_lock_.Get());
  if (need_thread) {
    if (!all_clients_.Get().empty()) {
      SchedulerClientBase* other = *all_clients_.Get().begin();
      thread_ = other->thread_;
      DCHECK(thread_.get());
    } else {
      thread_ = new GpuInProcessThread;
    }
  }
  all_clients_.Get().insert(this);
}

SchedulerClientBase::~SchedulerClientBase() {
  base::AutoLock lock(all_clients_lock_.Get());
  all_clients_.Get().erase(this);
}

bool SchedulerClientBase::HasClients() {
  base::AutoLock lock(all_clients_lock_.Get());
  return !all_clients_.Get().empty();
}

// A client that talks to the GPU thread.
class ThreadClient : public SchedulerClientBase {
 public:
  ThreadClient();
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
};

ThreadClient::ThreadClient() : SchedulerClientBase(true) {
  DCHECK(thread_.get());
}

void ThreadClient::QueueTask(const base::Closure& task) {
  thread_->message_loop()->PostTask(FROM_HERE, task);
}

// A client that talks to the GpuCommandQueue.
class QueueClient : public SchedulerClientBase {
 public:
  QueueClient();
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
};

QueueClient::QueueClient() : SchedulerClientBase(false) {
  DCHECK(!thread_.get());
}

void QueueClient::QueueTask(const base::Closure& task) {
  g_gpu_queue.Get().QueueTask(task);
}

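// Creates a QueueClient when explicit scheduling has been requested via
// InProcessCommandBuffer::SetScheduleCallback(); otherwise creates a
// ThreadClient backed by the shared GPU thread.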
static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
CreateSchedulerClient() {
  scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
  if (g_uses_explicit_scheduling)
    client.reset(new QueueClient);
  else
    client.reset(new ThreadClient);

  return client.Pass();
}

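// Signals the wrapped WaitableEvent when it goes out of scope, so waiters
// are woken on every exit path.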
class ScopedEvent {
 public:
  ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  base::WaitableEvent* event_;
};

}  // anonymous namespace

InProcessCommandBuffer::InProcessCommandBuffer()
    : context_lost_(false),
      share_group_id_(0),
      last_put_offset_(-1),
      flush_event_(false, false),
      queue_(CreateSchedulerClient()) {}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
}

bool InProcessCommandBuffer::IsContextLost() {
  CheckSequencedThread();
  if (context_lost_ || !command_buffer_) {
    return true;
  }
  CommandBuffer::State state = GetState();
  return error::IsError(state.error);
}

void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}

bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}

bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    bool share_resources,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    unsigned int share_group_id) {

  share_resources_ = share_resources;
  context_lost_callback_ = WrapCallback(context_lost_callback);
  share_group_id_ = share_group_id;

  if (surface) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 is_offscreen,
                 window,
                 size,
                 allowed_extensions,
                 attribs,
                 gpu_preference);

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();
  return result;
}

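// Runs on the GPU thread (or the explicitly scheduled queue). Sets up the
// transfer buffer manager, CommandBufferService, decoder, GpuScheduler,
// GLSurface and GLContext, and registers this command buffer for resource
// sharing when requested.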
bool InProcessCommandBuffer::InitializeOnGpuThread(
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference) {
  CheckSequencedThread();
  // Use one share group for all contexts.
  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
                         (new gfx::GLShareGroup));

  DCHECK(size.width() >= 0 && size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, base::Unretained(this)));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, base::Unretained(this)));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  InProcessCommandBuffer* context_group = NULL;

  if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
    DCHECK(share_group_id_);
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      if ((*it)->share_group_id_ == share_group_id_) {
        context_group = *it;
        DCHECK(context_group->share_resources_);
        context_lost_ = context_group->IsContextLost();
        break;
      }
    }
    if (!context_group)
      share_group = new gfx::GLShareGroup;
  }

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      context_group ? context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(
                          NULL, NULL, NULL, NULL, bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (!surface_) {
    if (is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

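  // With a virtualized context, reuse (or lazily create) the share group's
  // single real GL context and wrap it in a GLContextVirtual bound to this
  // decoder; otherwise create a dedicated real GL context.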
  if (g_use_virtualized_gl_context) {
    context_ = share_group->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          share_group.get(), surface_.get(), gpu_preference);
      share_group->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        share_group.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        share_group.get(), surface_.get(), gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.swap_buffer_complete_callback = true;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            is_offscreen,
                            size,
                            disallowed_features,
                            allowed_extensions,
                            attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }

  if (!is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, base::Unretained(this)));
  }

  if (share_resources_) {
    g_all_shared_contexts.Pointer()->insert(this);
  }

  return true;
}

void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();
  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}

bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;

  g_all_shared_contexts.Pointer()->erase(this);
  return true;
}

void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}

unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size) {
  CheckSequencedThread();
  unsigned int image_id;
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    image_id =
        group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
  }
  base::Closure image_task =
      base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                 base::Unretained(this), buffer, size, image_id);
  QueueTask(image_task);
  return image_id;
}

void InProcessCommandBuffer::CreateImageOnGpuThread(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size,
    unsigned int image_id) {
  CheckSequencedThread();
  scoped_refptr<gfx::GLImage> gl_image =
      gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
  decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
}

void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
  CheckSequencedThread();
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
  }
  base::Closure image_manager_task =
      base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
                 base::Unretained(this),
                 image_id);
  QueueTask(image_manager_task);
}

void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
  CheckSequencedThread();
  decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
}

void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
  if (share_resources_) {
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      (*it)->context_lost_ = true;
    }
  }
}

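// Copies the state captured by the most recent flush into |last_state_|,
// unless that state is older than |last_state_| (the generation comparison
// is written to tolerate wrap-around of the 32-bit generation counter).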
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

CommandBuffer::State InProcessCommandBuffer::GetState() {
  CheckSequencedThread();
  return GetStateFast();
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}

void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));
}

void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  base::Unretained(this),
                                  put_offset);
  QueueTask(task);
}

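// Flushes and then blocks until the service side has advanced past
// |last_known_get| or an error is reported, waiting on |flush_event_|
// between state polls.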
CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  CheckSequencedThread();
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
    return last_state_;

  Flush(put_offset);
  GetStateFast();
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }

  return last_state_;
}

void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
}

gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);

  QueueTask(task);
}

gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return gpu::Buffer();
}

uint32 InProcessCommandBuffer::InsertSyncPoint() {
  NOTREACHED();
  return 0;
}

void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(WrapCallback(callback));
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }

void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }

void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}

void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}

namespace {

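// Runs |callback| immediately when already on |loop|'s thread; otherwise
// posts it, so client callbacks always run on the client's message loop.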
void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                  const base::Closure& callback) {
  if (!loop->BelongsToCurrentThread()) {
    loop->PostTask(FROM_HERE, callback);
  } else {
    callback.Run();
  }
}

void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace

base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}

// static
void InProcessCommandBuffer::EnableVirtualizedContext() {
  g_use_virtualized_gl_context = true;
}

// static
void InProcessCommandBuffer::SetScheduleCallback(
    const base::Closure& callback) {
  DCHECK(!g_uses_explicit_scheduling);
  DCHECK(!SchedulerClientBase::HasClients());
  g_uses_explicit_scheduling = true;
  g_gpu_queue.Get().SetScheduleCallback(callback);
}

// static
void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
  g_gpu_queue.Get().RunTasks();
}

}  // namespace gpu