| // Copyright (C) 2016 The Android Open Source Project |
| // |
| // Licensed under the Apache License, Version 2.0 (the "License"); |
| // you may not use this file except in compliance with the License. |
| // You may obtain a copy of the License at |
| // |
| // http://www.apache.org/licenses/LICENSE-2.0 |
| // |
| // Unless required by applicable law or agreed to in writing, software |
| // distributed under the License is distributed on an "AS IS" BASIS, |
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| // See the License for the specific language governing permissions and |
| // limitations under the License. |
| |
| package gles |
| |
| import ( |
| "fmt" |
| "reflect" |
| "strings" |
| |
| "android.googlesource.com/platform/tools/gpu/framework/interval" |
| "android.googlesource.com/platform/tools/gpu/framework/log" |
| "android.googlesource.com/platform/tools/gpu/gapid/atom" |
| "android.googlesource.com/platform/tools/gpu/gapid/database" |
| "android.googlesource.com/platform/tools/gpu/gapid/gfxapi" |
| "android.googlesource.com/platform/tools/gpu/gapid/gfxapi/gles/glsl/ast" |
| "android.googlesource.com/platform/tools/gpu/gapid/memory" |
| "android.googlesource.com/platform/tools/gpu/gapid/replay" |
| "android.googlesource.com/platform/tools/gpu/gapid/replay/builder" |
| "android.googlesource.com/platform/tools/gpu/gapid/service" |
| ) |
| |
// support describes whether a GL feature is available on an implementation,
// and if so, whether its use is optional or mandatory.
type support int

const (
	unsupported support = iota // the feature cannot be used
	supported                  // the feature may be used
	required                   // the feature must be used
)
| |
var (
	// We don't include tests directly in the gles package as it adds
	// significantly to the test build time.
	// These aliases expose the package-private entry points to the
	// external test package.
	VisibleForTestingCompat     = compat
	VisibleForTestingGetContext = getContext
	VisibleForTestingGlSlCompat = glslCompat
)
| |
// DefaultVertexArrayId is the identifier of the substitute vertex array
// object. If the default vertex array object (id 0) is not allowed on
// the target platform, we remap the uses to this array.
const DefaultVertexArrayId = VertexArrayId(0xFFFF0001)
| |
// extensions is a set of OpenGL extension names.
type extensions map[string]struct{}

// parseExtensions builds an extension set from a whitespace-separated list of
// extension names (the format reported by glGetString(GL_EXTENSIONS)).
// strings.Fields is used instead of a plain split on " " so that empty,
// repeated, leading or trailing separators do not produce a bogus "" entry.
func parseExtensions(list string) extensions {
	out := extensions{}
	for _, s := range strings.Fields(list) {
		out[s] = struct{}{}
	}
	return out
}
| |
| func (e extensions) get(name string) support { |
| if _, ok := e[name]; ok { |
| return supported |
| } |
| return unsupported |
| } |
| |
| func (s support) String() string { |
| switch s { |
| case unsupported: |
| return "unsupported" |
| case supported: |
| return "supported" |
| case required: |
| return "required" |
| default: |
| return fmt.Sprintf("support<%d>", s) |
| } |
| } |
| |
// features describes the capabilities of a GL implementation, derived from
// its version and extension strings by getFeatures.
type features struct {
	vertexHalfFloatOES         support // support for GL_OES_vertex_half_float
	textureHalfFloatOES        support // support for GL_OES_texture_half_float
	eglImageExternal           support // support for GL_OES_EGL_image_external
	vertexArrayObjects         support // support for vertex array objects (VAOs)
	supportGenerateMipmapHint  bool // support for GL_GENERATE_MIPMAP_HINT
	uncompressedTextureFormats map[GLenum]struct{} // set of supported uncompressed texture formats
	compressedTextureFormats   map[GLenum]struct{} // set of supported compressed texture formats
	glTexImage3D               bool // support for glTexImage3D and friends
}
| |
| func getFeatures(ctx log.Context, version string, extensions string) (features, error) { |
| v, err := ParseVersion(version) |
| if err != nil { |
| return features{}, err |
| } |
| |
| ext := parseExtensions(extensions) |
| |
| utfs, err := getSupportedUncompressedTextureFormats(*v, ext) |
| if err != nil { |
| ctx.Warning().Fail(err, "getSupportedUncompressedTextureFormats") |
| } |
| |
| f := features{ |
| vertexHalfFloatOES: ext.get("GL_OES_vertex_half_float"), |
| textureHalfFloatOES: ext.get("GL_OES_texture_half_float"), |
| eglImageExternal: ext.get("GL_OES_EGL_image_external"), |
| uncompressedTextureFormats: utfs, |
| compressedTextureFormats: getSupportedCompressedTextureFormats(ext), |
| supportGenerateMipmapHint: v.IsES, |
| glTexImage3D: !v.IsES, |
| } |
| |
| // TODO: Properly check the specifications for these flags. |
| switch { |
| case v.IsES && v.Major >= 3: |
| f.vertexArrayObjects = supported |
| case !v.IsES && v.Major >= 3: |
| f.vertexArrayObjects = required |
| } |
| |
| return f, nil |
| } |
| |
| func compat(ctx log.Context, device *service.Device, d database.Database) (atom.Transformer, error) { |
| ctx = ctx.Enter("compat") |
| |
| target, err := getFeatures(ctx, device.Version, device.Extensions) |
| if err != nil { |
| return nil, fmt.Errorf( |
| "Error '%v' when getting feature list for version: '%s', extensions: '%s'.", |
| err, device.Version, device.Extensions) |
| } |
| |
| contexts := map[*Context]features{} |
| |
| s := gfxapi.NewState() |
| var t atom.Transformer |
| t = atom.Transform("compat", func(ctx log.Context, i atom.ID, a atom.Atom, out atom.Writer) { |
| c := getContext(s) |
| if c == nil { |
| // The compatibility translations below assume that we have a valid context. |
| mutateAndWrite(ctx, i, a, s, d, out) |
| return |
| } |
| |
| switch a := a.(type) { |
| case *ContextInfo: |
| if _, found := contexts[c]; found { |
| break |
| } |
| |
| // Mutate to set the Version and Extensions strings. |
| mutateAndWrite(ctx, i, a, s, d, out) |
| |
| source, err := getFeatures(ctx, c.Constants.Version, c.Constants.Extensions) |
| if err != nil { |
| ctx.Error().V("version", c.Constants.Version).V("extensions", c.Constants.Extensions).Fail(err, |
| "Getting feature list for {version?$: '$s'}, {extensions?$: '%s'}.") |
| break |
| } |
| |
| contexts[c] = source |
| |
| if target.vertexArrayObjects == required && |
| source.vertexArrayObjects != required { |
| // Replay device requires VAO, but capture did not enforce it. |
| // Satisfy the target by creating and binding a single VAO |
| // which we will use instead of the default VAO (id 0). |
| out.Write(ctx, atom.NoID, NewGlGenVertexArrays(1, memory.Tmp). |
| AddWrite(atom.Data(ctx, s.MemoryLayout, d, memory.Tmp, VertexArrayId(DefaultVertexArrayId)))) |
| out.Write(ctx, atom.NoID, NewGlBindVertexArray(DefaultVertexArrayId)) |
| } |
| return |
| |
| case *GlBindTexture: |
| if !c.Instances.Textures.Contains(a.Texture) { |
| // glGenTextures() was not used to generate the texture. Legal in GLES 2. |
| out.Write(ctx, atom.NoID, NewGlGenTextures(1, memory.Tmp). |
| AddRead(atom.Data(ctx, s.MemoryLayout, d, memory.Tmp, VertexArrayId(a.Texture)))) |
| } |
| |
| if a.Target == GLenum_GL_TEXTURE_EXTERNAL_OES && target.eglImageExternal == unsupported { |
| // TODO: Implement full support for external images. |
| // Remap external textures to plain 2D textures - this matches GLSL compat. |
| out.Write(ctx, atom.NoID, NewGlBindTexture(GLenum_GL_TEXTURE_2D, a.Texture)) |
| return |
| } |
| |
| case *GlBindVertexArray: |
| if a.Array == VertexArrayId(0) { |
| if target.vertexArrayObjects == required && |
| contexts[c].vertexArrayObjects != required { |
| a.Mutate(ctx, s, d, nil /* no builder, just mutate */) |
| out.Write(ctx, atom.NoID, NewGlBindVertexArray(DefaultVertexArrayId)) |
| return |
| } |
| } |
| |
| case *GlDisableVertexAttribArray: |
| vao := c.Instances.VertexArrays[c.BoundVertexArray] |
| if vao.VertexAttributeArrays[a.Location].Enabled == GLboolean_GL_FALSE { |
| // Ignore the call if it is redundant (i.e. it is already disabled). |
| // Some applications iterate over all arrays and explicitly disable them. |
| // This is a problem if the target supports fewer arrays than the capture. |
| return |
| } |
| |
| case *GlVertexAttrib4fv: |
| if oldAttrib, ok := c.VertexAttributes[a.Location]; ok { |
| oldValue := oldAttrib.Value.Read(ctx, a, s, d, nil /* builder */) |
| a.Mutate(ctx, s, d, nil /* no builder, just mutate */) |
| newAttrib := c.VertexAttributes[a.Location] |
| newValue := newAttrib.Value.Read(ctx, a, s, d, nil /* builder */) |
| if reflect.DeepEqual(oldValue, newValue) { |
| // Ignore the call if it is redundant. |
| // Some applications iterate over all arrays and explicitly initialize them. |
| // This is a problem if the target supports fewer arrays than the capture. |
| return |
| } |
| } |
| out.Write(ctx, i, a) |
| return |
| |
| case *GlGetVertexAttribIiv, |
| *GlGetVertexAttribIuiv, |
| *GlGetVertexAttribPointerv, |
| *GlGetVertexAttribfv, |
| *GlGetVertexAttribiv: |
| // Some applications iterate over all arrays and query their state. |
| // This may fail if the target supports fewer arrays than the capture. |
| // As these should have no side-effects, just drop them. |
| return |
| |
| case *GlShaderSource: |
| // Apply the state mutation of the unmodified glShaderSource atom. |
| // This is so we can grab the source string from the Shader object. |
| a.Mutate(ctx, s, d, nil /* no builder, just mutate */) |
| shader := c.Instances.Shaders.Get(a.Shader) |
| |
| lang := ast.LangVertexShader |
| switch shader.Type { |
| case GLenum_GL_VERTEX_SHADER: |
| case GLenum_GL_FRAGMENT_SHADER: |
| lang = ast.LangFragmentShader |
| default: |
| ctx.Warning().V("type", shader.Type).Log("Unknown shader type") |
| } |
| |
| src, err := glslCompat(shader.Source, lang, device) |
| if err != nil { |
| ctx.Error().V("id", i).Fail(err, "Reformatting GLSL source for atom") |
| } |
| |
| a = NewGlShaderSource(a.Shader, 1, memory.Tmp, memory.Nullptr). |
| AddRead(atom.Data(ctx, s.MemoryLayout, d, memory.Tmp.Offset(8), src)). |
| AddRead(atom.Data(ctx, s.MemoryLayout, d, memory.Tmp, memory.Tmp.Offset(8))) |
| mutateAndWrite(ctx, i, a, s, d, out) |
| return |
| |
| // TODO: glVertexAttribIPointer |
| case *GlVertexAttribPointer: |
| if a.Type == GLenum_GL_HALF_FLOAT_OES && target.vertexHalfFloatOES == unsupported { |
| // Convert GL_HALF_FLOAT_OES to GL_HALF_FLOAT_ARB. |
| a = NewGlVertexAttribPointer(a.Location, a.Size, GLenum_GL_HALF_FLOAT_ARB, a.Normalized, a.Stride, memory.Pointer(a.Data)) |
| } |
| if target.vertexArrayObjects == required && |
| c.BoundBuffers[GLenum_GL_ARRAY_BUFFER] == 0 { |
| // Client-pointers are not supported, we need to copy this data to a buffer. |
| // However, we can't do this now as the observation only happens at the draw call. |
| // Apply the state changes, but don't write the emit the atom - we need to defer |
| // the trickery to the draw call. |
| a.Mutate(ctx, s, d, nil /* no builder, just mutate */) |
| return |
| } |
| mutateAndWrite(ctx, i, a, s, d, out) |
| return |
| |
| case *GlDrawArrays: |
| if target.vertexArrayObjects == required { |
| if clientVAsBound(c) { |
| first := int(a.FirstIndex) |
| last := first + int(a.IndicesCount) - 1 |
| defer moveClientVBsToVAs(ctx, first, last, i, a, s, c, d, out)() |
| } |
| } |
| |
| case *GlDrawElements: |
| if target.vertexArrayObjects == required { |
| e := externs{ctx: ctx, a: a, s: s, d: d} |
| |
| ib := c.BoundBuffers[GLenum_GL_ELEMENT_ARRAY_BUFFER] |
| clientIB := ib == 0 |
| clientVB := clientVAsBound(c) |
| if clientIB { |
| // The indices for the glDrawElements call is in client memory. |
| // We need to move this into a temporary buffer. |
| |
| // Generate a new element array buffer and bind it. |
| id := BufferId(newUnusedID(func(x uint32) bool { _, ok := c.Instances.Buffers[BufferId(x)]; return ok })) |
| c.Instances.Buffers[id] = &Buffer{} // Not used aside from reserving the ID. |
| out.Write(ctx, atom.NoID, NewGlGenBuffers(1, memory.Tmp). |
| AddRead(atom.Data(ctx, s.MemoryLayout, d, memory.Tmp, id))) |
| out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ELEMENT_ARRAY_BUFFER, id)) |
| |
| // By moving the draw call's observations earlier, populate the element array buffer. |
| size, base := DataTypeSize(a.IndicesType)*int(a.IndicesCount), memory.Pointer(a.Indices) |
| glBufferData := NewGlBufferData(GLenum_GL_ELEMENT_ARRAY_BUFFER, GLsizeiptr(size), memory.Pointer(base), GLenum_GL_STATIC_DRAW) |
| glBufferData.extras = a.extras |
| out.Write(ctx, atom.NoID, glBufferData) |
| |
| // Clean-up |
| defer func() { |
| out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ELEMENT_ARRAY_BUFFER, ib)) |
| delete(c.Instances.Buffers, id) |
| }() |
| |
| if clientVB { |
| // Some of the vertex arrays for the glDrawElements call is in |
| // client memory and we need to move this into temporary buffer(s). |
| // The indices are also in client memory, so we need to apply the |
| // atom's reads now so that the indices can be read from the |
| // application pool. |
| a.Extras().Observations().ApplyReads(s.Memory[memory.ApplicationPool]) |
| limits := e.calcIndexLimits(U8áµ–(a.Indices), a.IndicesType, 0, uint32(a.IndicesCount)) |
| defer moveClientVBsToVAs(ctx, int(limits.Min), int(limits.Max), i, a, s, c, d, out)() |
| } |
| |
| glDrawElements := *a |
| glDrawElements.Indices.Address = 0 |
| glDrawElements.Mutate(ctx, s, d, nil /* no builder, just mutate */) |
| out.Write(ctx, i, &glDrawElements) |
| return |
| |
| } else if clientVB { // GL_ELEMENT_ARRAY_BUFFER is bound |
| // Some of the vertex arrays for the glDrawElements call is in |
| // client memory and we need to move this into temporary buffer(s). |
| // The indices are server-side, so can just be read from the internal |
| // pooled buffer. |
| data := c.Instances.Buffers[ib].Data.Index(0, s) |
| base := uint32(a.Indices.Address) |
| limits := e.calcIndexLimits(data, a.IndicesType, base, uint32(a.IndicesCount)) |
| defer moveClientVBsToVAs(ctx, int(limits.Min), int(limits.Max), i, a, s, c, d, out)() |
| } |
| } |
| |
| case *GlTexStorage2DEXT: |
| if _, supported := target.uncompressedTextureFormats[a.Format]; !supported { |
| a := *a |
| a.Format = GLenum_GL_RGBA8 |
| a.Mutate(ctx, s, d, nil /* no builder, just mutate */) |
| out.Write(ctx, i, &a) |
| return |
| } |
| |
| case *GlTexImage2D: |
| { |
| a := *a |
| if a.Type == GLenum_GL_HALF_FLOAT_OES && target.textureHalfFloatOES == unsupported { |
| // Half-float made it to core desktop and ES specification, but it was renumbered. |
| a.Type = GLenum_GL_HALF_FLOAT |
| } |
| if _, supported := target.uncompressedTextureFormats[a.Format]; !supported { |
| if err := convertTexImage2DtoRGBA(ctx, i, &a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Converting texture") |
| } |
| } |
| |
| case *GlTexSubImage2D: |
| { |
| a := *a |
| if a.Type == GLenum_GL_HALF_FLOAT_OES && target.textureHalfFloatOES == unsupported { |
| // Half-float made it to core desktop and ES specification, but it was renumbered. |
| a.Type = GLenum_GL_HALF_FLOAT |
| } |
| if _, supported := target.uncompressedTextureFormats[a.Format]; !supported { |
| if err := convertTexSubImage2DtoRGBA(ctx, i, &a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Converting texture") |
| } |
| } |
| |
| case *GlCompressedTexImage2D: |
| if _, supported := target.compressedTextureFormats[a.Format]; !supported { |
| if err := decompressTexImage2D(ctx, i, a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Decompressing texture") |
| } |
| |
| case *GlCompressedTexSubImage2D: |
| if _, supported := target.compressedTextureFormats[a.Format]; !supported { |
| if err := decompressTexSubImage2D(ctx, i, a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Decompressing texture") |
| } |
| |
| case *GlTexImage3D: |
| if _, supported := target.uncompressedTextureFormats[a.Format]; !supported { |
| if err := convertTexImage3DtoRGBA(ctx, i, a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Converting texture") |
| } |
| |
| case *GlTexImage3DOES: |
| if _, supported := target.uncompressedTextureFormats[a.Format]; !supported { |
| if err := convertTexImage3DOEStoRGBA(ctx, i, a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Converting texture") |
| } |
| if target.glTexImage3D { |
| if err := convertTexImage3DOES(ctx, i, a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Converting texture") |
| } |
| |
| case *GlTexSubImage3DOES: |
| if target.glTexImage3D { |
| if err := convertTexSubImage3DOES(ctx, i, a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Converting texture") |
| } |
| |
| case *GlCopyTexSubImage3DOES: |
| if target.glTexImage3D { |
| if err := convertCopyTexSubImage3DOES(ctx, i, a, s, d, out); err == nil { |
| return |
| } |
| ctx.Fail(err, "Converting texture") |
| } |
| |
| case *GlProgramBinary: |
| if !canUsePrecompiledShader(c, device) { |
| for _, a := range buildStubProgram(ctx, a.Extras(), s, d, a.Program) { |
| mutateAndWrite(ctx, atom.NoID, a, s, d, atom.TransformWriter{T: t, O: out}) |
| } |
| return |
| } |
| |
| case *GlProgramBinaryOES: |
| if !canUsePrecompiledShader(c, device) { |
| for _, a := range buildStubProgram(ctx, a.Extras(), s, d, a.Program) { |
| mutateAndWrite(ctx, atom.NoID, a, s, d, atom.TransformWriter{T: t, O: out}) |
| } |
| return |
| } |
| |
| case *GlHint: |
| if a.Target == GLenum_GL_GENERATE_MIPMAP_HINT && !target.supportGenerateMipmapHint { |
| return // Not supported in the core profile of OpenGL. |
| } |
| |
| case *GlGetBooleani_v, |
| *GlGetBooleanv, |
| *GlGetFloatv, |
| *GlGetInteger64i_v, |
| *GlGetInteger64v, |
| *GlGetIntegeri_v, |
| *GlGetIntegerv, |
| *GlGetInternalformativ, |
| *GlGetString, |
| *GlGetStringi: |
| // The acceptable values of these get functions vary between GL versions. |
| // As these should have no side-effects, just drop them. |
| return |
| |
| case *GlGetActiveAttrib, |
| *GlGetActiveUniform: |
| // The number of active attributes and uniforms can vary between compilers |
| // depending on their ability to eliminate dead code. In particular, |
| // dead code in pixel shader can allow code removal in the vertex shader. |
| // As these should have no side-effects, just drop them. |
| return |
| |
| case *GlLabelObjectEXT, |
| *GlGetObjectLabelEXT, |
| *GlObjectLabel, |
| *GlObjectLabelKHR, |
| *GlGetObjectLabel, |
| *GlObjectPtrLabel, |
| *GlGetObjectPtrLabel, |
| *GlGetObjectLabelKHR: |
| // These methods require non-trivial remapping for replay. |
| // As they do not affect rendering output, just drop them. |
| return |
| |
| case *GlGetProgramBinary, |
| *GlGetProgramBinaryOES: |
| // Program binaries are very driver specific. This command may fail on replay |
| // because one of the arguments must be GL_PROGRAM_BINARY_LENGTH. |
| // It has no side effects, so just drop it. |
| return |
| |
| case *GlDisable: |
| // GL_QCOM_alpha_test adds back GL_ALPHA_TEST from GLES 1.0 as extension. |
| // It seems that applications only disable it to make sure it is off, so |
| // we can safely ignore it. We should not ignore glEnable for it though. |
| if a.Capability == GLenum_GL_ALPHA_TEST_QCOM { |
| return |
| } |
| |
| default: |
| if a.AtomFlags().IsDrawCall() && clientVAsBound(c) { |
| ctx.Warning().T("atom", a).Log("Draw call with client-pointers not handled by the compatability layer") |
| } |
| } |
| |
| mutateAndWrite(ctx, i, a, s, d, out) |
| }) |
| |
| return t, nil |
| } |
| |
// mutateAndWrite applies the atom's state mutation to s (with no replay
// builder) and then forwards the atom unchanged to out.
func mutateAndWrite(ctx log.Context, i atom.ID, a atom.Atom, s *gfxapi.State, d database.Database, out atom.Writer) {
	a.Mutate(ctx, s, d, nil /* no builder, just mutate */)
	out.Write(ctx, i, a)
}
| |
| // canUsePrecompiledShader returns true if precompiled shaders / programs |
| // captured with the context c can be replayed on the device d. |
| func canUsePrecompiledShader(c *Context, d *service.Device) bool { |
| return c.Constants.Vendor == d.Vendor && c.Constants.Version == d.Version |
| } |
| |
| // clientVAsBound returns true if there are any vertex attribute arrays enabled |
| // with pointers to client-side memory. |
| func clientVAsBound(c *Context) bool { |
| // Only the default vertex array can use client-side memory. |
| if c.BoundVertexArray == 0 { |
| va := c.Instances.VertexArrays[c.BoundVertexArray] |
| for _, arr := range va.VertexAttributeArrays { |
| if arr.Enabled == GLboolean_GL_TRUE { |
| vb := va.VertexBufferBindings[arr.Binding] |
| if vb.Buffer == 0 && arr.Pointer.Address != 0 { |
| return true |
| } |
| } |
| } |
| } |
| return false |
| } |
| |
| // moveClientVBsToVAs is a compatability helper for transforming client-side |
| // vertex array data (which is not supported by glVertexAttribPointer in later |
| // versions of GL), into array-buffers. |
| func moveClientVBsToVAs( |
| ctx log.Context, |
| first, last int, // vertex indices |
| i atom.ID, |
| a atom.Atom, |
| s *gfxapi.State, |
| c *Context, |
| d database.Database, |
| out atom.Writer) (revert func()) { |
| |
| rngs := interval.U64RangeList{} |
| // Gather together all the client-buffers in use by the vertex-attribs. |
| // Merge together all the memory intervals that these use. |
| va := c.Instances.VertexArrays[c.BoundVertexArray] |
| for _, arr := range va.VertexAttributeArrays { |
| if arr.Enabled == GLboolean_GL_TRUE { |
| vb := va.VertexBufferBindings[arr.Binding] |
| if vb.Buffer == 0 && arr.Pointer.Address != 0 { |
| // TODO: We're currently ignoring the Offset and Stride fields of the VBB. |
| // TODO: We're currently ignoring the RelativeOffset field of the VA. |
| // TODO: Merge logic with ReadVertexArrays macro in vertex_arrays.api. |
| if vb.Divisor != 0 { |
| panic("Instanced draw calls not currently supported by the compatibility layer") |
| } |
| size := DataTypeSize(arr.Type) * int(arr.Size) |
| stride := int(vb.Stride) |
| base := memory.Pointer(arr.Pointer) // Always start from the 0'th vertex to simplify logic. |
| rng := base.Range(uint64(last*stride + size)) |
| interval.Merge(&rngs, rng.Span(), true) |
| } |
| } |
| } |
| |
| if len(rngs) == 0 { |
| // Draw call does not use client-side buffers. Just draw. |
| mutateAndWrite(ctx, i, a, s, d, out) |
| return |
| } |
| |
| // Create an array-buffer for each chunk of overlapping client-side buffers in |
| // use. These are populated with data below. |
| ids := make([]BufferId, len(rngs)) |
| for i := range rngs { |
| id := BufferId(newUnusedID(func(x uint32) bool { _, ok := c.Instances.Buffers[BufferId(x)]; return ok })) |
| c.Instances.Buffers[id] = &Buffer{} // Not used aside from reserving the ID. |
| ids[i] = id |
| } |
| out.Write(ctx, atom.NoID, NewGlGenBuffers(GLsizei(len(ids)), memory.Tmp). |
| AddRead(atom.Data(ctx, s.MemoryLayout, d, memory.Tmp, ids))) |
| |
| // Apply the memory observations that were made by the draw call now. |
| // We need to do this as the glBufferData calls below will require the data. |
| out.Write(ctx, atom.NoID, replay.Custom(func(ctx log.Context, s *gfxapi.State, d database.Database, b *builder.Builder) error { |
| a.Extras().Observations().ApplyReads(s.Memory[memory.ApplicationPool]) |
| return nil |
| })) |
| |
| // Note: be careful of overwriting the observations made above, before the |
| // calls to glBufferData below. |
| |
| // Fill the array-buffers with the observed memory data. |
| for i, rng := range rngs { |
| base := memory.Pointer{Address: rng.First, Pool: memory.ApplicationPool} |
| size := GLsizeiptr(rng.Count) |
| out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ARRAY_BUFFER, ids[i])) |
| out.Write(ctx, atom.NoID, NewGlBufferData(GLenum_GL_ARRAY_BUFFER, size, base, GLenum_GL_STATIC_DRAW)) |
| } |
| |
| // Redirect all the vertex attrib arrays to point to the array-buffer data. |
| for l, arr := range va.VertexAttributeArrays { |
| if arr.Enabled == GLboolean_GL_TRUE { |
| vb := va.VertexBufferBindings[arr.Binding] |
| if vb.Buffer == 0 && arr.Pointer.Address != 0 { |
| i := interval.IndexOf(&rngs, arr.Pointer.Address) |
| offset := arr.Pointer.Address - rngs[i].First |
| out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ARRAY_BUFFER, ids[i])) |
| out.Write(ctx, atom.NoID, &GlVertexAttribPointer{ |
| Location: l, |
| Size: GLint(arr.Size), |
| Type: arr.Type, |
| Normalized: arr.Normalized, |
| Stride: arr.Stride, |
| Data: NewVertexPointer(offset), |
| }) |
| } |
| } |
| } |
| |
| // Restore original state. |
| return func() { |
| out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ARRAY_BUFFER, c.BoundBuffers[GLenum_GL_ARRAY_BUFFER])) |
| for _, id := range ids { |
| delete(c.Instances.Buffers, id) |
| out.Write(ctx, atom.NoID, NewGlDeleteBuffers(1, memory.Tmp). |
| AddRead(atom.Data(ctx, s.MemoryLayout, d, memory.Tmp, id))) |
| } |
| } |
| } |