diff --git a/bench/ColorPrivBench.cpp b/bench/ColorPrivBench.cpp
index 404fc1f348..23cc48f3cc 100644
--- a/bench/ColorPrivBench.cpp
+++ b/bench/ColorPrivBench.cpp
@@ -41,7 +41,7 @@ public:
     void onDraw(int loops, SkCanvas*) override {
         // We xor results of FourByteInterp into junk to make sure the function runs.
-        volatile SkPMColor junk = 0;
+        SK_MAYBE_UNUSED volatile SkPMColor junk = 0;
         for (int loop = 0; loop < loops; loop++) {
             for (int i = 0; i < kInputs; i++) {
diff --git a/bench/ControlBench.cpp b/bench/ControlBench.cpp
index f8e15c9711..19696934ab 100644
--- a/bench/ControlBench.cpp
+++ b/bench/ControlBench.cpp
@@ -16,7 +16,7 @@ struct ControlBench : public Benchmark {
     void onDraw(int loops, SkCanvas*) override {
         // Nothing terribly useful: force a memory read, a memory write, and some math.
-        volatile uint32_t rand = 0;
+        SK_MAYBE_UNUSED volatile uint32_t rand = 0;
         for (int i = 0; i < 1000*loops; i++) {
             rand *= 1664525;
             rand += 1013904223;
diff --git a/bench/MatrixBench.cpp b/bench/MatrixBench.cpp
index 4aa745eef6..07683ead7e 100644
--- a/bench/MatrixBench.cpp
+++ b/bench/MatrixBench.cpp
@@ -53,7 +53,7 @@ protected:
         m2.reset();
         // xor into a volatile prevents these comparisons from being optimized away.
-        volatile bool junk = false;
+        SK_MAYBE_UNUSED volatile bool junk = false;
         junk ^= (m0 == m1);
         junk ^= (m1 == m2);
         junk ^= (m2 == m0);
@@ -116,7 +116,7 @@ protected:
                     fArray[3], fArray[4], fArray[5], fArray[6], fArray[7], fArray[8]);
         // xoring into a volatile prevents the compiler from optimizing these away
-        volatile int junk = 0;
+        SK_MAYBE_UNUSED volatile int junk = 0;
         junk ^= (fMatrix.getType());
         fMatrix.dirtyMatrixTypeCache();
         junk ^= (fMatrix.getType());
diff --git a/bench/ScalarBench.cpp b/bench/ScalarBench.cpp
index 68c0884804..9148532654 100644
--- a/bench/ScalarBench.cpp
+++ b/bench/ScalarBench.cpp
@@ -59,7 +59,7 @@ protected:
     int mulLoopCount() const override { return 4; }
     void performTest() override {
         // xoring into a volatile prevents the compiler from optimizing these checks away.
-        volatile bool junk = false;
+        SK_MAYBE_UNUSED volatile bool junk = false;
         junk ^= (fArray[6] != 0.0f || fArray[7] != 0.0f || fArray[8] != 1.0f);
         junk ^= (fArray[2] != 0.0f || fArray[5] != 0.0f);
     }
@@ -78,7 +78,7 @@ protected:
     int mulLoopCount() const override { return 4; }
     void performTest() override {
         // xoring into a volatile prevents the compiler from optimizing these checks away.
-        volatile int32_t junk = 0;
+        SK_MAYBE_UNUSED volatile int32_t junk = 0;
         junk ^= (SkScalarAs2sCompliment(fArray[6]) |
                  SkScalarAs2sCompliment(fArray[7]) |
                  (SkScalarAs2sCompliment(fArray[8]) - kPersp1Int));
diff --git a/fuzz/FuzzMain.cpp b/fuzz/FuzzMain.cpp
index 6f6a535eba..1a9e7fae9d 100644
--- a/fuzz/FuzzMain.cpp
+++ b/fuzz/FuzzMain.cpp
@@ -592,12 +592,7 @@ static void fuzz_img(sk_sp bytes, uint8_t scale, uint8_t mode) {
             SkBitmap subsetBm;
             // We will reuse pixel memory from bitmap.
             void* pixels = bitmap.getPixels();
-            // Keep track of left and top (for drawing subsetBm into canvas). We could use
-            // fscale * x and fscale * y, but we want integers such that the next subset will start
-            // where the last one ended. So we'll add decodeInfo.width() and height().
-            int left = 0;
             for (int x = 0; x < W; x += w) {
-                int top = 0;
                 for (int y = 0; y < H; y+= h) {
                     // Do not make the subset go off the edge of the image.
                     const int preScaleW = std::min(w, W - x);
@@ -645,11 +640,7 @@ static void fuzz_img(sk_sp bytes, uint8_t scale, uint8_t mode) {
                                  W, H, result);
                         return;
                     }
-                    // translate by the scaled height.
-                    top += decodeInfo.height();
                 }
-                // translate by the scaled width.
-                left += decodeInfo.width();
             }
             SkDebugf("[terminated] Success!\n");
             break;
diff --git a/gm/rasterhandleallocator.cpp b/gm/rasterhandleallocator.cpp
index 340b03354b..fa4488bb3a 100644
--- a/gm/rasterhandleallocator.cpp
+++ b/gm/rasterhandleallocator.cpp
@@ -255,7 +255,7 @@ public:
         RECT clip_bounds_RECT = toRECT(clip_bounds);
         HRGN hrgn = CreateRectRgnIndirect(&clip_bounds_RECT);
-        int result = SelectClipRgn(hdc, hrgn);
+        SK_MAYBE_UNUSED int result = SelectClipRgn(hdc, hrgn);
         SkASSERT(result != ERROR);
         result = DeleteObject(hrgn);
         SkASSERT(result != 0);
diff --git a/src/gpu/d3d/GrD3DGpu.cpp b/src/gpu/d3d/GrD3DGpu.cpp
index 1edeee76d3..958a482c90 100644
--- a/src/gpu/d3d/GrD3DGpu.cpp
+++ b/src/gpu/d3d/GrD3DGpu.cpp
@@ -740,11 +740,8 @@ bool GrD3DGpu::uploadToTexture(GrD3DTexture* tex,
     int currentWidth = rect.width();
     int currentHeight = rect.height();
-    int layerHeight = tex->height();
-
     for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
         if (texels[currentMipLevel].fPixels) {
-            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
             const size_t trimRowBytes = currentWidth * bpp;
             const size_t srcRowBytes = texels[currentMipLevel].fRowBytes;
@@ -758,7 +755,6 @@ bool GrD3DGpu::uploadToTexture(GrD3DTexture* tex,
         }
         currentWidth = std::max(1, currentWidth / 2);
         currentHeight = std::max(1, currentHeight / 2);
-        layerHeight = currentHeight;
     }
     // Update the offsets in the footprints to be relative to the slice's offset
diff --git a/src/gpu/mtl/GrMtlGpu.mm b/src/gpu/mtl/GrMtlGpu.mm
index fcb44c4966..9f9b3d1352 100644
--- a/src/gpu/mtl/GrMtlGpu.mm
+++ b/src/gpu/mtl/GrMtlGpu.mm
@@ -1339,16 +1339,6 @@ bool GrMtlGpu::onReadPixels(GrSurface* surface,
     size_t transBufferRowBytes = bpp*rect.width();
     size_t transBufferImageBytes = transBufferRowBytes*rect.height();
-    // TODO: implement some way of reusing buffers instead of making a new one every time.
-    NSUInteger options = 0;
-    if (@available(macOS 10.11, iOS 9.0, *)) {
-#ifdef SK_BUILD_FOR_MAC
-        options |= MTLResourceStorageModeManaged;
-#else
-        options |= MTLResourceStorageModeShared;
-#endif
-    }
-
     GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
     sk_sp transferBuffer = resourceProvider->createBuffer(
             transBufferImageBytes, GrGpuBufferType::kXferGpuToCpu,
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 4c4e06c56c..656397aaf1 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -919,10 +919,8 @@ bool GrVkGpu::uploadTexDataOptimal(GrVkAttachment* texAttachment,
     int currentWidth = rect.width();
     int currentHeight = rect.height();
-    int layerHeight = texAttachment->height();
     for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
         if (texelsShallowCopy[currentMipLevel].fPixels) {
-            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
             const size_t trimRowBytes = currentWidth * bpp;
             const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
@@ -943,8 +941,6 @@ bool GrVkGpu::uploadTexDataOptimal(GrVkAttachment* texAttachment,
         currentWidth = std::max(1, currentWidth/2);
         currentHeight = std::max(1, currentHeight/2);
-
-        layerHeight = currentHeight;
     }
     // Change layout of our target so it can be copied to
diff --git a/src/sksl/SkSLDehydrator.cpp b/src/sksl/SkSLDehydrator.cpp
index ab55f3f10a..522571c4bf 100644
--- a/src/sksl/SkSLDehydrator.cpp
+++ b/src/sksl/SkSLDehydrator.cpp
@@ -239,12 +239,12 @@ void Dehydrator::write(const SymbolTable& symbols) {
         ordered.insert({name, symbol});
     });
     for (std::pair p : ordered) {
-        bool found = false;
+        SkDEBUGCODE(bool found = false;)
         for (size_t i = 0; i < symbols.fOwnedSymbols.size(); ++i) {
             if (symbols.fOwnedSymbols[i].get() == p.second) {
                 fCommandBreaks.add(fBody.bytesWritten());
                 this->writeU16(i);
-                found = true;
+                SkDEBUGCODE(found = true;)
                 break;
             }
         }
diff --git a/tools/sk_app/win/Window_win.cpp b/tools/sk_app/win/Window_win.cpp
index cf39afd9b3..353125a09b 100644
--- a/tools/sk_app/win/Window_win.cpp
+++ b/tools/sk_app/win/Window_win.cpp
@@ -210,7 +210,6 @@ static skui::ModifierKey get_modifiers(UINT message, WPARAM wParam, LPARAM lPara
 LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) {
     PAINTSTRUCT ps;
-    HDC hdc;
     Window_win* window = (Window_win*) GetWindowLongPtr(hWnd, GWLP_USERDATA);
@@ -218,7 +217,7 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
     switch (message) {
        case WM_PAINT:
-            hdc = BeginPaint(hWnd, &ps);
+            BeginPaint(hWnd, &ps);
            window->onPaint();
            EndPaint(hWnd, &ps);
            eventHandled = true;
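
Aside (illustration only, not part of the patch above): the bench, GM, and SkSL edits all apply the same warning-suppression pattern, marking intentionally "unused" volatile sinks and moving debug-only bookkeeping behind SkDEBUGCODE. A minimal self-contained sketch of that pattern, assuming SK_MAYBE_UNUSED expands to [[maybe_unused]] and SkDEBUGCODE(...) keeps its argument only in debug builds; stand-in macro definitions are provided here so the sketch compiles on its own:

#include <cassert>
#include <cstdint>

#define SK_MAYBE_UNUSED [[maybe_unused]]    // assumption: mirrors Skia's macro
#if defined(SK_DEBUG)
#   define SkDEBUGCODE(...) __VA_ARGS__     // assumption: debug builds keep the code
#else
#   define SkDEBUGCODE(...)                 // assumption: release builds drop it
#endif

void spin(int loops) {
    // volatile keeps the loop's loads and stores alive; the attribute silences
    // the "set but not used" warning newer compilers raise even for volatiles.
    SK_MAYBE_UNUSED volatile uint32_t rand = 0;
    SkDEBUGCODE(int iterations = 0;)        // debug-only bookkeeping, gone in release
    for (int i = 0; i < 1000 * loops; i++) {
        rand *= 1664525;
        rand += 1013904223;
        SkDEBUGCODE(++iterations;)
    }
    // Because the variable exists only in debug builds, it may only be read
    // inside other SkDEBUGCODE()/assert-style code (assumes loops >= 0 here).
    SkDEBUGCODE(assert(iterations == 1000 * loops);)
}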