Refactor logging messages in operators and subgraph nodes

PiperOrigin-RevId: 314654928
diff --git a/BUILD.bazel b/BUILD.bazel
index 2836f0d..9761f8b 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1806,6 +1806,15 @@
     "//conditions:default": ["-DXNN_LOG_LEVEL=2"],
 })
 
+LOGGING_SRCS = select({
+    # No logging in optimized mode
+    ":optimized_build": [],
+    "//conditions:default": [
+        "src/operator-strings.c",
+        "src/subgraph-strings.c",
+    ],
+})
+
 LOGGING_HDRS = [
     "src/xnnpack/log.h",
 ]
@@ -2404,6 +2413,27 @@
     aarch64_srcs = AARCH64_ASM_UKERNELS,
 )
 
+xnnpack_cc_library(
+    name = "logging_utils",
+    srcs = LOGGING_SRCS,
+    hdrs = INTERNAL_HDRS + LOGGING_HDRS,
+    copts = LOGGING_COPTS + [
+        "-Isrc",
+        "-Iinclude",
+    ] + select({
+        ":debug_build": [],
+        "//conditions:default": xnnpack_min_size_copts(),
+    }),
+    gcc_copts = xnnpack_gcc_std_copts(),
+    msvc_copts = xnnpack_msvc_std_copts(),
+    visibility = xnnpack_visibility(),
+    deps = [
+        "@FP16",
+        "@clog",
+        "@pthreadpool",
+    ],
+)
+
 xnnpack_aggregate_library(
     name = "ukernels",
     aarch32_deps = [
@@ -2534,6 +2564,7 @@
     gcc_copts = xnnpack_gcc_std_copts(),
     msvc_copts = xnnpack_msvc_std_copts(),
     deps = [
+        ":logging_utils",
         "@FP16",
         "@FXdiv",
         "@clog",
@@ -2555,6 +2586,7 @@
     gcc_copts = xnnpack_gcc_std_copts(),
     msvc_copts = xnnpack_msvc_std_copts(),
     deps = [
+        ":logging_utils",
         "@FP16",
         "@FXdiv",
         "@clog",
@@ -2574,6 +2606,7 @@
     gcc_copts = xnnpack_gcc_std_copts(),
     msvc_copts = xnnpack_msvc_std_copts(),
     deps = [
+        ":logging_utils",
         "@pthreadpool",
     ],
 )
@@ -2594,6 +2627,7 @@
     gcc_copts = xnnpack_gcc_std_copts(),
     msvc_copts = xnnpack_msvc_std_copts(),
     deps = [
+        ":logging_utils",
         "@pthreadpool",
     ],
 )
@@ -2628,6 +2662,7 @@
     msvc_copts = xnnpack_msvc_std_copts(),
     deps = [
         ":indirection",
+        ":logging_utils",
         "@FP16",
         "@FXdiv",
         "@clog",
@@ -2658,6 +2693,7 @@
     msvc_copts = xnnpack_msvc_std_copts(),
     deps = [
         ":indirection_test_mode",
+        ":logging_utils",
         "@FP16",
         "@FXdiv",
         "@clog",
@@ -2672,7 +2708,7 @@
         "src/runtime.c",
         "src/subgraph.c",
         "src/tensor.c",
-    ] + SUBGRAPH_SRCS,
+    ] + SUBGRAPH_SRCS + LOGGING_SRCS,
     hdrs = ["include/xnnpack.h"],
     copts = LOGGING_COPTS + [
         "-Isrc",
@@ -2690,10 +2726,11 @@
     visibility = xnnpack_visibility(),
     deps = [
         ":enable_assembly",
+        ":logging_utils",
         ":memory_planner",
-        ":ukernels",
         ":operator_run",
         ":operators",
+        ":ukernels",
         "@clog",
         "@FP16",
         "@pthreadpool",
@@ -2710,7 +2747,7 @@
         "src/runtime.c",
         "src/subgraph.c",
         "src/tensor.c",
-    ] + SUBGRAPH_SRCS,
+    ] + SUBGRAPH_SRCS + LOGGING_SRCS,
     hdrs = ["include/xnnpack.h"],
     copts = LOGGING_COPTS + [
         "-Isrc",
@@ -2730,10 +2767,11 @@
     visibility = xnnpack_visibility(),
     deps = [
         ":enable_assembly",
+        ":logging_utils",
         ":memory_planner_test_mode",
-        ":ukernels_test_mode",
         ":operator_run_test_mode",
         ":operators_test_mode",
+        ":ukernels_test_mode",
         "@clog",
         "@FP16",
         "@pthreadpool",
@@ -2771,9 +2809,10 @@
     visibility = xnnpack_visibility(),
     deps = [
         ":enable_assembly",
-        ":ukernels",
+        ":logging_utils",
         ":operator_run",
         ":operators",
+        ":ukernels",
         "@clog",
         "@pthreadpool",
     ] + select({
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7a8b55f..c5bd838 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -199,7 +199,11 @@
   src/subgraph/static-constant-pad.c
   src/subgraph/unpooling-2d.c)
 
-SET(XNNPACK_COLD_SRCS ${XNNPACK_OPERATOR_SRCS})
+SET(XNNPACK_LOGGING_SRCS
+  src/operator-strings.c
+  src/subgraph-strings.c)
+
+SET(XNNPACK_COLD_SRCS ${XNNPACK_OPERATOR_SRCS} ${XNNPACK_SUBGRAPH_SRCS} ${XNNPACK_LOGGING_SRCS})
 LIST(APPEND XNNPACK_COLD_SRCS
   src/init.c
   src/memory.c
diff --git a/src/operator-strings.c b/src/operator-strings.c
new file mode 100644
index 0000000..883c430
--- /dev/null
+++ b/src/operator-strings.c
@@ -0,0 +1,98 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/log.h>
+#include <xnnpack/subgraph.h>
+
+
+const char* xnn_operator_type_to_string(enum xnn_operator_type type) {
+  switch (type) {
+    case xnn_operator_type_invalid:
+      return "Invalid";
+    case xnn_operator_type_add_nc_f32:
+      return "Add (NC, F32)";
+    case xnn_operator_type_add_nd_f32:
+      return "Add (ND, F32)";
+    case xnn_operator_type_add_nc_q8:
+      return "Add (NC, Q8)";
+    case xnn_operator_type_argmax_pooling_nhwc_f32:
+      return "ArgMax Pooling (NHWC, F32)";
+    case xnn_operator_type_average_pooling_nhwc_f32:
+      return "Average Pooling (NHWC, F32)";
+    case xnn_operator_type_average_pooling_nhwc_q8:
+      return "Average Pooling (NHWC, Q8)";
+    case xnn_operator_type_channel_pad_nc_x32:
+      return "Channel Pad (NC, X32)";
+    case xnn_operator_type_channel_shuffle_nc_x32:
+      return "Channel Shuffle (NC, X32)";
+    case xnn_operator_type_channel_shuffle_nc_x8:
+      return "Channel Shuffle (NC, X8)";
+    case xnn_operator_type_clamp_nc_f32:
+      return "Clamp (NC, F32)";
+    case xnn_operator_type_clamp_nc_u8:
+      return "Clamp (NC, U8)";
+    case xnn_operator_type_constant_pad_nd_x32:
+      return "Constant Pad (ND, X32)";
+    case xnn_operator_type_convolution_nhwc_f32:
+      return "Convolution (NHWC, F32)";
+    case xnn_operator_type_convolution_nhwc_q8:
+      return "Convolution (NHWC, Q8)";
+    case xnn_operator_type_convolution_nchw_f32:
+      return "Convolution (NCHW, F32)";
+    case xnn_operator_type_deconvolution_nhwc_f32:
+      return "Deconvolution (NHWC, F32)";
+    case xnn_operator_type_deconvolution_nhwc_q8:
+      return "Deconvolution (NHWC, Q8)";
+    case xnn_operator_type_divide_nd_f32:
+      return "Divide (ND, F32)";
+    case xnn_operator_type_fully_connected_nc_f32:
+      return "Fully Connected (NC, F32)";
+    case xnn_operator_type_fully_connected_nc_q8:
+      return "Fully Connected (NC, Q8)";
+    case xnn_operator_type_global_average_pooling_nwc_f32:
+      return "Global Average Pooling (NWC, F32)";
+    case xnn_operator_type_global_average_pooling_nwc_q8:
+      return "Global Average Pooling (NWC, Q8)";
+    case xnn_operator_type_global_average_pooling_ncw_f32:
+      return "Global Average Pooling (NCW, F32)";
+    case xnn_operator_type_hardswish_nc_f32:
+      return "HardSwish (NC, F32)";
+    case xnn_operator_type_leaky_relu_nc_q8:
+      return "Leaky ReLU (NC, Q8)";
+    case xnn_operator_type_max_pooling_nhwc_f32:
+      return "Max Pooling (NHWC, F32)";
+    case xnn_operator_type_max_pooling_nhwc_u8:
+      return "Max Pooling (NHWC, U8)";
+    case xnn_operator_type_maximum_nd_f32:
+      return "Maximum (ND, F32)";
+    case xnn_operator_type_minimum_nd_f32:
+      return "Minimum (ND, F32)";
+    case xnn_operator_type_multiply_nd_f32:
+      return "Multiply (ND, F32)";
+    case xnn_operator_type_prelu_nc_f32:
+      return "PReLU (NC, F32)";
+    case xnn_operator_type_resize_bilinear_nhwc_f32:
+      return "Resize Bilinear (NHWC, F32)";
+    case xnn_operator_type_sigmoid_nc_f32:
+      return "Sigmoid (NC, F32)";
+    case xnn_operator_type_sigmoid_nc_q8:
+      return "Sigmoid (NC, Q8)";
+    case xnn_operator_type_softmax_nc_f32:
+      return "Softmax (NC, F32)";
+    case xnn_operator_type_softmax_nc_q8:
+      return "Softmax (NC, Q8)";
+    case xnn_operator_type_subtract_nd_f32:
+      return "Subtract (ND, F32)";
+    case xnn_operator_type_unpooling_nhwc_x32:
+      return "Unpooling (NHWC, X32)";
+  }
+  XNN_UNREACHABLE;
+  return NULL;
+}
diff --git a/src/operators/add-nc.c b/src/operators/add-nc.c
index 1ec6cb5..dcbf36e 100644
--- a/src/operators/add-nc.c
+++ b/src/operators/add-nc.c
@@ -40,7 +40,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Add operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8));
     goto error;
   }
 
@@ -48,57 +49,60 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Add operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), channels);
     goto error;
   }
 
   if (a_stride < channels) {
     xnn_log_error(
-      "failed to create Add operator with A element stride of %zu: "
+      "failed to create %s operator with A element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      a_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), a_stride, channels);
     goto error;
   }
 
   if (b_stride < channels) {
     xnn_log_error(
-      "failed to create Add operator with B element stride of %zu: "
+      "failed to create %s operator with B element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      b_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), b_stride, channels);
     goto error;
   }
 
   if (sum_stride < channels) {
     xnn_log_error(
-      "failed to create Add operator with Sum element stride of %zu: "
+      "failed to create %s operator with Sum element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      sum_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), sum_stride, channels);
     goto error;
   }
 
   if (a_scale <= 0.0f || !isnormal(a_scale)) {
     xnn_log_error(
-      "failed to create Add operator with %.7g A scale: scale must be finite, normalized, and positive", a_scale);
+      "failed to create %s operator with %.7g A scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), a_scale);
     goto error;
   }
 
   if (b_scale <= 0.0f || !isnormal(b_scale)) {
     xnn_log_error(
-      "failed to create Add operator with %.7g B scale: scale must be finite, normalized, and positive", b_scale);
+      "failed to create %s operator with %.7g B scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), b_scale);
     goto error;
   }
 
   if (sum_scale <= 0.0f || !isnormal(sum_scale)) {
     xnn_log_error(
-      "failed to create Add operator with %.7g output scale: scale must be finite, normalized, and positive",
-      sum_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), sum_scale);
     goto error;
   }
 
   if (sum_min >= sum_max) {
     xnn_log_error(
-      "failed to create Add operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
-      sum_min, sum_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), sum_min, sum_max);
     goto error;
   }
 
@@ -107,16 +111,16 @@
   const float a_output_scale = a_scale / sum_scale;
   if (a_output_scale < 0x1.0p-14f || a_output_scale >= 0x1.0p+8f) {
     xnn_log_error(
-      "failed to create Add operator with %.7g A-to-output scale ratio: scale ratio must be in [2**-14, 2**8) range",
-      a_output_scale);
+      "failed to create %s operator with %.7g A-to-output scale ratio: scale ratio must be in [2**-14, 2**8) range",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), a_output_scale);
     goto error;
   }
 
   const float b_output_scale = b_scale / sum_scale;
   if (b_output_scale < 0x1.0p-14f || b_output_scale >= 0x1.0p+8f) {
     xnn_log_error(
-      "failed to create Add operator with %.7g A-to-output scale ratio: scale ratio must be in [2**-14, 2**8) range",
-      b_output_scale);
+      "failed to create %s operator with %.7g B-to-output scale ratio: scale ratio must be in [2**-14, 2**8) range",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8), b_output_scale);
     goto error;
   }
 
@@ -124,7 +128,9 @@
 
   add_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (add_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Add operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_add_nc_q8));
     goto error;
   }
 
@@ -165,7 +171,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Add operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32));
     goto error;
   }
 
@@ -173,50 +180,53 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create add operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32), channels);
     goto error;
   }
 
   if (a_stride < channels) {
     xnn_log_error(
-      "failed to create Add operator with A element stride of %zu: "
+      "failed to create %s operator with A element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      a_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32), a_stride, channels);
     goto error;
   }
 
   if (b_stride < channels) {
     xnn_log_error(
-      "failed to create Add operator with B element stride of %zu: "
+      "failed to create %s operator with B element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      b_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32), b_stride, channels);
     goto error;
   }
 
   if (sum_stride < channels) {
     xnn_log_error(
-      "failed to create Add operator with Sum element stride of %zu: "
+      "failed to create %s operator with Sum element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      sum_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32), sum_stride, channels);
     goto error;
   }
 
   if (isnan(sum_min)) {
     xnn_log_error(
-      "failed to create Add operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32));
     goto error;
   }
 
   if (isnan(sum_max)) {
     xnn_log_error(
-      "failed to create Add operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32));
     goto error;
   }
 
   if (sum_min >= sum_max) {
     xnn_log_error(
-      "failed to create Add operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
-      sum_min, sum_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32), sum_min, sum_max);
     goto error;
   }
 
@@ -224,7 +234,9 @@
 
   add_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (add_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Add operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_add_nc_f32));
     goto error;
   }
 
@@ -256,13 +268,16 @@
     pthreadpool_t threadpool)
 {
   if (add_op->type != xnn_operator_type_add_nc_q8) {
-    xnn_log_error("failed to setup Add (NC, Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8),
+      xnn_operator_type_to_string(add_op->type));
     return xnn_status_invalid_parameter;
   }
   add_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Add operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_q8));
     return xnn_status_uninitialized;
   }
 
@@ -319,13 +334,16 @@
     pthreadpool_t threadpool)
 {
   if (add_op->type != xnn_operator_type_add_nc_f32) {
-    xnn_log_error("failed to setup Add (NC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32),
+      xnn_operator_type_to_string(add_op->type));
     return xnn_status_invalid_parameter;
   }
   add_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Add operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_add_nc_f32));
     return xnn_status_uninitialized;
   }
 
diff --git a/src/operators/argmax-pooling-nhwc.c b/src/operators/argmax-pooling-nhwc.c
index 5b21747..907f39d 100644
--- a/src/operators/argmax-pooling-nhwc.c
+++ b/src/operators/argmax-pooling-nhwc.c
@@ -58,7 +58,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Argmax Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32));
     goto error;
   }
 
@@ -67,62 +68,61 @@
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to create Argmax Pooling operator with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32), pooling_width, pooling_height);
     goto error;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to create Argmax Pooling operator with 1 pooling element: "
-      "1x1 pooling is meaningless");
+      "failed to create %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32));
     goto error;
   }
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Argmax Pooling operator with %zu channels: "
-      "number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32), channels);
     goto error;
   }
 
   if (input_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Argmax Pooling operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32), input_pixel_stride, channels);
     goto error;
   }
 
   if (output_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Argmax Pooling operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32), output_pixel_stride, channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Argmax Pooling operator with NaN output lower bound: "
-      "lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Argmax Pooling operator with NaN output upper bound: "
-      "upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Argmax Pooling operator with [%.7g, %.7g] output range: "
+      "failed to create %s operator with [%.7g, %.7g] output range: "
       "lower bound must be below upper bound",
-      output_min, output_max);
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32), output_min, output_max);
     goto error;
   }
 
@@ -130,8 +130,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to create Argmax Pooling operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       goto error;
     }
@@ -141,7 +142,9 @@
 
   argmax_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (argmax_pooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Argmax Pooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32));
     goto error;
   }
 
@@ -187,20 +190,23 @@
     pthreadpool_t threadpool)
 {
   if (argmax_pooling_op->type != xnn_operator_type_argmax_pooling_nhwc_f32) {
-    xnn_log_error("failed to setup Argmax Pooling (NHWC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32),
+      xnn_operator_type_to_string(argmax_pooling_op->type));
     return xnn_status_invalid_parameter;
   }
   argmax_pooling_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Argmax Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32));
     return xnn_status_uninitialized;
   }
 
   if (input_width == 0 || input_height == 0) {
     xnn_log_error(
-      "failed to setup Argmax Pooling operator with %zux%zu input: input dimensions must be non-zero",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32), input_width, input_height);
     return xnn_status_invalid_parameter;
   }
 
@@ -251,9 +257,12 @@
     // Micro-kernel may read up to (mr - 1) elements after the end of indirection buffer.
     const size_t indirection_buffer_size = sizeof(void*) * ((mr - 1) + output_height * step_height);
 
-    const void** indirection_buffer = (const void**) xnn_reallocate_memory(argmax_pooling_op->indirection_buffer, indirection_buffer_size);
+    const void** indirection_buffer =
+      (const void**) xnn_reallocate_memory(argmax_pooling_op->indirection_buffer, indirection_buffer_size);
     if (indirection_buffer == NULL) {
-      xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
+      xnn_log_error(
+        "failed to allocate %zu bytes for %s operator indirection buffer",
+        indirection_buffer_size, xnn_operator_type_to_string(xnn_operator_type_argmax_pooling_nhwc_f32));
       return xnn_status_out_of_memory;
     }
     argmax_pooling_op->indirection_buffer = indirection_buffer;
diff --git a/src/operators/average-pooling-nhwc.c b/src/operators/average-pooling-nhwc.c
index 657c6c6..d03ba22 100644
--- a/src/operators/average-pooling-nhwc.c
+++ b/src/operators/average-pooling-nhwc.c
@@ -65,7 +65,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8));
     goto error;
   }
 
@@ -74,70 +75,67 @@
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8), pooling_width, pooling_height);
     goto error;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to create Average Pooling operator with 1 pooling element: 1x1 pooling is meaningless");
+      "failed to create %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8));
     goto error;
   }
 
   if (stride_height == 0 || stride_width == 0) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %" PRIu32 "x%" PRIu32 " stride: "
-      "stride dimensions must be non-zero",
-      stride_width, stride_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8), stride_width, stride_height);
     goto error;
   }
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8), channels);
     goto error;
   }
 
   if (input_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Average Pooling operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8), input_pixel_stride, channels);
     goto error;
   }
 
   if (output_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Average Pooling operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8), output_pixel_stride, channels);
     goto error;
   }
 
   if (input_scale <= 0.0f || !isnormal(input_scale)) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %.7g input scale: "
-      "scale must be finite, normalized, and positive",
-      input_scale);
+      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8), input_scale);
     goto error;
   }
 
   if (output_scale <= 0.0f || !isnormal(output_scale)) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %.7g output scale: "
-      "scale must be finite, normalized, and positive",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8), output_scale);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Average Pooling operator with [%" PRIu8 ", %" PRIu8 "] output range: "
-      "range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8), output_min, output_max);
     goto error;
   }
 
@@ -145,8 +143,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to create Average Pooling operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       goto error;
     }
@@ -157,16 +156,18 @@
   const float input_output_scale = input_scale / output_scale;
   if (input_output_scale < 0x1.0p-8f || input_output_scale >= 0x1.0p+8f) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %.7g input scale and %.7g output scale: "
+      "failed to create %s operator with %.7g input scale and %.7g output scale: "
       "input-to-output scale ratio (%.7f) must be in [2**-8, 2**8) range",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8),
       input_scale, output_scale, input_output_scale);
     goto error;
   }
 
   if (pooling_size >= 16777216) {
     xnn_log_error(
+      "failed to create %s operator with %" PRIu32 " (%" PRIu32 "x%" PRIu32 ") pooling elements: "
+      "failed to create %s operator with %"PRIu32" (%" PRIu32 "x%" PRIu32 ") pooling elements: "
       "the number of elements in the pooling area must be below 2**24",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8),
       pooling_size, pooling_width, pooling_height);
     goto error;
   }
@@ -175,14 +176,18 @@
 
   average_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (average_pooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Average Pooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8));
     goto error;
   }
 
-  void* zero_buffer = xnn_allocate_simd_memory(channels * sizeof(uint8_t) + XNN_EXTRA_BYTES);
+  const size_t zero_bytes = channels * sizeof(uint8_t) + XNN_EXTRA_BYTES;
+  void* zero_buffer = xnn_allocate_simd_memory(zero_bytes);
   if (zero_buffer == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Average Pooling zero padding",
-      channels * sizeof(uint8_t) + XNN_EXTRA_BYTES);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator zero padding",
+      zero_bytes, xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8));
     goto error;
   }
   memset(zero_buffer, input_zero_point, channels * sizeof(uint8_t));
@@ -252,7 +257,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32));
     goto error;
   }
 
@@ -261,65 +267,67 @@
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32), pooling_width, pooling_height);
     goto error;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to create Average Pooling operator with 1 pooling element: 1x1 pooling is meaningless");
+      "failed to create %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32));
     goto error;
   }
 
   if (stride_height == 0 || stride_width == 0) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %" PRIu32 "x%" PRIu32 " stride: "
-      "stride dimensions must be non-zero",
-      stride_width, stride_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32), stride_width, stride_height);
     goto error;
   }
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Average Pooling operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32), channels);
     goto error;
   }
 
   if (input_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Average Pooling operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32), input_pixel_stride, channels);
     goto error;
   }
 
   if (output_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Average Pooling operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32), output_pixel_stride, channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Average Pooling operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Average Pooling operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Average Pooling operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32), output_min, output_max);
     goto error;
   }
 
@@ -327,8 +335,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to create Average Pooling operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       goto error;
     }
@@ -338,14 +347,18 @@
 
   average_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (average_pooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Average Pooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32));
     goto error;
   }
 
-  void* zero_buffer = xnn_allocate_zero_simd_memory(channels * sizeof(float) + XNN_EXTRA_BYTES);
+  const size_t zero_bytes = channels * sizeof(float) + XNN_EXTRA_BYTES;
+  void* zero_buffer = xnn_allocate_zero_simd_memory(zero_bytes);
   if (zero_buffer == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Average Pooling zero padding",
-      channels * sizeof(float) + XNN_EXTRA_BYTES);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator zero padding",
+      zero_bytes, xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32));
     goto error;
   }
   average_pooling_op->zero_buffer = zero_buffer;
@@ -410,14 +423,15 @@
   average_pooling_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(average_pooling_op->type));
     return xnn_status_uninitialized;
   }
 
   if (input_width == 0 || input_height == 0) {
     xnn_log_error(
-      "failed to setup Average Pooling operator with %zux%zu input: input dimensions must be non-zero",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
+      xnn_operator_type_to_string(average_pooling_op->type), input_width, input_height);
     return xnn_status_invalid_parameter;
   }
 
@@ -506,9 +520,11 @@
       // Micro-kernel may read up to (mr - 1) elements after the end of indirection buffer.
       const size_t indirection_buffer_size = sizeof(void*) * ((mr - 1) + batch_size * output_height * step_height);
 
-      const void** indirection_buffer = (const void**) xnn_reallocate_memory(average_pooling_op->indirection_buffer, indirection_buffer_size);
+      const void** indirection_buffer =
+        (const void**) xnn_reallocate_memory(average_pooling_op->indirection_buffer, indirection_buffer_size);
       if (indirection_buffer == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
+        xnn_log_error("failed to allocate %zu bytes for %s operator indirection buffer",
+          indirection_buffer_size, xnn_operator_type_to_string(average_pooling_op->type));
         return xnn_status_out_of_memory;
       }
       average_pooling_op->indirection_buffer = indirection_buffer;
@@ -533,9 +549,11 @@
       /* This part is specific to FP32, needs revision if Q8 gets a PAVGPOOL micro-kernel */
       if (input_height != last_input_height || input_width != last_input_width) {
         const size_t pixelwise_buffer_size = output_height * output_width * sizeof(float);
-        float* pixelwise_buffer = (float*) xnn_reallocate_memory(average_pooling_op->pixelwise_buffer, pixelwise_buffer_size);
+        float* pixelwise_buffer =
+          (float*) xnn_reallocate_memory(average_pooling_op->pixelwise_buffer, pixelwise_buffer_size);
         if (pixelwise_buffer == NULL) {
-          xnn_log_error("failed to allocate %zu bytes for pixelwise buffer", pixelwise_buffer_size);
+          xnn_log_error("failed to allocate %zu bytes for %s operator pixelwise buffer",
+            pixelwise_buffer_size, xnn_operator_type_to_string(average_pooling_op->type));
           return xnn_status_out_of_memory;
         }
         average_pooling_op->pixelwise_buffer = pixelwise_buffer;
@@ -632,7 +650,9 @@
     pthreadpool_t threadpool)
 {
   if (average_pooling_op->type != xnn_operator_type_average_pooling_nhwc_q8) {
-    xnn_log_error("failed to setup Average Pooling (Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_q8),
+      xnn_operator_type_to_string(average_pooling_op->type));
     return xnn_status_invalid_parameter;
   }
 
@@ -677,7 +697,9 @@
     pthreadpool_t threadpool)
 {
   if (average_pooling_op->type != xnn_operator_type_average_pooling_nhwc_f32) {
-    xnn_log_error("failed to setup Average Pooling (F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_average_pooling_nhwc_f32),
+      xnn_operator_type_to_string(average_pooling_op->type));
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/binary-elementwise-nd.c b/src/operators/binary-elementwise-nd.c
index 806b8ea..cb9ee1e 100644
--- a/src/operators/binary-elementwise-nd.c
+++ b/src/operators/binary-elementwise-nd.c
@@ -28,7 +28,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Add/Subtract/Multiply/Divide/Minimum/Maximum operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(operator_type));
     goto error;
   }
 
@@ -36,20 +37,22 @@
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Add/Subtract/Multiply/Divide/Minimum/Maximum operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(operator_type));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Add/Subtract/Multiply/Divide/Minimum/Maximum operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(operator_type));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Add/Subtract/Multiply/Divide/Minimum/Maximum operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(operator_type), output_min, output_max);
     goto error;
   }
 
@@ -57,7 +60,9 @@
 
   binary_elementwise_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (binary_elementwise_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Add/Subtract/Multiply/Divide/Minimum/Maximum operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
     goto error;
   }
 
@@ -148,34 +153,41 @@
     size_t num_threads)
 {
   if (binary_elementwise_op->type != expected_operator_type) {
-    xnn_log_error("failed to setup Add/Subtract/Multiply/Divide/Minimum/Maximum (ND, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(expected_operator_type),
+      xnn_operator_type_to_string(binary_elementwise_op->type));
     return xnn_status_invalid_parameter;
   }
   binary_elementwise_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Add/Subtract/Multiply/Divide/Minimum/Maximum operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(binary_elementwise_op->type));
     return xnn_status_uninitialized;
   }
 
   if (max(num_input1_dims, num_input2_dims) > XNN_MAX_TENSOR_DIMS) {
     xnn_log_error(
-      "failed to setup Add/Subtract/Multiply/Divide/Minimum/Maximum operator with %zu and %zu dimensions in input shapes: "
+      "failed to setup %s operator with %zu and %zu dimensions in input shapes: "
       "the number of input dimensions must not exceed %d",
-      num_input1_dims, num_input2_dims, XNN_MAX_TENSOR_DIMS);
+      xnn_operator_type_to_string(binary_elementwise_op->type), num_input1_dims, num_input2_dims, XNN_MAX_TENSOR_DIMS);
     return xnn_status_unsupported_parameter;
   }
 
   for (size_t i = 0; i < num_input1_dims; i++) {
     if (input1_shape[i] == 0) {
-      xnn_log_error("failed to setup Add/Subtract/Multiply/Divide/Minimum/Maximum operator: shape dimension #%zu of input #1 is zero", i);
+      xnn_log_error(
+        "failed to setup %s operator: shape dimension #%zu of input #1 is zero",
+        xnn_operator_type_to_string(binary_elementwise_op->type), i);
       return xnn_status_invalid_parameter;
     }
   }
 
   for (size_t i = 0; i < num_input2_dims; i++) {
     if (input2_shape[i] == 0) {
-      xnn_log_error("failed to setup Add/Subtract/Multiply/Divide/Minimum/Maximum operator: shape dimension #%zu of input #2 is zero", i);
+      xnn_log_error(
+        "failed to setup %s operator: shape dimension #%zu of input #2 is zero",
+        xnn_operator_type_to_string(binary_elementwise_op->type), i);
       return xnn_status_invalid_parameter;
     }
   }
@@ -227,8 +239,10 @@
       compressed_input2_shape[num_compressed_dims - 1] *= input1_dim;
       compressed_output_shape[num_compressed_dims - 1] *= input1_dim;
     } else {
-      xnn_log_error("failed to setup Add/Subtract/Multiply/Divide/Minimum/Maximum operator: "
+      xnn_log_error(
+        "failed to setup %s operator: "
         "shape dimension #%zu of input1 (%zu) does not match shape dimension #%zu of input2 (%zu)",
+        xnn_operator_type_to_string(binary_elementwise_op->type),
         num_input1_dims - i, input1_dim, num_input2_dims - i, input2_dim);
       return xnn_status_invalid_parameter;
     }
diff --git a/src/operators/channel-pad-nc.c b/src/operators/channel-pad-nc.c
index ffe25b8..24f253b 100644
--- a/src/operators/channel-pad-nc.c
+++ b/src/operators/channel-pad-nc.c
@@ -30,7 +30,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Channel Pad operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_channel_pad_nc_x32));
     goto error;
   }
 
@@ -38,24 +39,25 @@
 
   if (input_channels == 0) {
     xnn_log_error(
-      "failed to create Channel Pad operator with %zu input channels: number of channels must be non-zero",
-      input_channels);
+      "failed to create %s operator with %zu input channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_channel_pad_nc_x32), input_channels);
     goto error;
   }
 
   if (input_stride < input_channels) {
     xnn_log_error(
-      "failed to create Channel Pad operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of input channels (%zu)",
-      input_stride, input_channels);
+      xnn_operator_type_to_string(xnn_operator_type_channel_pad_nc_x32), input_stride, input_channels);
     goto error;
   }
 
   const size_t output_channels = pad_before_channels + input_channels + pad_after_channels;
   if (output_stride < output_channels) {
     xnn_log_error(
-      "failed to create Channel Pad operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of output channels (%zu+%zu+%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_channel_pad_nc_x32),
       output_stride, pad_before_channels, input_channels, pad_after_channels);
     goto error;
   }
@@ -64,7 +66,9 @@
 
   channel_pad_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (channel_pad_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Channel Pad operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_channel_pad_nc_x32));
     goto error;
   }
 
@@ -96,13 +100,16 @@
     pthreadpool_t threadpool)
 {
   if (channel_pad_op->type != xnn_operator_type_channel_pad_nc_x32) {
-    xnn_log_error("failed to setup Channel Pad (X32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_channel_pad_nc_x32),
+      xnn_operator_type_to_string(channel_pad_op->type));
     return xnn_status_invalid_parameter;
   }
   channel_pad_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Channel Pad operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_channel_pad_nc_x32));
     return xnn_status_uninitialized;
   }
 
diff --git a/src/operators/channel-shuffle-nc.c b/src/operators/channel-shuffle-nc.c
index 75e877a..77ed9a7 100644
--- a/src/operators/channel-shuffle-nc.c
+++ b/src/operators/channel-shuffle-nc.c
@@ -32,7 +32,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Channel Shuffle operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(operator_type));
     goto error;
   }
 
@@ -40,31 +41,32 @@
 
   if (groups <= 1) {
     xnn_log_error(
-      "failed to create Channel Shuffle operator with %zu groups: at least two groups required", groups);
+      "failed to create %s operator with %zu groups: at least two groups required",
+      xnn_operator_type_to_string(operator_type), groups);
     goto error;
   }
 
   if (group_channels == 0) {
     xnn_log_error(
-      "failed to create Channel Shuffle operator with %zu group channels: number of group channels must be non-zero",
-      group_channels);
+      "failed to create %s operator with %zu group channels: number of group channels must be non-zero",
+      xnn_operator_type_to_string(operator_type), group_channels);
     goto error;
   }
 
   const size_t channels = groups * group_channels;
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create Channel Shuffle operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zux%zu)",
-      input_stride, groups, group_channels);
+      xnn_operator_type_to_string(operator_type), input_stride, groups, group_channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create Channel Shuffle operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zux%zu)",
-      output_stride, groups, group_channels);
+      xnn_operator_type_to_string(operator_type), output_stride, groups, group_channels);
     goto error;
   }
 
@@ -72,7 +74,9 @@
 
   channel_shuffle_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (channel_shuffle_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Channel Shuffle operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
     goto error;
   }
 
@@ -142,7 +146,8 @@
   channel_shuffle_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Channel Shuffle operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(channel_shuffle_op->type));
     return xnn_status_uninitialized;
   }
 
@@ -200,7 +205,9 @@
     pthreadpool_t threadpool)
 {
   if (channel_shuffle_op->type != xnn_operator_type_channel_shuffle_nc_x8) {
-    xnn_log_error("failed to setup Channel Shuffle (NC, X8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_channel_shuffle_nc_x8),
+      xnn_operator_type_to_string(channel_shuffle_op->type));
     return xnn_status_invalid_parameter;
   }
 
@@ -221,7 +228,9 @@
     pthreadpool_t threadpool)
 {
   if (channel_shuffle_op->type != xnn_operator_type_channel_shuffle_nc_x32) {
-    xnn_log_error("failed to setup Channel Shuffle (NC, X32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_channel_shuffle_nc_x32),
+      xnn_operator_type_to_string(channel_shuffle_op->type));
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/clamp-nc.c b/src/operators/clamp-nc.c
index 02a1f65..22aa917 100644
--- a/src/operators/clamp-nc.c
+++ b/src/operators/clamp-nc.c
@@ -32,7 +32,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Clamp operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8));
     goto error;
   }
 
@@ -40,30 +41,31 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Clamp operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create Clamp operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create Clamp operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8), output_stride, channels);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Clamp operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8), output_min, output_max);
     goto error;
   }
 
@@ -71,7 +73,9 @@
 
   clamp_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (clamp_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Clamp operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8));
     goto error;
   }
 
@@ -106,7 +110,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Clamp operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32));
     goto error;
   }
 
@@ -114,42 +119,45 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Clamp operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create Clamp operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create Clamp operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32), output_stride, channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Clamp operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Clamp operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Clamp operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32), output_min, output_max);
     goto error;
   }
 
@@ -157,7 +165,9 @@
 
   clamp_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (clamp_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Clamp operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32));
     goto error;
   }
 
@@ -187,13 +197,16 @@
     pthreadpool_t threadpool)
 {
   if (clamp_op->type != xnn_operator_type_clamp_nc_u8) {
-    xnn_log_error("failed to setup Clamp (NC, U8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8),
+      xnn_operator_type_to_string(clamp_op->type));
     return xnn_status_invalid_parameter;
   }
   clamp_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Clamp operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8));
     return xnn_status_uninitialized;
   }
 
@@ -247,13 +260,16 @@
     pthreadpool_t threadpool)
 {
   if (clamp_op->type != xnn_operator_type_clamp_nc_f32) {
-    xnn_log_error("failed to setup Clamp (NC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_f32),
+      xnn_operator_type_to_string(clamp_op->type));
     return xnn_status_invalid_parameter;
   }
   clamp_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Clamp operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_clamp_nc_u8));
     return xnn_status_uninitialized;
   }
 
diff --git a/src/operators/constant-pad-nd.c b/src/operators/constant-pad-nd.c
index d0e79fb..0dc5c32 100644
--- a/src/operators/constant-pad-nd.c
+++ b/src/operators/constant-pad-nd.c
@@ -27,7 +27,9 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Constant Pad operator: XNNPACK is not initialized");
+    xnn_log_error(
+      "failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_constant_pad_nd_x32));
     goto error;
   }
 
@@ -35,7 +37,9 @@
 
   constant_pad_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (constant_pad_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Constant Pad operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_constant_pad_nd_x32));
     goto error;
   }
 
@@ -75,27 +79,32 @@
     size_t num_threads)
 {
   if (constant_pad_op->type != expected_operator_type) {
-    xnn_log_error("failed to setup Constant Pad (ND, X32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(expected_operator_type),
+      xnn_operator_type_to_string(constant_pad_op->type));
     return xnn_status_invalid_parameter;
   }
   constant_pad_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Constant Pad operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(constant_pad_op->type));
     return xnn_status_uninitialized;
   }
 
   if (num_dims > XNN_MAX_TENSOR_DIMS) {
     xnn_log_error(
-      "failed to setup Constant Pad operator with %zu dimensions in input shape: "
+      "failed to setup %s operator with %zu dimensions in input shape: "
       "the number of input dimensions must not exceed %d",
-      num_dims, XNN_MAX_TENSOR_DIMS);
+      xnn_operator_type_to_string(constant_pad_op->type), num_dims, XNN_MAX_TENSOR_DIMS);
     return xnn_status_unsupported_parameter;
   }
 
   for (size_t i = 0; i < num_dims; i++) {
     if (input_shape[i] == 0) {
-      xnn_log_error("failed to setup Constant Pad operator: input shape dimension #%zu is zero", i);
+      xnn_log_error(
+        "failed to setup %s operator: input shape dimension #%zu is zero",
+        xnn_operator_type_to_string(constant_pad_op->type), i);
       return xnn_status_invalid_parameter;
     }
   }
diff --git a/src/operators/convolution-nchw.c b/src/operators/convolution-nchw.c
index f7b47bb..cffd251 100644
--- a/src/operators/convolution-nchw.c
+++ b/src/operators/convolution-nchw.c
@@ -59,7 +59,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Convolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
     goto error;
   }
 
@@ -67,74 +68,72 @@
 
   if (kernel_width == 0 || kernel_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
-      kernel_width, kernel_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), kernel_width, kernel_height);
     goto error;
   }
 
   if (subsampling_width == 0 || subsampling_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " subsampling: "
-      "subsampling dimensions must be non-zero",
-      subsampling_width, subsampling_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " subsampling: subsampling dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), subsampling_width, subsampling_height);
     goto error;
   }
 
   if (dilation_width == 0 || dilation_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), dilation_width, dilation_height);
     goto error;
   }
 
   if (groups == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 " groups: number of groups must be non-zero", groups);
+      "failed to create %s operator with %" PRIu32 " groups: number of groups must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), groups);
     goto error;
   }
 
   if (group_input_channels == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %zu input channels per group: "
-      "number of channels must be non-zero",
-      group_input_channels);
+      "failed to create %s operator with %zu input channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), group_input_channels);
     goto error;
   }
 
   if (group_output_channels == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %zu output channels per group: "
-      "number of channels must be non-zero",
-      group_output_channels);
+      "failed to create %s operator with %zu output channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), group_output_channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Convolution operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Convolution operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Convolution operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), output_min, output_max);
     goto error;
   }
 
   if ((flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) != 0 && group_input_channels != 1) {
     xnn_log_error(
-      "failed to create Depthwise Convolution operator with %zu input channels per group: "
-      "Depthwise Convolution must have exactly 1 input channel per group",
-      group_input_channels);
+      "failed to create depthwise %s operator with %zu input channels per group: "
+      "depthwise convolution must have exactly 1 input channel per group",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), group_input_channels);
     goto error;
   }
 
@@ -187,7 +186,8 @@
     dwconv_parameters = &xnn_params.f32.dwconv_chw_5x5s2;
   } else {
     xnn_log_error(
-      "failed to create Convolution operator: only selected Convolution parameters are supported");
+      "failed to create %s operator: only selected convolution parameters are supported",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
     goto error;
   }
 
@@ -195,7 +195,9 @@
 
   convolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (convolution_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Convolution operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
     goto error;
   }
 
@@ -275,7 +277,9 @@
 
       convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
       if (convolution_op->packed_weights == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator packed weights",
+          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
         goto error;
       }
       convolution_op->num_nonzero_values = num_nonzero_values;
@@ -386,7 +390,9 @@
         (group_input_channels * kernel_height * kernel_width + 1 /* bias */) * sizeof(float);
       convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
       if (convolution_op->packed_weights == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator packed weights",
+          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
         goto error;
       }
 
@@ -414,7 +420,9 @@
       const size_t packed_weights_size = groups * (kernel_height * kernel_width + 1 /* bias */) * sizeof(float);
       convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
       if (convolution_op->packed_weights == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator packed weights",
+          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
         goto error;
       }
 
@@ -488,14 +496,15 @@
   convolution_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Convolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
     return xnn_status_uninitialized;
   }
 
   if (input_width == 0 || input_height == 0) {
     xnn_log_error(
-      "failed to setup Convolution operator with %zux%zu input: input dimensions must be non-zero",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), input_width, input_height);
     return xnn_status_invalid_parameter;
   }
 
@@ -504,8 +513,9 @@
   const size_t input_neurons = groups * group_input_channels * input_height * input_width;
   if (input_batch_stride < input_neurons) {
     xnn_log_error(
-      "failed to setup Convolution operator with input batch stride of %zu: "
+      "failed to setup %s operator with input batch stride of %zu: "
       "stride must be at least as large as the number of input neurons (%" PRIu32 "x%zux%zux%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32),
       input_batch_stride, groups, group_input_channels, input_height, input_width);
     return xnn_status_invalid_parameter;
   }
@@ -525,8 +535,9 @@
   const size_t output_neurons = groups * group_output_channels * output_height * output_width;
   if (output_batch_stride < output_neurons) {
     xnn_log_error(
-      "failed to setup Convolution operator with output batch stride of %zu: "
+      "failed to setup %s operator with output batch stride of %zu: "
       "stride must be at least as large as the number of output neurons (%" PRIu32 "x%zux%zux%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32),
       output_batch_stride, groups, group_output_channels, output_height, output_width);
     return xnn_status_invalid_parameter;
   }
@@ -563,8 +574,9 @@
         const int32_t diff = input_channel_diffs[i];
         const int64_t increment = (int64_t) diff * input_size;
         if ((int64_t) (int32_t) increment != increment) {
-          xnn_log_error("failed to setup Convolution operator with sparse kernel representation: "
-            "input increment exceeds int32_t range");
+          xnn_log_error(
+            "failed to setup %s operator with sparse kernel representation: input increment exceeds int32_t range",
+            xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
           return xnn_status_unsupported_parameter;
         }
         input_increments[i] = (int32_t) increment;
@@ -606,7 +618,9 @@
       const size_t zero_size = (input_width * convolution_op->group_input_channels << log2_input_element_size) + XNN_EXTRA_BYTES;
       void* zero_buffer = xnn_reallocate_memory(convolution_op->zero_buffer, zero_size);
       if (zero_buffer == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for zero padding", sizeof(struct xnn_operator));
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator zero padding",
+          sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
         return xnn_status_out_of_memory;
       }
       memset(zero_buffer, 0, zero_size);
@@ -653,7 +667,9 @@
       const size_t zero_size = (input_width << log2_input_element_size) + 2 * XNN_EXTRA_BYTES;
       void* zero_buffer = xnn_reallocate_memory(convolution_op->zero_buffer, zero_size);
       if (zero_buffer == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for zero padding", sizeof(struct xnn_operator));
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator zero padding",
+          sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
         return xnn_status_out_of_memory;
       }
       memset(zero_buffer, 0, zero_size);
@@ -707,7 +723,9 @@
     pthreadpool_t threadpool)
 {
   if (convolution_op->type != xnn_operator_type_convolution_nchw_f32) {
-    xnn_log_error("failed to setup Convolution (NCHW, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32),
+      xnn_operator_type_to_string(convolution_op->type));
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/convolution-nhwc.c b/src/operators/convolution-nhwc.c
index b77577c..c740ba8 100644
--- a/src/operators/convolution-nhwc.c
+++ b/src/operators/convolution-nhwc.c
@@ -44,7 +44,7 @@
   return divide_round_up(input_dimension, subsampling_dimension);
 }
 
-static const struct dwconv_parameters* find_dwigemm_ukernel(
+static const struct dwconv_parameters* find_dwconv_ukernel(
     size_t kernel_size,
     const struct dwconv_parameters* ukernel,
     size_t num_ukernels)
@@ -91,7 +91,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Convolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8));
     goto error;
   }
 
@@ -99,54 +100,52 @@
 
   if (kernel_width == 0 || kernel_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
-      kernel_width, kernel_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), kernel_width, kernel_height);
     goto error;
   }
 
   if (subsampling_width == 0 || subsampling_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " subsampling: "
-      "subsampling dimensions must be non-zero",
-      subsampling_width, subsampling_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " subsampling: subsampling dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), subsampling_width, subsampling_height);
     goto error;
   }
 
   if (dilation_width == 0 || dilation_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), dilation_width, dilation_height);
     goto error;
   }
 
   if (groups == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 " groups: number of groups must be non-zero", groups);
+      "failed to create %s operator with %" PRIu32 " groups: number of groups must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), groups);
     goto error;
   }
 
   if (group_input_channels == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %zu input channels per group: "
-      "number of channels must be non-zero",
-      group_input_channels);
+      "failed to create %s operator with %zu input channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), group_input_channels);
     goto error;
   }
 
   if (group_output_channels == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %zu output channels per group: "
-      "number of channels must be non-zero",
-      group_output_channels);
+      "failed to create %s operator with %zu output channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), group_output_channels);
     goto error;
   }
 
   const size_t input_channels = groups * group_input_channels;
   if (input_pixel_stride < input_channels) {
     xnn_log_error(
-      "failed to create Convolution operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of input channels (%" PRIu32 "x%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8),
       input_pixel_stride, groups, group_input_channels);
     goto error;
   }
@@ -154,46 +153,46 @@
   const size_t output_channels = groups * group_output_channels;
   if (output_pixel_stride < output_channels) {
     xnn_log_error(
-      "failed to create Convolution operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of output channels (%" PRIu32 "x%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8),
       output_pixel_stride, groups, group_output_channels);
     goto error;
   }
 
   if (input_scale <= 0.0f || !isnormal(input_scale)) {
     xnn_log_error(
-      "failed to create Convolution operator with %.7g input scale: scale must be finite, normalized, and positive",
-      input_scale);
+      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), input_scale);
     goto error;
   }
 
   if (kernel_scale <= 0.0f || !isnormal(kernel_scale)) {
     xnn_log_error(
-      "failed to create Convolution operator with %.7g kernel scale: scale must be finite, normalized, and positive",
-      kernel_scale);
+      "failed to create %s operator with %.7g kernel scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), kernel_scale);
     goto error;
   }
 
   if (output_scale <= 0.0f || !isnormal(output_scale)) {
     xnn_log_error(
-      "failed to create Convolution operator with %.7g output scale: scale must be finite, normalized, and positive",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), output_scale);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Convolution operator with [%" PRIu8 ", %" PRIu8 "] output range: "
-      "range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), output_min, output_max);
     goto error;
   }
 
   if ((flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) != 0 && group_input_channels != 1) {
     xnn_log_error(
-      "failed to create Depthwise Convolution operator with %zu input channels per group: "
-      "Depthwise Convolution must have exactly 1 input channel per group",
-      group_input_channels);
+      "failed to create depthwise %s operator with %zu input channels per group: "
+      "depthwise convolution must have exactly 1 input channel per group",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8), group_input_channels);
     goto error;
   }
 
@@ -201,8 +200,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to create Convolution operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       goto error;
     }
@@ -213,8 +213,9 @@
   const float convolution_scale = input_scale * kernel_scale / output_scale;
   if (convolution_scale >= 1.0f) {
     xnn_log_error(
-      "failed to create Convolution operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
+      "failed to create %s operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
       "convolution scale %.7g is greater or equal to 1.0",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8),
       input_scale, kernel_scale, output_scale, convolution_scale);
     goto error;
   }
@@ -223,7 +224,9 @@
 
   convolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (convolution_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Convolution operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8));
     goto error;
   }
 
@@ -232,7 +235,7 @@
   enum xnn_ukernel_type ukernel_type = xnn_ukernel_type_none;
   const struct dwconv_parameters* dwconv_parameters = NULL;
   if (group_input_channels == 1 && group_output_channels == 1 && groups > 1 &&
-      (dwconv_parameters = find_dwigemm_ukernel(kernel_size, xnn_params.q8.dwconv, XNN_MAX_Q8_DWCONV_UKERNELS)) != NULL)
+      (dwconv_parameters = find_dwconv_ukernel(kernel_size, xnn_params.q8.dwconv, XNN_MAX_Q8_DWCONV_UKERNELS)) != NULL)
   {
     ukernel_type = xnn_ukernel_type_dwconv;
   } else if (kernel_size == 1 && subsampling_height == 1 && subsampling_width == 1 && !any_padding) {
@@ -252,7 +255,9 @@
       const size_t packed_weights_size = (sizeof(uint8_t) * kernel_size + sizeof(int32_t)) * c_stride;
       convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
       if (convolution_op->packed_weights == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator packed weights",
+          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8));
         goto error;
       }
 
@@ -291,7 +296,9 @@
         (sizeof(uint8_t) * kernel_size * k_stride + sizeof(int32_t)) * n_stride;
       convolution_op->packed_weights = xnn_allocate_simd_memory(packed_group_weights_size * groups);
       if (convolution_op->packed_weights == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_group_weights_size * groups);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator packed weights",
+          packed_group_weights_size * groups, xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8));
         goto error;
       }
       memset(convolution_op->packed_weights, kernel_zero_point, packed_group_weights_size * groups);
@@ -346,7 +353,9 @@
   if (any_padding || tf_same_padding) {
     void* zero_buffer = xnn_allocate_simd_memory(zero_size);
     if (zero_buffer == NULL) {
-      xnn_log_error("failed to allocate %zu bytes for zero padding", zero_size);
+      xnn_log_error(
+        "failed to allocate %zu bytes for %s operator zero padding",
+        zero_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8));
       goto error;
     }
     memset(zero_buffer, input_zero_point, zero_size);
@@ -420,7 +429,9 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Convolution operator: XNNPACK is not initialized");
+    xnn_log_error(
+      "failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
     goto error;
   }
 
@@ -428,54 +439,52 @@
 
   if (kernel_width == 0 || kernel_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
-      kernel_width, kernel_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), kernel_width, kernel_height);
     goto error;
   }
 
   if (subsampling_width == 0 || subsampling_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " subsampling: "
-      "subsampling dimensions must be non-zero",
-      subsampling_width, subsampling_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " subsampling: subsampling dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), subsampling_width, subsampling_height);
     goto error;
   }
 
   if (dilation_width == 0 || dilation_height == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), dilation_width, dilation_height);
     goto error;
   }
 
   if (groups == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %" PRIu32 " groups: number of groups must be non-zero", groups);
+      "failed to create %s operator with %" PRIu32 " groups: number of groups must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), groups);
     goto error;
   }
 
   if (group_input_channels == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %zu input channels per group: "
-      "number of channels must be non-zero",
-      group_input_channels);
+      "failed to create %s operator with %zu input channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), group_input_channels);
     goto error;
   }
 
   if (group_output_channels == 0) {
     xnn_log_error(
-      "failed to create Convolution operator with %zu output channels per group: "
-      "number of channels must be non-zero",
-      group_output_channels);
+      "failed to create %s operator with %zu output channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), group_output_channels);
     goto error;
   }
 
   const size_t input_channels = groups * group_input_channels;
   if (input_pixel_stride < input_channels) {
     xnn_log_error(
-      "failed to create Convolution operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of input channels (%" PRIu32 "x%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32),
       input_pixel_stride, groups, group_input_channels);
     goto error;
   }
@@ -483,37 +492,39 @@
   const size_t output_channels = groups * group_output_channels;
   if (output_pixel_stride < output_channels) {
     xnn_log_error(
-      "failed to create Convolution operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of output channels (%" PRIu32 "x%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32),
       output_pixel_stride, groups, group_output_channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Convolution operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Convolution operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Convolution operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), output_min, output_max);
     goto error;
   }
 
   if ((flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) != 0 && group_input_channels != 1) {
     xnn_log_error(
-      "failed to create Depthwise Convolution operator with %zu input channels per group: "
-      "Depthwise Convolution must have exactly 1 input channel per group",
-      group_input_channels);
+      "failed to create depthwise %s operator with %zu input channels per group: "
+      "depthwise convolution must have exactly 1 input channel per group",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32), group_input_channels);
     goto error;
   }
 
@@ -521,8 +532,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to create Convolution operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       goto error;
     }
@@ -532,7 +544,9 @@
 
   convolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (convolution_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Convolution operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
     goto error;
   }
 
@@ -544,7 +558,7 @@
   if (group_input_channels == 1 && group_output_channels == 1 && kernel_size == 1 && unit_subsampling && !any_padding) {
     ukernel_type = xnn_ukernel_type_vmulcaddc;
   } else if (group_input_channels == 1 && group_output_channels == 1 && (dwconv_parameters =
-               find_dwigemm_ukernel(kernel_size, xnn_params.f32.dwconv, XNN_MAX_F32_DWCONV_UKERNELS)) != NULL)
+               find_dwconv_ukernel(kernel_size, xnn_params.f32.dwconv, XNN_MAX_F32_DWCONV_UKERNELS)) != NULL)
   {
     ukernel_type = xnn_ukernel_type_dwconv;
   } else if (kernel_size == 1 && unit_subsampling && !any_padding) {
@@ -563,7 +577,9 @@
       const size_t packed_weights_size = 2 * sizeof(float) * c_stride;
       convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
       if (convolution_op->packed_weights == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator packed weights",
+          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
         goto error;
       }
 
@@ -586,7 +602,9 @@
       const size_t packed_weights_size = (kernel_size + 1) * sizeof(float) * c_stride;
       convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
       if (convolution_op->packed_weights == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator packed weights",
+          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
         goto error;
       }
 
@@ -627,7 +645,9 @@
       const size_t packed_group_weights_size = (kernel_size * k_stride + 1) * sizeof(float) * n_stride;
       convolution_op->packed_weights = xnn_allocate_simd_memory(packed_group_weights_size * groups);
       if (convolution_op->packed_weights == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_group_weights_size * groups);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator packed weights",
+          packed_group_weights_size * groups, xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
         goto error;
       }
       memset(convolution_op->packed_weights, 0, packed_group_weights_size * groups);
@@ -687,7 +707,9 @@
   if (any_padding || tf_same_padding) {
     void* zero_buffer = xnn_allocate_zero_simd_memory(zero_size);
     if (zero_buffer == NULL) {
-      xnn_log_error("failed to allocate %zu bytes for zero padding", zero_size);
+      xnn_log_error(
+        "failed to allocate %zu bytes for %s operator zero padding",
+        zero_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32));
       goto error;
     }
     convolution_op->zero_buffer = zero_buffer;
@@ -745,14 +767,15 @@
   convolution_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Convolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(convolution_op->type));
     return xnn_status_uninitialized;
   }
 
   if (input_width == 0 || input_height == 0) {
     xnn_log_error(
-      "failed to setup Convolution operator with %zux%zu input: input dimensions must be non-zero",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
+      xnn_operator_type_to_string(convolution_op->type), input_width, input_height);
     return xnn_status_invalid_parameter;
   }
 
@@ -808,7 +831,8 @@
 
       const size_t groups = convolution_op->groups;
       const size_t group_input_channels = convolution_op->group_input_channels;
-      const size_t w_stride = (round_up_po2(group_input_channels, convolution_op->ukernel.gemm.kr) << log2_filter_element_size) + bias_element_size;
+      const size_t w_stride = bias_element_size +
+        (round_up_po2(group_input_channels, convolution_op->ukernel.gemm.kr) << log2_filter_element_size);
       const size_t group_output_channels = convolution_op->group_output_channels;
 
       uint32_t mr = convolution_op->ukernel.gemm.mr;
@@ -910,7 +934,9 @@
       {
         const void** indirection_buffer = (const void**) xnn_reallocate_memory((void*) convolution_op->indirection_buffer, indirection_buffer_size);
         if (indirection_buffer == NULL) {
-          xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
+          xnn_log_error(
+            "failed to allocate %zu bytes for %s operator indirection buffer",
+            indirection_buffer_size, xnn_operator_type_to_string(convolution_op->type));
           return xnn_status_out_of_memory;
         }
         convolution_op->indirection_buffer = indirection_buffer;
@@ -1025,7 +1051,9 @@
       const void** indirection_buffer =
         (const void**) xnn_reallocate_memory((void*) convolution_op->indirection_buffer, indirection_buffer_size);
       if (indirection_buffer == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator indirection buffer",
+          indirection_buffer_size, xnn_operator_type_to_string(convolution_op->type));
         return xnn_status_out_of_memory;
       }
       convolution_op->indirection_buffer = indirection_buffer;
@@ -1106,7 +1134,9 @@
     pthreadpool_t threadpool)
 {
   if (convolution_op->type != xnn_operator_type_convolution_nhwc_q8) {
-    xnn_log_error("failed to setup Convolution (NHWC, Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_q8),
+      xnn_operator_type_to_string(convolution_op->type));
     return xnn_status_invalid_parameter;
   }
 
@@ -1132,7 +1162,9 @@
     pthreadpool_t threadpool)
 {
   if (convolution_op->type != xnn_operator_type_convolution_nhwc_f32) {
-    xnn_log_error("failed to setup Convolution (NHWC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_convolution_nhwc_f32),
+      xnn_operator_type_to_string(convolution_op->type));
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/deconvolution-nhwc.c b/src/operators/deconvolution-nhwc.c
index 81de3c9..9514e52 100644
--- a/src/operators/deconvolution-nhwc.c
+++ b/src/operators/deconvolution-nhwc.c
@@ -71,7 +71,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Deconvolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8));
     goto error;
   }
 
@@ -79,53 +80,52 @@
 
   if (kernel_width == 0 || kernel_height == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
-      kernel_width, kernel_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), kernel_width, kernel_height);
     goto error;
   }
 
   if (stride_width == 0 || stride_height == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
-      stride_width, stride_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), stride_width, stride_height);
     goto error;
   }
 
   if (dilation_width == 0 || dilation_height == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), dilation_width, dilation_height);
     goto error;
   }
 
   if (groups == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 " groups: number of groups must be non-zero", groups);
+      "failed to create %s operator with %" PRIu32 " groups: number of groups must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), groups);
     goto error;
   }
 
   if (group_input_channels == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %zu input channels per group: "
-      "number of channels must be non-zero",
-      group_input_channels);
+      "failed to create %s operator with %zu input channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), group_input_channels);
     goto error;
   }
 
   if (group_output_channels == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %zu output channels per group: "
-      "number of channels must be non-zero",
-      group_output_channels);
+      "failed to create %s operator with %zu output channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), group_output_channels);
     goto error;
   }
 
   const size_t input_channels = groups * group_input_channels;
   if (input_pixel_stride < input_channels) {
     xnn_log_error(
-      "failed to create Deconvolution operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of output channels (%" PRIu32 "x%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8),
       input_pixel_stride, groups, group_input_channels);
     goto error;
   }
@@ -133,46 +133,47 @@
   const size_t output_channels = groups * group_output_channels;
   if (output_pixel_stride < output_channels) {
     xnn_log_error(
-      "failed to create Deconvolution operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of output channels (%" PRIu32 "x%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8),
       output_pixel_stride, groups, group_output_channels);
     goto error;
   }
 
   if (input_scale <= 0.0f || !isnormal(input_scale)) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %.7g input scale: scale must be finite, normalized, and positive",
-      input_scale);
+      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), input_scale);
     goto error;
   }
 
   if (kernel_scale <= 0.0f || !isnormal(kernel_scale)) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %.7g kernel scale: scale must be finite, normalized, and positive",
-      kernel_scale);
+      "failed to create %s operator with %.7g kernel scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), kernel_scale);
     goto error;
   }
 
   if (output_scale <= 0.0f || !isnormal(output_scale)) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %.7g output scale: scale must be finite, normalized, and positive",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), output_scale);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Deconvolution operator with [%" PRIu8 ", %" PRIu8 "] output range: "
-      "range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8), output_min, output_max);
     goto error;
   }
 
   const bool any_padding = (output_padding_left | output_padding_top | output_padding_right | output_padding_bottom) != 0;
   if (any_padding && (flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+      "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
       "TensorFlow SAME padding can't be combined with explicit padding specification",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8),
       output_padding_top, output_padding_left, output_padding_bottom, output_padding_right);
     goto error;
   }
@@ -182,8 +183,9 @@
   const float deconvolution_scale = input_scale * kernel_scale / output_scale;
   if (deconvolution_scale >= 1.0f) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
-      "Deconvolution operator scale %.7g is greater or equal to 1.0",
+      "failed to create %s operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
+      "deconvolution scale %.7g is greater or equal to 1.0",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8),
       input_scale, kernel_scale, output_scale, deconvolution_scale);
     goto error;
   }
@@ -192,7 +194,9 @@
 
   deconvolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (deconvolution_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Deconvolution operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8));
     goto error;
   }
 
@@ -216,7 +220,9 @@
     const size_t subconvolution_buffer_size = sizeof(struct subconvolution_params) * subkernels;
     deconvolution_op->subconvolution_buffer = xnn_allocate_zero_memory(subconvolution_buffer_size);
     if (deconvolution_op->subconvolution_buffer == NULL) {
-      xnn_log_error("failed to allocate %zu bytes for subconvolution buffer", subconvolution_buffer_size);
+      xnn_log_error(
+        "failed to allocate %zu bytes for %s operator subconvolution buffer",
+        subconvolution_buffer_size, xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8));
       goto error;
     }
 
@@ -235,7 +241,9 @@
   }
   deconvolution_op->packed_weights = xnn_allocate_simd_memory(packed_group_weights_size * groups);
   if (deconvolution_op->packed_weights == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for packed weights", packed_group_weights_size * groups);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator packed weights",
+      packed_group_weights_size * groups, xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8));
     goto error;
   }
   memset(deconvolution_op->packed_weights, kernel_zero_point, packed_group_weights_size * groups);
@@ -263,7 +271,9 @@
   size_t zero_size = sizeof(uint8_t) * k_stride + XNN_EXTRA_BYTES;
   void* zero_buffer = xnn_allocate_simd_memory(zero_size);
   if (zero_buffer == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for zero padding", zero_size);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator zero padding",
+      zero_size, xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8));
     goto error;
   }
   memset(zero_buffer, input_zero_point, zero_size);
@@ -358,7 +368,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Deconvolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32));
     goto error;
   }
 
@@ -366,53 +377,52 @@
 
   if (kernel_width == 0 || kernel_height == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
-      kernel_width, kernel_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32), kernel_width, kernel_height);
     goto error;
   }
 
   if (stride_width == 0 || stride_height == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
-      stride_width, stride_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32), stride_width, stride_height);
     goto error;
   }
 
   if (dilation_width == 0 || dilation_height == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32), dilation_width, dilation_height);
     goto error;
   }
 
   if (groups == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 " groups: number of groups must be non-zero", groups);
+      "failed to create %s operator with %" PRIu32 " groups: number of groups must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32), groups);
     goto error;
   }
 
   if (group_input_channels == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %zu input channels per group: "
-      "number of channels must be non-zero",
-      group_input_channels);
+      "failed to create %s operator with %zu input channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32), group_input_channels);
     goto error;
   }
 
   if (group_output_channels == 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %zu output channels per group: "
-      "number of channels must be non-zero",
-      group_output_channels);
+      "failed to create %s operator with %zu output channels per group: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32), group_output_channels);
     goto error;
   }
 
   const size_t input_channels = groups * group_input_channels;
   if (input_pixel_stride < input_channels) {
     xnn_log_error(
-      "failed to create Deconvolution operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of output channels (%" PRIu32 "x%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32),
       input_pixel_stride, groups, group_input_channels);
     goto error;
   }
@@ -420,37 +430,40 @@
   const size_t output_channels = groups * group_output_channels;
   if (output_pixel_stride < output_channels) {
     xnn_log_error(
-      "failed to create Deconvolution operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of output channels (%" PRIu32 "x%zu)",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32),
       output_pixel_stride, groups, group_output_channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Deconvolution operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Deconvolution operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Deconvolution operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32), output_min, output_max);
     goto error;
   }
 
   const bool any_padding = (output_padding_left | output_padding_top | output_padding_right | output_padding_bottom) != 0;
   if (any_padding && (flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     xnn_log_error(
-      "failed to create Deconvolution operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+      "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
       "TensorFlow SAME padding can't be combined with explicit padding specification",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32),
       output_padding_top, output_padding_left, output_padding_bottom, output_padding_right);
     goto error;
   }
@@ -459,7 +472,9 @@
 
   deconvolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (deconvolution_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Deconvolution operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32));
     goto error;
   }
 
@@ -496,7 +511,9 @@
     const size_t subconvolution_buffer_size = sizeof(struct subconvolution_params) * subkernels;
     deconvolution_op->subconvolution_buffer = xnn_allocate_zero_memory(subconvolution_buffer_size);
     if (deconvolution_op->subconvolution_buffer == NULL) {
-      xnn_log_error("failed to allocate %zu bytes for subconvolution buffer", subconvolution_buffer_size);
+      xnn_log_error(
+        "failed to allocate %zu bytes for %s operator subconvolution buffer",
+        subconvolution_buffer_size, xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32));
       goto error;
     }
 
@@ -515,7 +532,9 @@
   }
   deconvolution_op->packed_weights = xnn_allocate_simd_memory(packed_group_weights_size * groups);
   if (deconvolution_op->packed_weights == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for packed weights", packed_group_weights_size * groups);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator packed weights",
+      packed_group_weights_size * groups, xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32));
     goto error;
   }
   memset(deconvolution_op->packed_weights, 0, packed_group_weights_size * groups);
@@ -541,7 +560,9 @@
   const size_t zero_size = k_stride * sizeof(float) + XNN_EXTRA_BYTES;
   void* zero_buffer = xnn_allocate_zero_simd_memory(zero_size);
   if (zero_buffer == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for zero padding", zero_size);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator zero padding",
+      zero_size, xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32));
     goto error;
   }
   deconvolution_op->zero_buffer = zero_buffer;
@@ -636,7 +657,9 @@
   {
     const void** indirection_buffer = (const void**) xnn_reallocate_memory(deconvolution_op->indirection_buffer, indirection_buffer_size);
     if (indirection_buffer == NULL) {
-      xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
+      xnn_log_error(
+        "failed to allocate %zu bytes for %s operator indirection buffer",
+        indirection_buffer_size, xnn_operator_type_to_string(deconvolution_op->type));
       return xnn_status_out_of_memory;
     }
     deconvolution_op->indirection_buffer = indirection_buffer;
@@ -774,7 +797,9 @@
       const void** indirection_buffer =
         (const void**) xnn_reallocate_memory(deconvolution_op->indirection_buffer, indirection_buffer_size);
       if (indirection_buffer == NULL) {
-        xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
+        xnn_log_error(
+          "failed to allocate %zu bytes for %s operator indirection buffer",
+          indirection_buffer_size, xnn_operator_type_to_string(deconvolution_op->type));
         return xnn_status_out_of_memory;
       }
       deconvolution_op->indirection_buffer = indirection_buffer;
@@ -895,30 +920,31 @@
   deconvolution_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Deconvolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(deconvolution_op->type));
     return xnn_status_uninitialized;
   }
 
   if (input_width == 0 || input_height == 0) {
     xnn_log_error(
-      "failed to setup Deconvolution with %zux%zu input: input dimensions must be non-zero",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
+      xnn_operator_type_to_string(deconvolution_op->type), input_width, input_height);
     return xnn_status_invalid_parameter;
   }
 
   if (adjustment_height >= deconvolution_op->stride_height) {
     xnn_log_error(
-      "failed to setup Deconvolution with %" PRIu32 " height adjustment: "
+      "failed to setup %s operator with %" PRIu32 " height adjustment: "
       "height adjustment must be smaller than height stride (%" PRIu32 ")",
-      adjustment_height, deconvolution_op->stride_height);
+      xnn_operator_type_to_string(deconvolution_op->type), adjustment_height, deconvolution_op->stride_height);
     return xnn_status_invalid_parameter;
   }
 
   if (adjustment_width >= deconvolution_op->stride_width) {
     xnn_log_error(
-      "failed to setup Deconvolution with %" PRIu32 " width adjustment: "
+      "failed to setup %s operator with %" PRIu32 " width adjustment: "
       "width adjustment must be smaller than width stride (%" PRIu32 ")",
-      adjustment_width, deconvolution_op->stride_width);
+      xnn_operator_type_to_string(deconvolution_op->type), adjustment_width, deconvolution_op->stride_width);
     return xnn_status_invalid_parameter;
   }
 
@@ -996,7 +1022,9 @@
     pthreadpool_t threadpool)
 {
   if (deconvolution_op->type != xnn_operator_type_deconvolution_nhwc_q8) {
-    xnn_log_error("failed to setup Deconvolution (NHWC, Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_q8),
+      xnn_operator_type_to_string(deconvolution_op->type));
     return xnn_status_invalid_parameter;
   }
 
@@ -1025,7 +1053,9 @@
     pthreadpool_t threadpool)
 {
   if (deconvolution_op->type != xnn_operator_type_deconvolution_nhwc_f32) {
-    xnn_log_error("failed to setup Deconvolution (NHWC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_deconvolution_nhwc_f32),
+      xnn_operator_type_to_string(deconvolution_op->type));
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/fully-connected-nc.c b/src/operators/fully-connected-nc.c
index fb796c7..85cd577 100644
--- a/src/operators/fully-connected-nc.c
+++ b/src/operators/fully-connected-nc.c
@@ -45,7 +45,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Fully Connected operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8));
     goto error;
   }
 
@@ -53,60 +54,59 @@
 
   if (input_channels == 0) {
     xnn_log_error(
-      "failed to create Fully Connected operator with %zu input channels: number of channels must be non-zero",
-      input_channels);
+      "failed to create %s operator with %zu input channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8), input_channels);
     goto error;
   }
 
   if (output_channels == 0) {
     xnn_log_error(
-      "failed to create Fully Connected operator with %zu output channels: number of channels must be non-zero",
-      output_channels);
+      "failed to create %s operator with %zu output channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8), output_channels);
     goto error;
   }
 
   if (input_stride < input_channels) {
     xnn_log_error(
-      "failed to create Fully Connected operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of input channels (%zu)",
-      input_stride, input_channels);
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8), input_stride, input_channels);
     goto error;
   }
 
   if (output_stride < output_channels) {
     xnn_log_error(
-      "failed to create Fully Connected operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of output channels (%zu)",
-      output_stride, output_channels);
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8), output_stride, output_channels);
     goto error;
   }
 
   if (input_scale <= 0.0f || !isnormal(input_scale)) {
     xnn_log_error(
-      "failed to create Fully Connected operator with %.7g input scale: scale must be finite, normalized, and positive",
-      input_scale);
+      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8), input_scale);
     goto error;
   }
 
   if (kernel_scale <= 0.0f || !isnormal(kernel_scale)) {
     xnn_log_error(
-      "failed to create Fully Connected operator with %.7g kernel scale: scale must be finite, normalized, and positive",
-      kernel_scale);
+      "failed to create %s operator with %.7g kernel scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8), kernel_scale);
     goto error;
   }
 
   if (output_scale <= 0.0f || !isnormal(output_scale)) {
     xnn_log_error(
-      "failed to create Fully Connected operator with %.7g output scale: scale must be finite, normalized, and positive",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8), output_scale);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Fully Connected operator with [%" PRIu8 ", %" PRIu8 "] output range: "
-      "range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8), output_min, output_max);
     goto error;
   }
 
@@ -115,8 +115,9 @@
   const float requantization_scale = input_scale * kernel_scale / output_scale;
   if (requantization_scale >= 1.0f) {
     xnn_log_error(
-      "failed to create Fully Connected operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
+      "failed to create %s operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
       "requantization scale %.7g is greater or equal to 1.0",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8),
       input_scale, kernel_scale, output_scale, requantization_scale);
     goto error;
   }
@@ -125,7 +126,8 @@
 
   fully_connected_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (fully_connected_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Fully Connected operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error("failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8));
     goto error;
   }
 
@@ -135,10 +137,11 @@
   const size_t n_stride = round_up(output_channels, nr);
   const size_t k_stride = round_up_po2(input_channels, kr);
 
-  fully_connected_op->packed_weights = xnn_allocate_simd_memory(n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t)));
+  const size_t packed_weights_size = n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t));
+  fully_connected_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
   if (fully_connected_op->packed_weights == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for packed weights",
-      n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t)));
+    xnn_log_error("failed to allocate %zu bytes for %s operator packed weights",
+      packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8));
     goto error;
   }
   memset(fully_connected_op->packed_weights, kernel_zero_point, n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t)));
@@ -207,7 +210,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Fully Connected operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32));
     goto error;
   }
 
@@ -215,50 +219,52 @@
 
   if (input_channels == 0) {
     xnn_log_error(
-      "failed to create Fully Connected operator with %zu input channels: number of channels must be non-zero",
-      input_channels);
+      "failed to create %s operator with %zu input channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32), input_channels);
     goto error;
   }
 
   if (output_channels == 0) {
     xnn_log_error(
-      "failed to create Fully Connected operator with %zu output channels: number of channels must be non-zero",
-      output_channels);
+      "failed to create %s operator with %zu output channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32), output_channels);
     goto error;
   }
 
   if (input_stride < input_channels) {
     xnn_log_error(
-      "failed to create Fully Connected operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of input channels (%zu)",
-      input_stride, input_channels);
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32), input_stride, input_channels);
     goto error;
   }
 
   if (output_stride < output_channels) {
     xnn_log_error(
-      "failed to create Fully Connected operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of output channels (%zu)",
-      output_stride, output_channels);
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32), output_stride, output_channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Fully Connected operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Fully Connected operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Fully Connected operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32), output_min, output_max);
     goto error;
   }
 
@@ -266,7 +272,9 @@
 
   fully_connected_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (fully_connected_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Fully Connected operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32));
     goto error;
   }
 
@@ -277,10 +285,12 @@
   const size_t n_stride = round_up(output_channels, nr);
   const size_t k_stride = round_up_po2(input_channels, kr);
 
-  fully_connected_op->packed_weights = xnn_allocate_simd_memory(n_stride * (k_stride * sizeof(float) + sizeof(float)));
+  const size_t packed_weights_size = n_stride * (k_stride * sizeof(float) + sizeof(float));
+  fully_connected_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
   if (fully_connected_op->packed_weights == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for packed weights",
-      n_stride * (k_stride * sizeof(float) + sizeof(float)));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator packed weights",
+      packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32));
     goto error;
   }
   memset(fully_connected_op->packed_weights, 0, n_stride * (k_stride * sizeof(float) + sizeof(float)));
@@ -348,7 +358,8 @@
   fully_connected_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Fully Connected operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(fully_connected_op->type));
     return xnn_status_uninitialized;
   }
 
@@ -420,7 +431,9 @@
     pthreadpool_t threadpool)
 {
   if (fully_connected_op->type != xnn_operator_type_fully_connected_nc_q8) {
-    xnn_log_error("failed to setup Fully Connected (NC, Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_q8),
+      xnn_operator_type_to_string(fully_connected_op->type));
     return xnn_status_invalid_parameter;
   }
 
@@ -444,7 +457,9 @@
     pthreadpool_t threadpool)
 {
   if (fully_connected_op->type != xnn_operator_type_fully_connected_nc_f32) {
-    xnn_log_error("failed to setup Fully Connected (NC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32),
+      xnn_operator_type_to_string(fully_connected_op->type));
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/global-average-pooling-ncw.c b/src/operators/global-average-pooling-ncw.c
index a18f4ac..80076b2 100644
--- a/src/operators/global-average-pooling-ncw.c
+++ b/src/operators/global-average-pooling-ncw.c
@@ -28,7 +28,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Global Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32));
     goto error;
   }
 
@@ -36,36 +37,37 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32), channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32), output_min, output_max);
     goto error;
   }
 
   status = xnn_status_unsupported_parameter;
   if (xnn_params.f32.gavgpool_cw.ukernel == NULL) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator: "
-      "only selected configurations parameters are supported");
+      "failed to create %s operator: only selected configurations parameters are supported",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32));
     goto error;
   }
 
@@ -73,7 +75,9 @@
 
   global_average_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (global_average_pooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Global Average Pooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32));
     goto error;
   }
 
@@ -102,18 +106,23 @@
     pthreadpool_t threadpool)
 {
   if (global_average_pooling_op->type != xnn_operator_type_global_average_pooling_ncw_f32) {
-    xnn_log_error("failed to setup Global Average Pooling (F32, NCW) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32),
+      xnn_operator_type_to_string(global_average_pooling_op->type));
     return xnn_status_invalid_parameter;
   }
   global_average_pooling_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Global Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32));
     return xnn_status_uninitialized;
   }
 
   if (width == 0) {
-    xnn_log_error("failed to setup Global Average Pooling operator with width %zu: width must be non-zero", width);
+    xnn_log_error(
+      "failed to setup %s operator with width %zu: width must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_ncw_f32), width);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/global-average-pooling-nwc.c b/src/operators/global-average-pooling-nwc.c
index 993a5f6..0ca860b 100644
--- a/src/operators/global-average-pooling-nwc.c
+++ b/src/operators/global-average-pooling-nwc.c
@@ -37,7 +37,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Global Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8));
     goto error;
   }
 
@@ -45,48 +46,45 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8), output_stride, channels);
     goto error;
   }
 
   if (input_scale <= 0.0f || !isnormal(input_scale)) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with %.7g input scale: "
-      "scale must be finite, normalized, and positive",
-      input_scale);
+      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8), input_scale);
     goto error;
   }
 
   if (output_scale <= 0.0f || !isnormal(output_scale)) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with %.7g output scale: "
-      "scale must be finite, normalized, and positive",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8), output_scale);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with [%" PRIu8 ", %" PRIu8 "] output range: "
-      "range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8), output_min, output_max);
     goto error;
   }
 
@@ -95,9 +93,8 @@
   const float input_output_scale = input_scale / output_scale;
   if (input_output_scale < 0x1.0p-8f || input_output_scale >= 0x1.0p+8f) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with %.7g input-to-output scale ratio: "
-      "scale ratio must be in [2**-8, 2**8) range",
-      input_output_scale);
+      "failed to create %s operator with %.7g input-to-output scale ratio: scale ratio must be in [2**-8, 2**8) range",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8), input_output_scale);
     goto error;
   }
 
@@ -105,14 +102,18 @@
 
   global_average_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (global_average_pooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Global Average Pooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8));
     goto error;
   }
 
-  void* zero_buffer = xnn_allocate_zero_simd_memory(channels * sizeof(uint8_t) + XNN_EXTRA_BYTES);
+  const size_t zero_size = channels * sizeof(uint8_t) + XNN_EXTRA_BYTES;
+  void* zero_buffer = xnn_allocate_zero_simd_memory(zero_size);
   if (zero_buffer == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Global Average Pooling zero padding",
-      channels * sizeof(uint8_t) + XNN_EXTRA_BYTES);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator zero padding",
+      zero_size, xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8));
     goto error;
   }
   global_average_pooling_op->zero_buffer = zero_buffer;
@@ -153,7 +154,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Global Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32));
     goto error;
   }
 
@@ -161,44 +163,45 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32), output_stride, channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Global Average Pooling operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32), output_min, output_max);
     goto error;
   }
 
@@ -206,14 +209,18 @@
 
   global_average_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (global_average_pooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Global Average Pooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32));
     goto error;
   }
 
-  void* zero_buffer = xnn_allocate_zero_simd_memory(channels * sizeof(float) + XNN_EXTRA_BYTES);
+  const size_t zero_size = channels * sizeof(float) + XNN_EXTRA_BYTES;
+  void* zero_buffer = xnn_allocate_zero_simd_memory(zero_size);
   if (zero_buffer == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Global Average Pooling zero padding",
-      channels * sizeof(float) + XNN_EXTRA_BYTES);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator zero padding",
+      zero_size, xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32));
     goto error;
   }
   global_average_pooling_op->zero_buffer = zero_buffer;
@@ -245,18 +252,22 @@
     pthreadpool_t threadpool)
 {
   if (global_average_pooling_op->type != xnn_operator_type_global_average_pooling_nwc_q8) {
-    xnn_log_error("failed to setup Global Average Pooling (NWC, Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8),
+      xnn_operator_type_to_string(global_average_pooling_op->type));
     return xnn_status_invalid_parameter;
   }
   global_average_pooling_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Global Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8));
     return xnn_status_uninitialized;
   }
 
   if (width == 0) {
-    xnn_log_error("failed to setup Global Average Pooling operator with width %zu: width must be non-zero", width);
+    xnn_log_error("failed to setup %s operator with width %zu: width must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_q8), width);
     return xnn_status_invalid_parameter;
   }
 
@@ -315,18 +326,22 @@
     pthreadpool_t threadpool)
 {
   if (global_average_pooling_op->type != xnn_operator_type_global_average_pooling_nwc_f32) {
-    xnn_log_error("failed to setup Global Average Pooling (NWC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32),
+      xnn_operator_type_to_string(global_average_pooling_op->type));
     return xnn_status_invalid_parameter;
   }
   global_average_pooling_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Global Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32));
     return xnn_status_uninitialized;
   }
 
   if (width == 0) {
-    xnn_log_error("failed to setup Global Average Pooling operator with width %zu: width must be non-zero", width);
+    xnn_log_error("failed to setup %s operator with width %zu: width must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_global_average_pooling_nwc_f32), width);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/hardswish-nc.c b/src/operators/hardswish-nc.c
index c3f4b6c..46adba3 100644
--- a/src/operators/hardswish-nc.c
+++ b/src/operators/hardswish-nc.c
@@ -27,7 +27,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create HardSwish operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_hardswish_nc_f32));
     goto error;
   }
 
@@ -35,23 +36,24 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create HardSwish operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_hardswish_nc_f32), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create HardSwish operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_hardswish_nc_f32), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create HardSwish operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_hardswish_nc_f32), output_stride, channels);
     goto error;
   }
 
@@ -59,7 +61,9 @@
 
   hardswish_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (hardswish_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for xnn_operator structure", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_hardswish_nc_f32));
     goto error;
   }
 
@@ -89,13 +93,16 @@
     pthreadpool_t threadpool)
 {
   if (hardswish_op->type != xnn_operator_type_hardswish_nc_f32) {
-    xnn_log_error("failed to setup HardSwish (F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_hardswish_nc_f32),
+      xnn_operator_type_to_string(hardswish_op->type));
     return xnn_status_invalid_parameter;
   }
   hardswish_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup HardSwish operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_hardswish_nc_f32));
     return xnn_status_uninitialized;
   }
 
diff --git a/src/operators/leaky-relu-nc.c b/src/operators/leaky-relu-nc.c
index fda6b3a..4eeb5f1 100644
--- a/src/operators/leaky-relu-nc.c
+++ b/src/operators/leaky-relu-nc.c
@@ -36,7 +36,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Leaky ReLU operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8));
     goto error;
   }
 
@@ -44,58 +45,59 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), output_stride, channels);
     goto error;
   }
 
   if (negative_slope <= 0.0f || !isnormal(negative_slope)) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with %.7g negative slope: slope must be finite, normalized, and positive",
-      negative_slope);
+      "failed to create %s operator with %.7g negative slope: slope must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), negative_slope);
     goto error;
   }
 
   if (negative_slope > 1.0f) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with %.7g negative slope: slope must not exceed 1.0", negative_slope);
+      "failed to create %s operator with %.7g negative slope: slope must not exceed 1.0",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), negative_slope);
     goto error;
   }
 
   if (input_scale <= 0.0f || !isnormal(input_scale)) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with %.7g input scale: scale must be finite, normalized, and positive",
-      input_scale);
+      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), input_scale);
     goto error;
   }
 
   if (output_scale <= 0.0f || !isnormal(output_scale)) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with %.7g output scale: scale must be finite, normalized, and positive",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), output_scale);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with [%" PRIu8 ", %" PRIu8 "] output range: "
-      "range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), output_min, output_max);
     goto error;
   }
 
@@ -104,9 +106,9 @@
   const float input_output_scale = input_scale / output_scale;
   if (input_output_scale < 0x1.0p-8f || input_output_scale >= 0x1.0p+8f) {
     xnn_log_error(
-      "failed to create Leaky ReLU operator with %.7g input-to-output scale ratio: "
+      "failed to create %s operator with %.7g input-to-output scale ratio: "
       "scale ratio must be in [2**-8, 2**8) range",
-      input_output_scale);
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8), input_output_scale);
     goto error;
   }
 
@@ -114,13 +116,17 @@
 
   leaky_relu_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (leaky_relu_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Leaky ReLU operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8));
     goto error;
   }
 
   leaky_relu_op->lookup_table = xnn_allocate_simd_memory(256 * sizeof(uint8_t));
   if (leaky_relu_op->lookup_table == NULL) {
-    xnn_log_error("failed to allocate 256 bytes for Leaky ReLU lookup table");
+    xnn_log_error(
+      "failed to allocate 256 bytes for %s operator lookup table",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8));
     goto error;
   }
 
@@ -164,13 +170,17 @@
     pthreadpool_t threadpool)
 {
   if (leaky_relu_op->type != xnn_operator_type_leaky_relu_nc_q8) {
-    xnn_log_error("failed to setup Leaky ReLU (NC, Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8),
+      xnn_operator_type_to_string(leaky_relu_op->type));
     return xnn_status_invalid_parameter;
   }
   leaky_relu_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Leaky ReLU operator: XNNPACK is not initialized");
+    xnn_log_error(
+      "failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_leaky_relu_nc_q8));
     return xnn_status_uninitialized;
   }
 
diff --git a/src/operators/max-pooling-nhwc.c b/src/operators/max-pooling-nhwc.c
index 908615e..0990a20 100644
--- a/src/operators/max-pooling-nhwc.c
+++ b/src/operators/max-pooling-nhwc.c
@@ -65,7 +65,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Max Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8));
     goto error;
   }
 
@@ -74,62 +75,60 @@
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to create Max Pooling operator with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8), pooling_width, pooling_height);
     goto error;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to create Max Pooling operator with 1 pooling element: 1x1 pooling is meaningless");
+      "failed to create %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8));
     goto error;
   }
 
   if (stride_height == 0 || stride_width == 0) {
     xnn_log_error(
-      "failed to create Max Pooling operator with %" PRIu32 "x%" PRIu32 " stride: "
-      "stride dimensions must be non-zero",
-      stride_width, stride_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8), stride_width, stride_height);
     goto error;
   }
 
   if (dilation_height == 0 || dilation_width == 0) {
     xnn_log_error(
-      "failed to create Max Pooling operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8), dilation_width, dilation_height);
     goto error;
   }
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Max Pooling operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8), channels);
     goto error;
   }
 
   if (input_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Max Pooling operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8), input_pixel_stride, channels);
     goto error;
   }
 
   if (output_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Max Pooling operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8), output_pixel_stride, channels);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Max Pooling operator with [%" PRIu8 ", %" PRIu8 "] output range: "
-      "range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8), output_min, output_max);
     goto error;
   }
 
@@ -137,8 +136,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to create Max Pooling operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       goto error;
     }
@@ -148,7 +148,9 @@
 
   max_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (max_pooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Max Pooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8));
     goto error;
   }
 
@@ -206,7 +208,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Max Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32));
     return xnn_status_uninitialized;
   }
 
@@ -215,74 +218,75 @@
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to create Max Pooling operator with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32),
       pooling_width, pooling_height);
     goto error;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to create Max Pooling operator with 1 pooling element: "
-      "1x1 pooling is meaningless");
+      "failed to create %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32));
     goto error;
   }
 
   if (stride_height == 0 || stride_width == 0) {
     xnn_log_error(
-      "failed to create Max Pooling operator with %" PRIu32 "x%" PRIu32 " stride: "
-      "stride dimensions must be non-zero",
-      stride_width, stride_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32), stride_width, stride_height);
     goto error;
   }
 
   if (dilation_height == 0 || dilation_width == 0) {
     xnn_log_error(
-      "failed to create Max Pooling operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32), dilation_width, dilation_height);
     goto error;
   }
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Max Pooling operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32), channels);
     goto error;
   }
 
   if (input_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Max Pooling operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32), input_pixel_stride, channels);
     goto error;
   }
 
   if (output_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Max Pooling operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32), output_pixel_stride, channels);
     goto error;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to create Max Pooling with NaN output lower bound: lower bound must be non-NaN");
+      "failed to create %s with NaN output lower bound: lower bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32));
     goto error;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to create Max Pooling with NaN output upper bound: upper bound must be non-NaN");
+      "failed to create %s with NaN output upper bound: upper bound must be non-NaN",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32));
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Max Pooling with [%.7g, %.7g] output range: lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to create %s with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32), output_min, output_max);
     goto error;
   }
 
@@ -290,8 +294,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to create Max Pooling operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to create %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       goto error;
     }
@@ -301,7 +306,9 @@
 
   max_pooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (max_pooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Max Pooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32));
     goto error;
   }
 
@@ -353,14 +360,15 @@
 
   if (!xnn_params.initialized) {
     xnn_log_error(
-      "failed to setup Max Pooling operator: XNNPACK is not initialized");
+      "failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(max_pooling_op->type));
     return xnn_status_uninitialized;
   }
 
   if (input_width == 0 || input_height == 0) {
     xnn_log_error(
-      "failed to setup Max Pooling operator with %zux%zu input: input dimensions must be non-zero",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
+      xnn_operator_type_to_string(max_pooling_op->type), input_width, input_height);
     return xnn_status_invalid_parameter;
   }
 
@@ -418,7 +426,8 @@
   {
     // Micro-kernel may read up to (mr - 1) elements after the end of indirection buffer.
     const size_t indirection_buffer_size = sizeof(void*) * ((mr - 1) + output_height * step_height);
-    const void** indirection_buffer = (const void**) xnn_reallocate_memory(max_pooling_op->indirection_buffer, indirection_buffer_size);
+    const void** indirection_buffer =
+      (const void**) xnn_reallocate_memory(max_pooling_op->indirection_buffer, indirection_buffer_size);
     if (indirection_buffer == NULL) {
       xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
       return xnn_status_out_of_memory;
@@ -476,7 +485,9 @@
     pthreadpool_t threadpool)
 {
   if (max_pooling_op->type != xnn_operator_type_max_pooling_nhwc_u8) {
-    xnn_log_error("failed to setup Max Pooling (NHWC, U8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_u8),
+      xnn_operator_type_to_string(max_pooling_op->type));
     return xnn_status_invalid_parameter;
   }
 
@@ -501,7 +512,9 @@
     pthreadpool_t threadpool)
 {
   if (max_pooling_op->type != xnn_operator_type_max_pooling_nhwc_f32) {
-    xnn_log_error("failed to setup Max Pooling (NHWC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_max_pooling_nhwc_f32),
+      xnn_operator_type_to_string(max_pooling_op->type));
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/operators/prelu-nc.c b/src/operators/prelu-nc.c
index 7536f20..c9d70c7 100644
--- a/src/operators/prelu-nc.c
+++ b/src/operators/prelu-nc.c
@@ -29,7 +29,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create PReLU operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32));
     goto error;
   }
 
@@ -37,23 +38,24 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create PReLU operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create PReLU operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create PReLU operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32), output_stride, channels);
     goto error;
   }
 
@@ -61,15 +63,18 @@
 
   prelu_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (prelu_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for PReLU operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32));
     goto error;
   }
 
-  const size_t packed_channels = round_up_po2(channels, XNN_EXTRA_BYTES / sizeof(float));
-  prelu_op->packed_weights = xnn_allocate_simd_memory(packed_channels * sizeof(float));
+  const size_t packed_weights_size = channels * sizeof(float) + XNN_EXTRA_BYTES;
+  prelu_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
   if (prelu_op->packed_weights == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for packed slope data",
-      packed_channels * sizeof(float));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator packed weights",
+      packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32));
     goto error;
   }
   memcpy(prelu_op->packed_weights, negative_slope, channels * sizeof(float));
@@ -99,13 +104,16 @@
     pthreadpool_t threadpool)
 {
   if (prelu_op->type != xnn_operator_type_prelu_nc_f32) {
-    xnn_log_error("failed to setup PReLU (NC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32),
+      xnn_operator_type_to_string(prelu_op->type));
     return xnn_status_invalid_parameter;
   }
   prelu_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup PReLU operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_prelu_nc_f32));
     return xnn_status_uninitialized;
   }
 
diff --git a/src/operators/resize-bilinear-nhwc.c b/src/operators/resize-bilinear-nhwc.c
index 3f350f9..8de7d69 100644
--- a/src/operators/resize-bilinear-nhwc.c
+++ b/src/operators/resize-bilinear-nhwc.c
@@ -31,7 +31,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Resize Bilinear operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32));
     goto error;
   }
 
@@ -39,24 +40,24 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Resize Bilinear operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32), channels);
     goto error;
   }
 
   if (input_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Resize Bilinear operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32), input_pixel_stride, channels);
     goto error;
   }
 
   if (output_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Resize Bilinear operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32), output_pixel_stride, channels);
     goto error;
   }
 
@@ -64,7 +65,9 @@
 
   resize_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (resize_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Resize Bilinear operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32));
     goto error;
   }
 
@@ -98,43 +101,44 @@
     pthreadpool_t threadpool)
 {
   if (resize_op->type != xnn_operator_type_resize_bilinear_nhwc_f32) {
-    xnn_log_error("failed to setup Resize Bilinear (NHWC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32),
+      xnn_operator_type_to_string(resize_op->type));
     return xnn_status_invalid_parameter;
   }
   resize_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Resize Bilinear operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32));
     return xnn_status_uninitialized;
   }
 
   if (input_width == 0 || input_height == 0) {
     xnn_log_error(
-      "failed to setup Resize Bilinear operator with %zux%zu input: input dimensions must be non-zero",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32), input_width, input_height);
     return xnn_status_invalid_parameter;
   }
 
   if (max(input_width, input_height) >= 16777216) {
     xnn_log_error(
-      "failed to setup Resize Bilinear operator with %zux%zu input: "
-      "input dimensions must be below 2**24",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be below 2**24",
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32), input_width, input_height);
     return xnn_status_unsupported_parameter;
   }
 
   if (output_width == 0 || output_height == 0) {
     xnn_log_error(
-      "failed to setup Resize Bilinear operator with %zux%zu output: output dimensions must be non-zero",
-      output_width, output_height);
+      "failed to setup %s operator with %zux%zu output: output dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32), output_width, output_height);
     return xnn_status_invalid_parameter;
   }
 
   if (max(output_width, output_height) >= 16777216) {
     xnn_log_error(
-      "failed to setup Resize Bilinear operator with %zux%zu output: "
-      "output dimensions must be below 2**24",
-      output_width, output_height);
+      "failed to setup %s operator with %zux%zu output: output dimensions must be below 2**24",
+      xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32), output_width, output_height);
     return xnn_status_unsupported_parameter;
   }
 
@@ -149,7 +153,9 @@
 
     const void** indirection_buffer = (const void**) xnn_reallocate_memory(resize_op->indirection_buffer, indirection_buffer_size);
     if (indirection_buffer == NULL) {
-      xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
+      xnn_log_error(
+        "failed to allocate %zu bytes for %s operator indirection buffer",
+        indirection_buffer_size, xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32));
       return xnn_status_out_of_memory;
     }
     resize_op->indirection_buffer = indirection_buffer;
@@ -158,7 +164,9 @@
     xnn_release_simd_memory(resize_op->packed_weights);
     resize_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
     if (resize_op->packed_weights == NULL) {
-      xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
+      xnn_log_error(
+        "failed to allocate %zu bytes for %s operator packed weights",
+        packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_resize_bilinear_nhwc_f32));
       return xnn_status_out_of_memory;
     }
   }
diff --git a/src/operators/sigmoid-nc.c b/src/operators/sigmoid-nc.c
index 8ea899c..8724153 100644
--- a/src/operators/sigmoid-nc.c
+++ b/src/operators/sigmoid-nc.c
@@ -35,7 +35,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Sigmoid operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8));
     goto error;
   }
 
@@ -43,44 +44,45 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Sigmoid operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create Sigmoid operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create Sigmoid operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8), output_stride, channels);
     goto error;
   }
 
   if (input_scale <= 0.0f || !isnormal(input_scale)) {
     xnn_log_error(
-      "failed to create Sigmoid operator with %.7g input scale: scale must be finite, normalized, and positive",
-      input_scale);
+      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8), input_scale);
     goto error;
   }
 
   if (output_scale <= 0.0f || !isnormal(output_scale)) {
     xnn_log_error(
-      "failed to create Sigmoid operator with %.7g output scale: scale must be finite, normalized, and positive",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8), output_scale);
     goto error;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to create Sigmoid operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
-      output_min, output_max);
+      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8), output_min, output_max);
     goto error;
   }
 
@@ -88,15 +90,15 @@
 
   if (output_scale != 0x1.0p-8f) {
     xnn_log_error(
-      "failed to create Sigmoid operator with %.7g output scale: only output scale of 1/256 is supported",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: only output scale of 1/256 is supported",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8), output_scale);
     goto error;
   }
 
   if (output_zero_point != 0) {
     xnn_log_error(
-      "failed to create Sigmoid operator with %" PRIu8 " output zero point: only output zero point of 0 is supported",
-      output_zero_point);
+      "failed to create %s operator with %" PRIu8 " output zero point: only output zero point of 0 is supported",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8), output_zero_point);
     goto error;
   }
 
@@ -104,13 +106,17 @@
 
   sigmoid_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (sigmoid_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Sigmoid operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8));
     goto error;
   }
 
   sigmoid_op->lookup_table = xnn_allocate_simd_memory(256 * sizeof(uint8_t));
   if (sigmoid_op->lookup_table == NULL) {
-    xnn_log_error("failed to allocate 256 bytes for Sigmoid lookup table");
+    xnn_log_error(
+      "failed to allocate 256 bytes for %s operator lookup table",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8));
     goto error;
   }
 
@@ -158,7 +164,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Sigmoid operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_f32));
     goto error;
   }
 
@@ -166,32 +173,24 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Sigmoid operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_f32), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create Sigmoid operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_f32), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create Sigmoid operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
-    goto error;
-  }
-
-  status = xnn_status_unsupported_hardware;
-
-  if (xnn_params.f32.sigmoid == NULL) {
-    xnn_log_error(
-      "failed to create Sigmoid operator: "
-      "only selected hardware configurations are supported");
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_f32), output_stride, channels);
     goto error;
   }
 
@@ -199,7 +198,9 @@
 
   sigmoid_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (sigmoid_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for xnn_operator structure", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_f32));
     goto error;
   }
 
@@ -228,13 +229,16 @@
     pthreadpool_t threadpool)
 {
   if (sigmoid_op->type != xnn_operator_type_sigmoid_nc_q8) {
-    xnn_log_error("failed to setup Sigmoid (Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8),
+      xnn_operator_type_to_string(sigmoid_op->type));
     return xnn_status_invalid_parameter;
   }
   sigmoid_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Sigmoid operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_q8));
     return xnn_status_uninitialized;
   }
 
@@ -292,13 +296,16 @@
     pthreadpool_t threadpool)
 {
   if (sigmoid_op->type != xnn_operator_type_sigmoid_nc_f32) {
-    xnn_log_error("failed to setup Sigmoid (F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_f32),
+      xnn_operator_type_to_string(sigmoid_op->type));
     return xnn_status_invalid_parameter;
   }
   sigmoid_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Sigmoid operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_sigmoid_nc_f32));
     return xnn_status_uninitialized;
   }
 
diff --git a/src/operators/softmax-nc.c b/src/operators/softmax-nc.c
index 6351ae8..f438103 100644
--- a/src/operators/softmax-nc.c
+++ b/src/operators/softmax-nc.c
@@ -33,7 +33,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create SoftMax operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8));
     goto error;
   }
 
@@ -41,37 +42,38 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create SoftMax operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create SoftMax operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create SoftMax operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8), output_stride, channels);
     goto error;
   }
 
   if (input_scale <= 0.0f || !isnormal(input_scale)) {
     xnn_log_error(
-      "failed to create SoftMax operator with %.7g input scale: scale must be finite, normalized, and positive",
-      input_scale);
+      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8), input_scale);
     goto error;
   }
 
   if (output_scale <= 0.0f || !isnormal(output_scale)) {
     xnn_log_error(
-      "failed to create SoftMax operator with %.7g output scale: scale must be finite, normalized, and positive",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8), output_scale);
     goto error;
   }
 
@@ -79,16 +81,15 @@
 
   if (output_scale != 0x1.0p-8f) {
     xnn_log_error(
-      "failed to create SoftMax operator with %.7g output scale: only output scale of 1/256 is supported",
-      output_scale);
+      "failed to create %s operator with %.7g output scale: only output scale of 1/256 is supported",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8), output_scale);
     goto error;
   }
 
   if (output_zero_point != 0) {
     xnn_log_error(
-      "failed to create SoftMax operator with %" PRIu8 " output zero point: "
-      "only output zero point of 0 is supported",
-      output_zero_point);
+      "failed to create %s operator with %" PRIu8 " output zero point: only output zero point of 0 is supported",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8), output_zero_point);
     goto error;
   }
 
@@ -96,13 +97,17 @@
 
   softmax_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (softmax_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for SoftMax operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8));
     goto error;
   }
 
   softmax_op->lookup_table = xnn_allocate_simd_memory(256 * sizeof(uint32_t));
   if (softmax_op->lookup_table == NULL) {
-    xnn_log_error("failed to allocate 256 bytes for SoftMax lookup table");
+    xnn_log_error(
+      "failed to allocate 256 bytes for %s operator lookup table",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8));
     goto error;
   }
 
@@ -138,13 +143,16 @@
     pthreadpool_t threadpool)
 {
   if (softmax_op->type != xnn_operator_type_softmax_nc_q8) {
-    xnn_log_error("failed to setup SoftMax (NC, Q8) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8),
+      xnn_operator_type_to_string(softmax_op->type));
     return xnn_status_invalid_parameter;
   }
   softmax_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup SoftMax operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_q8));
     return xnn_status_uninitialized;
   }
 
@@ -186,7 +194,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create SoftMax operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32));
     goto error;
   }
 
@@ -194,23 +203,24 @@
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create SoftMax operator with %zu channels: number of channels must be non-zero", channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32), channels);
     goto error;
   }
 
   if (input_stride < channels) {
     xnn_log_error(
-      "failed to create SoftMax operator with input element stride of %zu: "
+      "failed to create %s operator with input element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32), input_stride, channels);
     goto error;
   }
 
   if (output_stride < channels) {
     xnn_log_error(
-      "failed to create SoftMax operator with output element stride of %zu: "
+      "failed to create %s operator with output element stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32), output_stride, channels);
     goto error;
   }
 
@@ -218,7 +228,9 @@
 
   softmax_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (softmax_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for SoftMax operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32));
     goto error;
   }
 
@@ -247,13 +259,16 @@
     pthreadpool_t threadpool)
 {
   if (softmax_op->type != xnn_operator_type_softmax_nc_f32) {
-    xnn_log_error("failed to setup SoftMax (NC, F32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32),
+      xnn_operator_type_to_string(softmax_op->type));
     return xnn_status_invalid_parameter;
   }
   softmax_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup SoftMax operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_softmax_nc_f32));
     return xnn_status_uninitialized;
   }
 
diff --git a/src/operators/unpooling-nhwc.c b/src/operators/unpooling-nhwc.c
index a423053..fb22849 100644
--- a/src/operators/unpooling-nhwc.c
+++ b/src/operators/unpooling-nhwc.c
@@ -46,7 +46,8 @@
   enum xnn_status status = xnn_status_uninitialized;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to create Unpooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
     goto error;
   }
 
@@ -55,38 +56,39 @@
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to create Unpooling operator with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), pooling_width, pooling_height);
     goto error;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to create Unpooling operator with 1 pooling element: 1x1 unpooling is meaningless");
+      "failed to create %s operator with 1 pooling element: 1x1 unpooling is meaningless",
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
     goto error;
   }
 
   if (channels == 0) {
     xnn_log_error(
-      "failed to create Unpooling operator with %zu channels: number of channels must be non-zero",
-      channels);
+      "failed to create %s operator with %zu channels: number of channels must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), channels);
     goto error;
   }
 
   if (input_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Unpooling operator with input pixel stride of %zu: "
+      "failed to create %s operator with input pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      input_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), input_pixel_stride, channels);
     goto error;
   }
 
   if (output_pixel_stride < channels) {
     xnn_log_error(
-      "failed to create Unpooling operator with output pixel stride of %zu: "
+      "failed to create %s operator with output pixel stride of %zu: "
       "stride must be at least as large as the number of channels (%zu)",
-      output_pixel_stride, channels);
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), output_pixel_stride, channels);
     goto error;
   }
 
@@ -94,7 +96,9 @@
 
   unpooling_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
   if (unpooling_op == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for Unpooling operator descriptor", sizeof(struct xnn_operator));
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator descriptor",
+      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
     goto error;
   }
 
@@ -133,20 +137,23 @@
     pthreadpool_t threadpool)
 {
   if (unpooling_op->type != xnn_operator_type_unpooling_nhwc_x32) {
-    xnn_log_error("failed to setup Unpooling (X32) operator: operator type mismatch");
+    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32),
+      xnn_operator_type_to_string(unpooling_op->type));
     return xnn_status_invalid_parameter;
   }
   unpooling_op->state = xnn_run_state_invalid;
 
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to setup Unpooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
     return xnn_status_uninitialized;
   }
 
   if (input_width == 0 || input_height == 0) {
     xnn_log_error(
-      "failed to setup Unpooling operator with %zux%zu input: input dimensions must be non-zero",
-      input_width, input_height);
+      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
+      xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32), input_width, input_height);
     return xnn_status_invalid_parameter;
   }
 
@@ -186,10 +193,11 @@
   const size_t pooling_size = pooling_height * pooling_width;
 
   const size_t indirection_buffer_size = sizeof(void*) * (batch_size * input_height * input_width * pooling_size);
-
   void** indirection_buffer = (void**) xnn_reallocate_memory(unpooling_op->indirection_buffer, indirection_buffer_size);
   if (indirection_buffer == NULL) {
-    xnn_log_error("failed to allocate %zu bytes for indirection buffer", indirection_buffer_size);
+    xnn_log_error(
+      "failed to allocate %zu bytes for %s operator indirection buffer",
+      indirection_buffer_size, xnn_operator_type_to_string(xnn_operator_type_unpooling_nhwc_x32));
     return xnn_status_out_of_memory;
   }
   unpooling_op->indirection_buffer = (const void**) indirection_buffer;
diff --git a/src/subgraph-strings.c b/src/subgraph-strings.c
new file mode 100644
index 0000000..f1e0025
--- /dev/null
+++ b/src/subgraph-strings.c
@@ -0,0 +1,54 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/log.h>
+#include <xnnpack/subgraph.h>
+
+
+const char* xnn_node_type_to_string(enum xnn_node_type type) {
+  switch (type) {
+    case xnn_node_type_invalid:
+      return "Invalid";
+    case xnn_node_type_add2:
+      return "Add2";
+    case xnn_node_type_argmax_pooling_2d:
+      return "ArgMax Pooling 2D";
+    case xnn_node_type_average_pooling_2d:
+      return "Average Pooling 2D";
+    case xnn_node_type_clamp:
+      return "Clamp";
+    case xnn_node_type_constant_pad:
+      return "Constant Pad";
+    case xnn_node_type_convolution_2d:
+      return "Convolution 2D";
+    case xnn_node_type_deconvolution_2d:
+      return "Deconvolution 2D";
+    case xnn_node_type_depthwise_convolution_2d:
+      return "Depthwise Convolution 2D";
+    case xnn_node_type_fully_connected:
+      return "Fully Connected";
+    case xnn_node_type_hardswish:
+      return "HardSwish";
+    case xnn_node_type_multiply2:
+      return "Multiply2";
+    case xnn_node_type_max_pooling_2d:
+      return "Max Pooling 2D";
+    case xnn_node_type_prelu:
+      return "PReLU";
+    case xnn_node_type_sigmoid:
+      return "Sigmoid";
+    case xnn_node_type_softmax:
+      return "Softmax";
+    case xnn_node_type_unpooling_2d:
+      return "Unpooling 2D";
+  }
+  XNN_UNREACHABLE;
+  return NULL;
+}
diff --git a/src/subgraph/add2.c b/src/subgraph/add2.c
index 78a0e65..971daea 100644
--- a/src/subgraph/add2.c
+++ b/src/subgraph/add2.c
@@ -23,48 +23,50 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Add2 operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_add2));
     return xnn_status_uninitialized;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to define Add2 operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_add2));
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to define Add2 operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_add2));
     return xnn_status_invalid_parameter;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to define Add2 operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_node_type_to_string(xnn_node_type_add2), output_min, output_max);
     return xnn_status_invalid_parameter;
   }
 
   if (input1_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Add2 operator with the first input ID #%" PRIu32 ": invalid Value ID",
-      input1_id);
+      "failed to define %s operator with the first input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_add2), input1_id);
     return xnn_status_invalid_parameter;
   }
 
   if (input2_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Add2 operator with the second input ID #%" PRIu32 ": invalid Value ID",
-      input2_id);
+      "failed to define %s operator with the second input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_add2), input2_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Add2 operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_add2), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/argmax-pooling-2d.c b/src/subgraph/argmax-pooling-2d.c
index 623300b..e81ab8a 100644
--- a/src/subgraph/argmax-pooling-2d.c
+++ b/src/subgraph/argmax-pooling-2d.c
@@ -27,43 +27,45 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define ArgMax Pooling: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d));
     return xnn_status_uninitialized;
   }
 
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to define ArgMax Pooling with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), pooling_width, pooling_height);
     return xnn_status_invalid_parameter;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to define ArgMax Pooling with 1 pooling element: 1x1 pooling is meaningless");
+      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define ArgMax Pooling with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_value_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define ArgMax Pooling with output value ID #%" PRIu32 ": invalid Value ID",
-      output_value_id);
+      "failed to define %s operator with output value ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), output_value_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_index_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define ArgMax Pooling with output index ID #%" PRIu32 ": invalid Value ID",
-      output_index_id);
+      "failed to define %s operator with output index ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_argmax_pooling_2d), output_index_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/average-pooling-2d.c b/src/subgraph/average-pooling-2d.c
index c8efaeb..9d56f00 100644
--- a/src/subgraph/average-pooling-2d.c
+++ b/src/subgraph/average-pooling-2d.c
@@ -30,49 +30,53 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Average Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d));
     return xnn_status_uninitialized;
   }
 
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to define Average Pooling operator with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), pooling_width, pooling_height);
     return xnn_status_invalid_parameter;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to define Average Pooling operator with 1 pooling element: 1x1 pooling is meaningless");
+      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (stride_height == 0 || stride_width == 0) {
     xnn_log_error(
-      "failed to define Average Pooling operator with %" PRIu32 "x%" PRIu32 " stride: "
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " stride: "
       "stride dimensions must be non-zero",
-      stride_width, stride_height);
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), stride_width, stride_height);
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to define Average Pooling with NaN output lower bound: lower bound must be non-NaN");
+      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to define Average Pooling with NaN output upper bound: upper bound must be non-NaN");
+      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to define Average Pooling with [%.7g, %.7g] output range: lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), output_min, output_max);
     return xnn_status_invalid_parameter;
   }
 
@@ -80,8 +84,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to define Average Pooling operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to define %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_node_type_to_string(xnn_node_type_average_pooling_2d),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       return xnn_status_invalid_parameter;
     }
@@ -89,15 +94,15 @@
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Average Pooling operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Average Pooling operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_average_pooling_2d), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/clamp.c b/src/subgraph/clamp.c
index 8bb5dfa..8886496 100644
--- a/src/subgraph/clamp.c
+++ b/src/subgraph/clamp.c
@@ -22,21 +22,22 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Clamp operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_clamp));
     return xnn_status_uninitialized;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Clamp operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_clamp), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Clamp operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_clamp), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/convolution-2d.c b/src/subgraph/convolution-2d.c
index 44e8b73..0057d25 100644
--- a/src/subgraph/convolution-2d.c
+++ b/src/subgraph/convolution-2d.c
@@ -37,100 +37,99 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Convolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d));
     return xnn_status_uninitialized;
   }
 
   if (kernel_width == 0 || kernel_height == 0) {
     xnn_log_error(
-      "failed to define Convolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
-      kernel_width, kernel_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), kernel_width, kernel_height);
     return xnn_status_invalid_parameter;
   }
 
   if (subsampling_width == 0 || subsampling_height == 0) {
     xnn_log_error(
-      "failed to define Convolution operator with %" PRIu32 "x%" PRIu32 " subsampling: "
-      "subsampling dimensions must be non-zero",
-      subsampling_width, subsampling_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " subsampling: subsampling dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), subsampling_width, subsampling_height);
     return xnn_status_invalid_parameter;
   }
 
   if (dilation_width == 0 || dilation_height == 0) {
     xnn_log_error(
-      "failed to define Convolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), dilation_width, dilation_height);
     return xnn_status_invalid_parameter;
   }
 
   if (groups == 0) {
     xnn_log_error(
-      "failed to define Convolution operator with %" PRIu32 " groups: number of groups must be non-zero", groups);
+      "failed to define %s operator with %" PRIu32 " groups: number of groups must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), groups);
     return xnn_status_invalid_parameter;
   }
 
   if (group_input_channels == 0) {
     xnn_log_error(
-      "failed to define Convolution operator with %zu input channels per group: "
-      "number of channels must be non-zero",
-      group_input_channels);
+      "failed to define %s operator with %zu input channels per group: number of channels must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), group_input_channels);
     return xnn_status_invalid_parameter;
   }
 
   if (group_output_channels == 0) {
     xnn_log_error(
-      "failed to define Convolution operator with %zu output channels per group: "
-      "number of channels must be non-zero",
-      group_output_channels);
+      "failed to define %s operator with %zu output channels per group: number of channels must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), group_output_channels);
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to define Convolution operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to define Convolution operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to define Convolution operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), output_min, output_max);
     return xnn_status_invalid_parameter;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Convolution operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (filter_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Convolution operator with filter ID #%" PRIu32 ": invalid Value ID",
-      filter_id);
+      "failed to define %s operator with filter ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), filter_id);
     return xnn_status_invalid_parameter;
   }
 
   if (bias_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Convolution operator with bias ID #%" PRIu32 ": invalid Value ID",
-      bias_id);
+      "failed to define %s operator with bias ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), bias_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Convolution operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_convolution_2d), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/deconvolution-2d.c b/src/subgraph/deconvolution-2d.c
index 1ac2654..f87be9e 100644
--- a/src/subgraph/deconvolution-2d.c
+++ b/src/subgraph/deconvolution-2d.c
@@ -39,100 +39,99 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Deconvolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d));
     return xnn_status_uninitialized;
   }
 
   if (kernel_width == 0 || kernel_height == 0) {
     xnn_log_error(
-      "failed to define Deconvolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
-      kernel_width, kernel_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), kernel_width, kernel_height);
     return xnn_status_invalid_parameter;
   }
 
   if (upsampling_width == 0 || upsampling_height == 0) {
     xnn_log_error(
-      "failed to define Deconvolution operator with %" PRIu32 "x%" PRIu32 " upsampling: "
-      "upsampling dimensions must be non-zero",
-      upsampling_width, upsampling_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " upsampling: upsampling dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), upsampling_width, upsampling_height);
     return xnn_status_invalid_parameter;
   }
 
   if (dilation_width == 0 || dilation_height == 0) {
     xnn_log_error(
-      "failed to define Deconvolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), dilation_width, dilation_height);
     return xnn_status_invalid_parameter;
   }
 
   if (groups == 0) {
     xnn_log_error(
-      "failed to define Deconvolution operator with %" PRIu32 " groups: number of groups must be non-zero", groups);
+      "failed to define %s operator with %" PRIu32 " groups: number of groups must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), groups);
     return xnn_status_invalid_parameter;
   }
 
   if (group_input_channels == 0) {
     xnn_log_error(
-      "failed to define Deconvolution operator with %zu input channels per group: "
-      "number of channels must be non-zero",
-      group_input_channels);
+      "failed to define %s operator with %zu input channels per group: number of channels must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), group_input_channels);
     return xnn_status_invalid_parameter;
   }
 
   if (group_output_channels == 0) {
     xnn_log_error(
-      "failed to define Deconvolution operator with %zu output channels per group: "
-      "number of channels must be non-zero",
-      group_output_channels);
+      "failed to define %s operator with %zu output channels per group: number of channels must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), group_output_channels);
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to define Deconvolution operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to define Deconvolution operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to define Deconvolution operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), output_min, output_max);
     return xnn_status_invalid_parameter;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Deconvolution operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (filter_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Deconvolution operator with filter ID #%" PRIu32 ": invalid Value ID",
-      filter_id);
+      "failed to define %s operator with filter ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), filter_id);
     return xnn_status_invalid_parameter;
   }
 
   if (bias_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Deconvolution operator with bias ID #%" PRIu32 ": invalid Value ID",
-      bias_id);
+      "failed to define %s operator with bias ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), bias_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Deconvolution operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_deconvolution_2d), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/depthwise-convolution-2d.c b/src/subgraph/depthwise-convolution-2d.c
index 58614f5..733ea5b 100644
--- a/src/subgraph/depthwise-convolution-2d.c
+++ b/src/subgraph/depthwise-convolution-2d.c
@@ -36,94 +36,92 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Depthwise Convolution operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d));
     return xnn_status_uninitialized;
   }
 
   if (kernel_width == 0 || kernel_height == 0) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
-      kernel_width, kernel_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), kernel_width, kernel_height);
     return xnn_status_invalid_parameter;
   }
 
   if (subsampling_width == 0 || subsampling_height == 0) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with %" PRIu32 "x%" PRIu32 " subsampling: "
-      "subsampling dimensions must be non-zero",
-      subsampling_width, subsampling_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " subsampling: subsampling dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), subsampling_width, subsampling_height);
     return xnn_status_invalid_parameter;
   }
 
   if (dilation_width == 0 || dilation_height == 0) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), dilation_width, dilation_height);
     return xnn_status_invalid_parameter;
   }
 
   if (depth_multiplier == 0) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with %" PRIu32 " depth multiplier: "
-      "depth multiplier must be non-zero",
-      depth_multiplier);
+      "failed to define %s operator with %" PRIu32 " depth multiplier: depth multiplier must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), depth_multiplier);
     return xnn_status_invalid_parameter;
   }
 
   if (input_channels == 0) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with %zu input channels: "
-      "number of channels must be non-zero",
-      input_channels);
+      "failed to define %s operator with %zu input channels: number of channels must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), input_channels);
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), output_min, output_max);
     return xnn_status_invalid_parameter;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (filter_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with filter ID #%" PRIu32 ": invalid Value ID",
-      filter_id);
+      "failed to define %s operator with filter ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), filter_id);
     return xnn_status_invalid_parameter;
   }
 
   if (bias_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with bias ID #%" PRIu32 ": invalid Value ID",
-      bias_id);
+      "failed to define %s operator with bias ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), bias_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Depthwise Convolution operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_depthwise_convolution_2d), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/fully-connected.c b/src/subgraph/fully-connected.c
index 3e2e6b2..0cedcd5 100644
--- a/src/subgraph/fully-connected.c
+++ b/src/subgraph/fully-connected.c
@@ -24,55 +24,57 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Fully Connected operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_fully_connected));
     return xnn_status_uninitialized;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to define Fully Connected operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_fully_connected));
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to define Fully Connected operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_fully_connected));
     return xnn_status_invalid_parameter;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to define Fully Connected operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_node_type_to_string(xnn_node_type_fully_connected), output_min, output_max);
     return xnn_status_invalid_parameter;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Fully Connected operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_fully_connected), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (filter_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Fully Connected operator with filter ID #%" PRIu32 ": invalid Value ID",
-      filter_id);
+      "failed to define %s operator with filter ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_fully_connected), filter_id);
     return xnn_status_invalid_parameter;
   }
 
   if (bias_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Fully Connected operator with bias ID #%" PRIu32 ": invalid Value ID",
-      bias_id);
+      "failed to define %s operator with bias ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_fully_connected), bias_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Fully Connected operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_fully_connected), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/hardswish.c b/src/subgraph/hardswish.c
index b281bbd..36d58d2 100644
--- a/src/subgraph/hardswish.c
+++ b/src/subgraph/hardswish.c
@@ -20,21 +20,22 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define HardSwish operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_hardswish));
     return xnn_status_uninitialized;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define HardSwish operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_hardswish), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define HardSwish operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_hardswish), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/max-pooling-2d.c b/src/subgraph/max-pooling-2d.c
index 0f4e5f6..895765c 100644
--- a/src/subgraph/max-pooling-2d.c
+++ b/src/subgraph/max-pooling-2d.c
@@ -32,57 +32,59 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Max Pooling operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d));
     return xnn_status_uninitialized;
   }
 
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to define Max Pooling operator with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), pooling_width, pooling_height);
     return xnn_status_invalid_parameter;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to define Max Pooling operator with 1 pooling element: 1x1 pooling is meaningless");
+      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (stride_height == 0 || stride_width == 0) {
     xnn_log_error(
-      "failed to define Max Pooling operator with %" PRIu32 "x%" PRIu32 " stride: "
-      "stride dimensions must be non-zero",
-      stride_width, stride_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " stride: stride dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), stride_width, stride_height);
     return xnn_status_invalid_parameter;
   }
 
   if (dilation_height == 0 || dilation_width == 0) {
     xnn_log_error(
-      "failed to define Max Pooling operator with %" PRIu32 "x%" PRIu32 " dilation: "
-      "dilation dimensions must be non-zero",
-      dilation_width, dilation_height);
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), dilation_width, dilation_height);
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to define Max Pooling with NaN output lower bound: lower bound must be non-NaN");
+      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to define Max Pooling with NaN output upper bound: upper bound must be non-NaN");
+      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to define Max Pooling with [%.7g, %.7g] output range: lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), output_min, output_max);
     return xnn_status_invalid_parameter;
   }
 
@@ -90,8 +92,9 @@
   if ((flags & XNN_FLAG_TENSORFLOW_SAME_PADDING) != 0) {
     if (any_padding) {
       xnn_log_error(
-        "failed to define Max Pooling operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32" padding: "
+        "failed to define %s operator with %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32 " padding: "
         "TensorFlow SAME padding can't be combined with explicit padding specification",
+        xnn_node_type_to_string(xnn_node_type_max_pooling_2d),
         input_padding_top, input_padding_left, input_padding_bottom, input_padding_right);
       return xnn_status_invalid_parameter;
     }
@@ -99,15 +102,15 @@
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Max Pooling operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Max Pooling operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_max_pooling_2d), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/multiply2.c b/src/subgraph/multiply2.c
index 4289db2..c3094f9 100644
--- a/src/subgraph/multiply2.c
+++ b/src/subgraph/multiply2.c
@@ -23,48 +23,50 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Multiply2 operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_multiply2));
     return xnn_status_uninitialized;
   }
 
   if (isnan(output_min)) {
     xnn_log_error(
-      "failed to define Multiply2 operator with NaN output lower bound: lower bound must be non-NaN");
+      "failed to define %s operator with NaN output lower bound: lower bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_multiply2));
     return xnn_status_invalid_parameter;
   }
 
   if (isnan(output_max)) {
     xnn_log_error(
-      "failed to define Multiply2 operator with NaN output upper bound: upper bound must be non-NaN");
+      "failed to define %s operator with NaN output upper bound: upper bound must be non-NaN",
+      xnn_node_type_to_string(xnn_node_type_multiply2));
     return xnn_status_invalid_parameter;
   }
 
   if (output_min >= output_max) {
     xnn_log_error(
-      "failed to define Multiply2 operator with [%.7g, %.7g] output range: "
-      "lower bound must be below upper bound",
-      output_min, output_max);
+      "failed to define %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
+      xnn_node_type_to_string(xnn_node_type_multiply2), output_min, output_max);
     return xnn_status_invalid_parameter;
   }
 
   if (input1_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Multiply2 operator with the first input ID #%" PRIu32 ": invalid Value ID",
-      input1_id);
+      "failed to define %s operator with the first input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_multiply2), input1_id);
     return xnn_status_invalid_parameter;
   }
 
   if (input2_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Multiply2 operator with the second input ID #%" PRIu32 ": invalid Value ID",
-      input2_id);
+      "failed to define %s operator with the second input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_multiply2), input2_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Multiply2 operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_multiply2), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/prelu.c b/src/subgraph/prelu.c
index 7c93337..26a2625 100644
--- a/src/subgraph/prelu.c
+++ b/src/subgraph/prelu.c
@@ -21,28 +21,29 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define PReLU operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_prelu));
     return xnn_status_uninitialized;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define PReLU operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_prelu), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (slope_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define PReLU operator with slope ID #%" PRIu32 ": invalid Value ID",
-      slope_id);
+      "failed to define %s operator with slope ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_prelu), slope_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define PReLU operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_prelu), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/sigmoid.c b/src/subgraph/sigmoid.c
index 281a9e4..fb5a592 100644
--- a/src/subgraph/sigmoid.c
+++ b/src/subgraph/sigmoid.c
@@ -20,21 +20,22 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Sigmoid operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_sigmoid));
     return xnn_status_uninitialized;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Sigmoid operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_sigmoid), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Sigmoid operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_sigmoid), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/softmax.c b/src/subgraph/softmax.c
index 3812d92..85e355d 100644
--- a/src/subgraph/softmax.c
+++ b/src/subgraph/softmax.c
@@ -20,21 +20,22 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define SoftMax operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_softmax));
     return xnn_status_uninitialized;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define SoftMax operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_softmax), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define SoftMax operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_softmax), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/static-constant-pad.c b/src/subgraph/static-constant-pad.c
index 7b9e40c..9e1c365 100644
--- a/src/subgraph/static-constant-pad.c
+++ b/src/subgraph/static-constant-pad.c
@@ -26,21 +26,22 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define Constant Pad operator: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_constant_pad));
     return xnn_status_uninitialized;
   }
 
   if (input_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Constant Pad operator with input ID #%" PRIu32 ": invalid Value ID",
-      input_id);
+      "failed to define %s operator with input ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_constant_pad), input_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define Constant Pad operator with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_constant_pad), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/subgraph/unpooling-2d.c b/src/subgraph/unpooling-2d.c
index 7d70ee0..74e0d1e 100644
--- a/src/subgraph/unpooling-2d.c
+++ b/src/subgraph/unpooling-2d.c
@@ -27,43 +27,45 @@
   uint32_t flags)
 {
   if (!xnn_params.initialized) {
-    xnn_log_error("failed to define UnPooling: XNNPACK is not initialized");
+    xnn_log_error("failed to define %s operator: XNNPACK is not initialized",
+      xnn_node_type_to_string(xnn_node_type_unpooling_2d));
     return xnn_status_uninitialized;
   }
 
   const uint32_t pooling_size = pooling_height * pooling_width;
   if (pooling_size == 0) {
     xnn_log_error(
-      "failed to define UnPooling with %" PRIu32 "x%" PRIu32 " pooling size: "
+      "failed to define %s operator with %" PRIu32 "x%" PRIu32 " pooling size: "
       "pooling size dimensions must be non-zero",
-      pooling_width, pooling_height);
+      xnn_node_type_to_string(xnn_node_type_unpooling_2d), pooling_width, pooling_height);
     return xnn_status_invalid_parameter;
   }
 
   if (pooling_size == 1) {
     xnn_log_error(
-      "failed to define UnPooling with 1 pooling element: 1x1 pooling is meaningless");
+      "failed to define %s operator with 1 pooling element: 1x1 pooling is meaningless",
+      xnn_node_type_to_string(xnn_node_type_unpooling_2d));
     return xnn_status_invalid_parameter;
   }
 
   if (input_value_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define UnPooling with input value ID #%" PRIu32 ": invalid Value ID",
-      input_value_id);
+      "failed to define %s operator with input value ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_unpooling_2d), input_value_id);
     return xnn_status_invalid_parameter;
   }
 
   if (input_index_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define UnPooling with input index ID #%" PRIu32 ": invalid Value ID",
-      input_index_id);
+      "failed to define %s operator with input index ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_unpooling_2d), input_index_id);
     return xnn_status_invalid_parameter;
   }
 
   if (output_id >= subgraph->num_values) {
     xnn_log_error(
-      "failed to define UnPooling with output ID #%" PRIu32 ": invalid Value ID",
-      output_id);
+      "failed to define %s operator with output ID #%" PRIu32 ": invalid Value ID",
+      xnn_node_type_to_string(xnn_node_type_unpooling_2d), output_id);
     return xnn_status_invalid_parameter;
   }
 
diff --git a/src/xnnpack/log.h b/src/xnnpack/log.h
index cb2ad0d..0c1f39f 100644
--- a/src/xnnpack/log.h
+++ b/src/xnnpack/log.h
@@ -12,6 +12,9 @@
 
 #include <clog.h>
 
+#include <xnnpack/operator.h>
+#include <xnnpack/subgraph.h>
+
 #ifndef XNN_LOG_LEVEL
   #error "Undefined XNN_LOG_LEVEL"
 #endif
@@ -21,3 +24,24 @@
 CLOG_DEFINE_LOG_WARNING(xnn_log_warning, "XNNPACK", XNN_LOG_LEVEL);
 CLOG_DEFINE_LOG_ERROR(xnn_log_error, "XNNPACK", XNN_LOG_LEVEL);
 CLOG_DEFINE_LOG_FATAL(xnn_log_fatal, "XNNPACK", XNN_LOG_LEVEL);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if XNN_LOG_LEVEL == 0
+  inline static const char* xnn_operator_type_to_string(enum xnn_operator_type type) {
+    return "Unknown";
+  }
+
+  inline static const char* xnn_node_type_to_string(enum xnn_node_type type) {
+    return "Unknown";
+  }
+#else
+  const char* xnn_operator_type_to_string(enum xnn_operator_type type);
+  const char* xnn_node_type_to_string(enum xnn_node_type type);
+#endif
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
diff --git a/src/xnnpack/operator.h b/src/xnnpack/operator.h
index 1cfc3b1..cd39d62 100644
--- a/src/xnnpack/operator.h
+++ b/src/xnnpack/operator.h
@@ -45,7 +45,7 @@
 };
 
 enum xnn_operator_type {
-  xnn_operator_type_none = 0,
+  xnn_operator_type_invalid = 0,
   xnn_operator_type_add_nc_f32,
   xnn_operator_type_add_nd_f32,
   xnn_operator_type_add_nc_q8,