Include FP16 operators in XNNPACK build for TensorFlow Lite

This will result in some code-size regression, particularly on AArch64, where the native FP16 microkernels are compiled in.

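Clients that want to keep FP16/X16 operators out of their binaries can still strip them by defining the same macros externally, e.g. via copts in a .bazelrc (a sketch; the exact mechanism depends on the client build setup):

    build --copt=-DXNN_NO_F16_OPERATORS --copt=-DXNN_NO_X16_OPERATORS
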
PiperOrigin-RevId: 414923640
diff --git a/BUILD.bazel b/BUILD.bazel
index 450ac25..49577ce 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -8116,10 +8116,7 @@
         ":xnn_enable_hmp_explicit_false": ["-DXNN_MAX_UARCH_TYPES=1"],
         "//conditions:default": [],
     }),
-    defines = [
-        "XNN_NO_F16_OPERATORS",
-        "XNN_NO_X16_OPERATORS",
-    ] + select({
+    defines = select({
         ":xnn_enable_qs8_explicit_true": [],
         ":xnn_enable_qs8_explicit_false": [
             "XNN_NO_QC8_OPERATORS",