Apply all necessary tilings and interchanges to get a macro-kernel

This is the second patch that applies the BLIS matmul optimization pattern
to matmul kernels
(http://www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf).
BLIS implements gemm as three nested loops around a macro-kernel, plus
two packing routines. The macro-kernel is implemented in terms
of two additional loops around a micro-kernel. The micro-kernel
is a loop around a rank-1 (i.e., outer product) update. In this change
we create the BLIS macro-kernel by applying a combination of tiling
and loop interchange. Subsequent changes will implement the packing
transformation.
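
For reference, the overall loop structure we are aiming for looks roughly as
follows. This is only a schematic sketch of the BLIS algorithm (C += A * B) to
illustrate the loop ordering; it is not the code Polly generates, it assumes
the problem sizes are multiples of the block sizes, and the packing of the
blocks of A and B into contiguous buffers is deferred to the follow-up packing
patch:

  // Schematic BLIS gemm structure; Mc, Nc, Kc are the macro-kernel
  // parameters, Mr and Nr the micro-kernel parameters.
  void gemm(int M, int N, int K, double *A, double *B, double *C,
            int Mc, int Nc, int Kc, int Mr, int Nr) {
    for (int jc = 0; jc < N; jc += Nc)     // Loops around the macro-kernel:
      for (int pc = 0; pc < K; pc += Kc)   // block the N, K and M dimensions
        for (int ic = 0; ic < M; ic += Mc) // by Nc, Kc and Mc, respectively.
          // Macro-kernel: two loops around the micro-kernel.
          for (int jr = jc; jr < jc + Nc; jr += Nr)
            for (int ir = ic; ir < ic + Mc; ir += Mr)
              // Micro-kernel: a loop around a rank-1 (outer product) update
              // of the Mr x Nr block of C that is kept in registers.
              for (int k = pc; k < pc + Kc; ++k)
                for (int i = ir; i < ir + Mr; ++i)
                  for (int j = jr; j < jr + Nr; ++j)
                    C[i * N + j] += A[i * K + k] * B[k * N + j];
  }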

Reviewed-by: Tobias Grosser <tobias@grosser.es>

Differential Revision: http://reviews.llvm.org/D21491

llvm-svn: 276627
diff --git a/polly/lib/Transform/ScheduleOptimizer.cpp b/polly/lib/Transform/ScheduleOptimizer.cpp
index e1689e5..acecf9d 100644
--- a/polly/lib/Transform/ScheduleOptimizer.cpp
+++ b/polly/lib/Transform/ScheduleOptimizer.cpp
@@ -134,6 +134,17 @@
              "instructions per clock cycle."),
     cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
 
+static cl::list<int>
+    CacheLevelAssociativity("polly-target-cache-level-associativity",
+                            cl::desc("The associativity of each cache level."),
+                            cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
+                            cl::cat(PollyCategory));
+
+static cl::list<int> CacheLevelSizes(
+    "polly-target-cache-level-sizes",
+    cl::desc("The size of each cache level specified in bytes."), cl::Hidden,
+    cl::ZeroOrMore, cl::CommaSeparated, cl::cat(PollyCategory));
+
 static cl::opt<int> FirstLevelDefaultTileSize(
     "polly-default-tile-size",
     cl::desc("The default tile size (if not enough were provided by"
@@ -493,12 +504,52 @@
   return isl_map_set_tuple_id(IslMap, isl_dim_in, InputDimsId);
 }
 
+/// @brief Permute two dimensions of the band node.
+///
+/// Permute the dimensions FirstDim and SecondDim of the band node Node.
+///
+/// @param Node The band node to be modified.
+/// @param FirstDim The first dimension to be permuted.
+/// @param SecondDim The second dimension to be permuted.
+static __isl_give isl_schedule_node *
+permuteBandNodeDimensions(__isl_take isl_schedule_node *Node, unsigned FirstDim,
+                          unsigned SecondDim) {
+  assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band &&
+         isl_schedule_node_band_n_member(Node) > std::max(FirstDim, SecondDim));
+  auto PartialSchedule = isl_schedule_node_band_get_partial_schedule(Node);
+  auto PartialScheduleFirstDim =
+      isl_multi_union_pw_aff_get_union_pw_aff(PartialSchedule, FirstDim);
+  auto PartialScheduleSecondDim =
+      isl_multi_union_pw_aff_get_union_pw_aff(PartialSchedule, SecondDim);
+  PartialSchedule = isl_multi_union_pw_aff_set_union_pw_aff(
+      PartialSchedule, SecondDim, PartialScheduleFirstDim);
+  PartialSchedule = isl_multi_union_pw_aff_set_union_pw_aff(
+      PartialSchedule, FirstDim, PartialScheduleSecondDim);
+  Node = isl_schedule_node_delete(Node);
+  Node = isl_schedule_node_insert_partial_schedule(Node, PartialSchedule);
+  return Node;
+}
+
 __isl_give isl_schedule_node *ScheduleTreeOptimizer::createMicroKernel(
     __isl_take isl_schedule_node *Node, MicroKernelParamsTy MicroKernelParams) {
   return applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
                              1);
 }
 
+__isl_give isl_schedule_node *ScheduleTreeOptimizer::createMacroKernel(
+    __isl_take isl_schedule_node *Node, MacroKernelParamsTy MacroKernelParams) {
+  assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);
+  if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
+      MacroKernelParams.Kc == 1)
+    return Node;
+  Node = tileNode(
+      Node, "1st level tiling",
+      {MacroKernelParams.Mc, MacroKernelParams.Nc, MacroKernelParams.Kc}, 1);
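+  // After tiling, interchange the second and third dimensions of the tile
+  // band (reached by walking up two nodes) so that the loops follow the order
+  // of the BLIS macro-kernel, and descend back to the point band afterwards.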
+  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
+  Node = permuteBandNodeDimensions(Node, 1, 2);
+  return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
+}
+
 /// Get parameters of the BLIS micro kernel.
 ///
 /// We choose the Mr and Nr parameters of the micro kernel to be large enough
@@ -525,10 +576,54 @@
   return {Mr, Nr};
 }
 
+/// Get parameters of the BLIS macro kernel.
+///
+/// During the computation of matrix multiplication, blocks of the partitioned
+/// matrices are mapped to different levels of the memory hierarchy.
+/// To optimize data reuse, these blocks should ideally be kept in cache
+/// between iterations. Since the parameters of the macro-kernel determine the
+/// sizes of these blocks, they are subject to upper and lower bounds.
+///
+/// @param MicroKernelParams Parameters of the micro-kernel
+///                          to be taken into account.
+/// @return The structure of type MacroKernelParamsTy.
+/// @see MacroKernelParamsTy
+/// @see MicroKernelParamsTy
+static struct MacroKernelParamsTy
+getMacroKernelParams(const MicroKernelParamsTy &MicroKernelParams) {
+  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
+  // determining all the parameters of the macro-kernel requires information
+  // about the first two cache levels, and the associativity degree of each of
+  // these levels has to be greater than two. If this information is missing
+  // or the requirements are not met, a different algorithm would be needed;
+  // in that case we simply return {1, 1, 1}, which disables the macro-kernel
+  // tiling.
+  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
+        CacheLevelSizes.size() >= 2 && CacheLevelAssociativity.size() >= 2 &&
+        CacheLevelSizes[0] > 0 && CacheLevelSizes[1] > 0 &&
+        CacheLevelAssociativity[0] > 2 && CacheLevelAssociativity[1] > 2))
+    return {1, 1, 1};
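+  // The computations below follow the analytical model from the paper cited
+  // above; the constant 8 is the element size in bytes (double-precision
+  // elements are assumed).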
+  int Cbr = floor(
+      (CacheLevelAssociativity[0] - 1) /
+      (1 + static_cast<double>(MicroKernelParams.Mr) / MicroKernelParams.Nr));
+  int Kc = (Cbr * CacheLevelSizes[0]) /
+           (MicroKernelParams.Nr * CacheLevelAssociativity[0] * 8);
+  double Cac = static_cast<double>(MicroKernelParams.Mr * Kc * 8 *
+                                   CacheLevelAssociativity[1]) /
+               CacheLevelSizes[1];
+  double Cbc = static_cast<double>(MicroKernelParams.Nr * Kc * 8 *
+                                   CacheLevelAssociativity[1]) /
+               CacheLevelSizes[1];
+  int Mc = floor(MicroKernelParams.Mr / Cac);
+  int Nc =
+      floor((MicroKernelParams.Nr * (CacheLevelAssociativity[1] - 2)) / Cbc);
+  return {Mc, Nc, Kc};
+}
+
 __isl_give isl_schedule_node *ScheduleTreeOptimizer::optimizeMatMulPattern(
     __isl_take isl_schedule_node *Node, const llvm::TargetTransformInfo *TTI) {
   assert(TTI && "The target transform info should be provided.");
   auto MicroKernelParams = getMicroKernelParams(TTI);
+  auto MacroKernelParams = getMacroKernelParams(MicroKernelParams);
+  Node = createMacroKernel(Node, MacroKernelParams);
   Node = createMicroKernel(Node, MicroKernelParams);
   return Node;
 }