am 8bfc32ba: DO NOT MERGE - Infer document and sample metadata when generating docs.

* commit '8bfc32ba1c79cdc576101944346f66112b1a3776':
  DO NOT MERGE - Infer document and sample metadata when generating docs.
diff --git a/Android.mk b/Android.mk
index 6e4ff82..7e7686d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -24,7 +24,9 @@
 	jsilver \
 	guavalib \
 	antlr-runtime
-	
+
+LOCAL_STATIC_JAVA_LIBRARIES += tagsouplib
+
 LOCAL_CLASSPATH := \
 	$(HOST_JDK_TOOLS_JAR)
 
diff --git a/build.gradle b/build.gradle
index 568426f..79d856e 100644
--- a/build.gradle
+++ b/build.gradle
@@ -24,6 +24,7 @@
 
 dependencies {
     compile files(findToolsJar())
-    compile project(path: ':external:antlr', configuration: 'antlrRuntime')
-    compile project(':external:jsilver')
+    compile project(path: ':antlr', configuration: 'antlrRuntime')
+    compile project(':jsilver')
+    compile project(':tagsoup')
 }
\ No newline at end of file
diff --git a/src/com/google/doclava/DocFile.java b/src/com/google/doclava/DocFile.java
index c27c82a..e28ad59 100644
--- a/src/com/google/doclava/DocFile.java
+++ b/src/com/google/doclava/DocFile.java
@@ -55,24 +55,19 @@
       "ru", "zh-cn", "zh-tw", "pt-br"};
 
   public static String getPathRoot(String filename) {
-    String[] stripStr = filename.split("\\/");
-    String outFrag = stripStr[0];
-    if (stripStr.length > 0) {
-      for (String t : DEVSITE_VALID_LANGS) {
-        if (stripStr[0].equals("intl")) {
-          if (stripStr[1].equals(t)) {
-            outFrag = stripStr[2];
-            break;
-          }
-        } else if (stripStr[0].equals(t)) {
-            outFrag = stripStr[1];
-            break;
-        }
+    //look for a valid lang string in the file path. If found,
+    //snip the intl/lang from the path.
+    for (String t : DEVSITE_VALID_LANGS) {
+      int langStart = filename.indexOf("/" + t + "/");
+      if (langStart > -1) {
+        int langEnd = filename.indexOf("/", langStart + 1);
+        filename = filename.substring(langEnd + 1);
+        break;
       }
     }
-    return outFrag;
+    return filename;
   }
-  
+
   public static Data getPageMetadata (String docfile, Data hdf) {
     //utility method for extracting metadata without generating file output.
     if (hdf == null) {
@@ -193,6 +188,7 @@
         hdf.setValue("page.type", "design");
       } else if (filename.indexOf("develop") == 0) {
         hdf.setValue("develop", "true");
+        hdf.setValue("page.type", "develop");
       } else if (filename.indexOf("guide") == 0) {
         hdf.setValue("guide", "true");
         hdf.setValue("page.type", "guide");
@@ -213,6 +209,21 @@
       } else if (filename.indexOf("distribute") == 0) {
         hdf.setValue("distribute", "true");
         hdf.setValue("page.type", "distribute");
+        if (filename.indexOf("distribute/googleplay") == 0) {
+          hdf.setValue("googleplay", "true");
+        } else if (filename.indexOf("distribute/essentials") == 0) {
+          hdf.setValue("essentials", "true");
+        } else if (filename.indexOf("distribute/users") == 0) {
+          hdf.setValue("users", "true");
+        } else if (filename.indexOf("distribute/engage") == 0) {
+          hdf.setValue("engage", "true");
+        } else if (filename.indexOf("distribute/monetize") == 0) {
+          hdf.setValue("monetize", "true");
+        } else if (filename.indexOf("distribute/tools") == 0) {
+          hdf.setValue("disttools", "true");
+        } else if (filename.indexOf("distribute/stories") == 0) {
+          hdf.setValue("stories", "true");
+        }
       } else if (filename.indexOf("about") == 0) {
         hdf.setValue("about", "true");
         hdf.setValue("page.type", "about");
@@ -232,6 +243,9 @@
       } else if (filename.indexOf("wear") == 0) {
         hdf.setValue("wear", "true");
       }
+      //set metadata for this file in jd_lists_unified
+      PageMetadata.setPageMetadata(docfile, relative, outfile, hdf, Doclava.sTaglist);
+
       if (fromTemplate.equals("sdk")) {
         ClearPage.write(hdf, "sdkpage.cs", outfile);
       } else {
diff --git a/src/com/google/doclava/Doclava.java b/src/com/google/doclava/Doclava.java
index 213492a..a96b91c 100644
--- a/src/com/google/doclava/Doclava.java
+++ b/src/com/google/doclava/Doclava.java
@@ -66,6 +66,8 @@
   public static int showLevel = SHOW_PROTECTED;
 
   public static final boolean SORT_BY_NAV_GROUPS = true;
+  /* Debug output for PageMetadata, format urls from site root */
+  public static boolean META_DBG=false;
 
   public static String outputPathBase = "/";
   public static ArrayList<String> inputPathHtmlDirs = new ArrayList<String>();
@@ -244,6 +246,8 @@
         sinceTagger.addVersion(a[1], a[2]);
       } else if (a[0].equals("-offlinemode")) {
         offlineMode = true;
+      } else if (a[0].equals("-metadataDebug")) {
+        META_DBG = true;
       } else if (a[0].equals("-federate")) {
         try {
           String name = a[1];
@@ -313,10 +317,10 @@
         TodoFile.writeTodoFile(todoFile);
       }
 
-      if (samplesRef) {
+  if (samplesRef) {
         // always write samples without offlineMode behaviors
-        writeSamples(false, sampleCodes, SORT_BY_NAV_GROUPS);
-      }
+  writeSamples(false, sampleCodes, SORT_BY_NAV_GROUPS);
+  }
 
       // HTML2 Pages -- Generate Pages from optional secondary dir
       if (!inputPathHtmlDir2.isEmpty()) {
@@ -355,25 +359,25 @@
       writePackages(javadocDir + refPrefix + "packages" + htmlExtension);
 
       // Classes
-      writeClassLists();
-      writeClasses();
-      writeHierarchy();
+  writeClassLists();
+  writeClasses();
+  writeHierarchy();
       // writeKeywords();
 
       // Lists for JavaScript
-      writeLists();
+  writeLists();
       if (keepListFile != null) {
         writeKeepList(keepListFile);
       }
 
       // Index page
-      writeIndex();
+  writeIndex();
 
-      Proofread.finishProofread(proofreadFile);
+  Proofread.finishProofread(proofreadFile);
 
-      if (sdkValuePath != null) {
-        writeSdkValues(sdkValuePath);
-      }
+  if (sdkValuePath != null) {
+    writeSdkValues(sdkValuePath);
+  }
       // Write metadata for all processed files to jd_lists_unified.js in out dir
       if (!sTaglist.isEmpty()) {
         PageMetadata.WriteList(sTaglist);
@@ -644,6 +648,9 @@
       gcmRef = true;
       return 1;
     }
+    if (option.equals("-metadataDebug")) {
+      return 1;
+    }
     return 0;
   }
   public static boolean validOptions(String[][] options, DocErrorReporter r) {
@@ -768,11 +775,6 @@
           Data data = makeHDF();
           String filename = templ.substring(0, len - 3) + htmlExtension;
           DocFile.writePage(f.getAbsolutePath(), relative, filename, data);
-          String[] sections = relative.split("\\/");
-          boolean isIntl = ((sections.length > 0) && (sections[0].equals("intl")));
-          //if (!isIntl) {
-          PageMetadata.setPageMetadata(f, relative, filename, data, sTaglist);
-          //}
         } else if(!f.getName().equals(".DS_Store")){
               Data data = makeHDF();
               String hdfValue = data.getValue("sac") == null ? "" : data.getValue("sac");
diff --git a/src/com/google/doclava/LinkReference.java b/src/com/google/doclava/LinkReference.java
index dfece8e..816bdb1 100644
--- a/src/com/google/doclava/LinkReference.java
+++ b/src/com/google/doclava/LinkReference.java
@@ -59,7 +59,7 @@
   public boolean good;
 
   /**
-   * regex pattern to use when matching explicit "<a href" reference text
+   * regex pattern to use when matching explicit 'a href' reference text
    */
   private static final Pattern HREF_PATTERN =
       Pattern.compile("^<a href=\"([^\"]*)\">([^<]*)</a>[ \n\r\t]*$", Pattern.CASE_INSENSITIVE);
diff --git a/src/com/google/doclava/PageMetadata.java b/src/com/google/doclava/PageMetadata.java
index c22ac0f..360ae84 100644
--- a/src/com/google/doclava/PageMetadata.java
+++ b/src/com/google/doclava/PageMetadata.java
@@ -16,6 +16,8 @@
 
 package com.google.doclava;
 
+import java.io.*;
+import java.text.BreakIterator;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -26,6 +28,24 @@
 
 import com.google.clearsilver.jsilver.data.Data;
 
+import org.ccil.cowan.tagsoup.*;
+import org.xml.sax.XMLReader;
+import org.xml.sax.InputSource;
+import org.xml.sax.Attributes;
+import org.xml.sax.helpers.DefaultHandler;
+
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import javax.xml.transform.dom.DOMResult;
+import javax.xml.transform.sax.SAXSource;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpression;
+import javax.xml.xpath.XPathFactory;
+
 /**
 * Metadata associated with a specific documentation page. Extracts
 * metadata based on the page's declared hdf vars (meta.tags and others)
@@ -43,6 +63,13 @@
   String mTagList;
   static boolean sLowercaseTags = true;
   static boolean sLowercaseKeywords = true;
+  //static String linkPrefix = (Doclava.META_DBG) ? "/" : "http://developer.android.com/";
+  /**
+   * regex pattern to match javadoc @link and similar tags. Extracts
+   * root symbol to $1.
+   */
+  private static final Pattern JD_TAG_PATTERN =
+      Pattern.compile("\\{@.*?[\\s\\.\\#]([A-Za-z\\(\\)\\d_]+)(?=\u007D)\u007D");
 
   public PageMetadata(File source, String dest, List<Node> taglist) {
     mSource = source;
@@ -87,32 +114,152 @@
   * are normalized. Unsupported metadata fields are ignored. See
   * Node for supported metadata fields and methods for accessing values.
   *
-  * @param file The file from which to extract metadata.
+  * @param docfile The file from which to extract metadata.
   * @param dest The output path for the file, used to set link to page.
   * @param filename The file from which to extract metadata.
   * @param hdf Data object in which to store the metadata values.
   * @param tagList The file from which to extract metadata.
-  * @return tagList with new node added.
   */
-  public static List<Node> setPageMetadata(File file, String dest, String filename,
+  public static void setPageMetadata(String docfile, String dest, String filename,
       Data hdf, List<Node> tagList) {
     //exclude this page if author does not want it included
     boolean excludeNode = "true".equals(hdf.getValue("excludeFromSuggestions",""));
+
+    //check whether summary and image exist and if not, get them from itemprop/markup
+    Boolean needsSummary = "".equals(hdf.getValue("page.metaDescription", ""));
+    Boolean needsImage = "".equals(hdf.getValue("page.image", ""));
+    if ((needsSummary) || (needsImage)) {
+      //try to extract the metadata from itemprop and markup
+      inferMetadata(docfile, hdf, needsSummary, needsImage);
+    }
+
+    //extract available metadata and set it in a node
     if (!excludeNode) {
       Node pageMeta = new Node.Builder().build();
       pageMeta.setLabel(getTitleNormalized(hdf, "page.title"));
       pageMeta.setTitleFriendly(hdf.getValue("page.titleFriendly",""));
-      pageMeta.setSummary(hdf.getValue("page.summary",""));
-      pageMeta.setLink(filename);
+      pageMeta.setSummary(hdf.getValue("page.metaDescription",""));
+      pageMeta.setLink(getPageUrlNormalized(filename));
       pageMeta.setGroup(getStringValueNormalized(hdf,"sample.group"));
       pageMeta.setKeywords(getPageTagsNormalized(hdf, "page.tags"));
       pageMeta.setTags(getPageTagsNormalized(hdf, "meta.tags"));
-      pageMeta.setImage(getStringValueNormalized(hdf, "page.image"));
+      pageMeta.setImage(getImageUrlNormalized(hdf.getValue("page.image", "")));
       pageMeta.setLang(getLangStringNormalized(filename));
       pageMeta.setType(getStringValueNormalized(hdf, "page.type"));
       appendMetaNodeByType(pageMeta, tagList);
     }
-    return tagList;
+  }
+
+  /**
+  * Attempt to infer page metadata based on the contents of the
+  * file. Load and parse the file as a dom tree. Select values
+  * in this order: 1. dom node specifically tagged with
+  * microdata (itemprop). 2. first qualified p or img node.
+  *
+  * @param docfile The file from which to extract metadata.
+  * @param hdf Data object in which to store the metadata values.
+  * @param needsSummary Whether to extract summary metadata.
+  * @param needsImage Whether to extract image metadata.
+  */
+  public static void inferMetadata(String docfile, Data hdf,
+      Boolean needsSummary, Boolean needsImage) {
+    String sum = "";
+    String imageUrl = "";
+    String sumFrom = needsSummary ? "none" : "hdf";
+    String imgFrom = needsImage ? "none" : "hdf";
+    String filedata = hdf.getValue("commentText", "");
+    if (Doclava.META_DBG) System.out.println("----- " + docfile + "\n");
+
+    try {
+      XPathFactory xpathFac = XPathFactory.newInstance();
+      XPath xpath = xpathFac.newXPath();
+      InputStream inputStream = new ByteArrayInputStream(filedata.getBytes());
+      XMLReader reader = new Parser();
+      reader.setFeature(Parser.namespacesFeature, false);
+      reader.setFeature(Parser.namespacePrefixesFeature, false);
+      reader.setFeature(Parser.ignoreBogonsFeature, true);
+
+      Transformer transformer = TransformerFactory.newInstance().newTransformer();
+      DOMResult result = new DOMResult();
+      transformer.transform(new SAXSource(reader, new InputSource(inputStream)), result);
+      org.w3c.dom.Node htmlNode = result.getNode();
+
+      if (needsSummary) {
+        StringBuilder sumStrings = new StringBuilder();
+        XPathExpression ItempropDescExpr = xpath.compile("/descendant-or-self::*"
+            + "[@itemprop='description'][1]//text()[string(.)]");
+        org.w3c.dom.NodeList nodes = (org.w3c.dom.NodeList) ItempropDescExpr.evaluate(htmlNode,
+            XPathConstants.NODESET);
+        if (nodes.getLength() > 0) {
+          for (int i = 0; i < nodes.getLength(); i++) {
+            String tx = nodes.item(i).getNodeValue();
+            sumStrings.append(tx);
+            sumFrom = "itemprop";
+          }
+        } else {
+          XPathExpression FirstParaExpr = xpath.compile("//p[not(../../../"
+              + "@class='notice-developers') and not(../@class='sidebox')"
+              + "and not(@class)]//text()");
+          nodes = (org.w3c.dom.NodeList) FirstParaExpr.evaluate(htmlNode, XPathConstants.NODESET);
+          if (nodes.getLength() > 0) {
+            for (int i = 0; i < nodes.getLength(); i++) {
+              String tx = nodes.item(i).getNodeValue();
+              sumStrings.append(tx + " ");
+              sumFrom = "markup";
+            }
+          }
+        }
+        //found a summary string, now normalize it
+        sum = sumStrings.toString().trim();
+        if ((sum != null) && (!"".equals(sum))) {
+          sum = getSummaryNormalized(sum);
+        }
+        //normalized summary ended up being too short to be meaningful
+        if ("".equals(sum)) {
+           if (Doclava.META_DBG) System.out.println("Warning: description too short! ("
+            + sum.length() + "chars) ...\n\n");
+        }
+        //summary looks good, store it to the file hdf data
+        hdf.setValue("page.metaDescription", sum);
+      }
+      if (needsImage) {
+        XPathExpression ItempropImageExpr = xpath.compile("//*[@itemprop='image']/@src");
+        org.w3c.dom.NodeList imgNodes = (org.w3c.dom.NodeList) ItempropImageExpr.evaluate(htmlNode,
+            XPathConstants.NODESET);
+        if (imgNodes.getLength() > 0) {
+          imageUrl = imgNodes.item(0).getNodeValue();
+          imgFrom = "itemprop";
+        } else {
+          XPathExpression FirstImgExpr = xpath.compile("//img/@src");
+          imgNodes = (org.w3c.dom.NodeList) FirstImgExpr.evaluate(htmlNode, XPathConstants.NODESET);
+          if (imgNodes.getLength() > 0) {
+            //iterate nodes looking for valid image url and normalize.
+            for (int i = 0; i < imgNodes.getLength(); i++) {
+              String tx = imgNodes.item(i).getNodeValue();
+              //qualify and normalize the image
+              imageUrl = getImageUrlNormalized(tx);
+              //this img src did not qualify, keep looking...
+              if ("".equals(imageUrl)) {
+                if (Doclava.META_DBG) System.out.println("    >>>>> Discarded image: " + tx);
+                continue;
+              } else {
+                imgFrom = "markup";
+                break;
+              }
+            }
+          }
+        }
+        //img src url looks good, store it to the file hdf data
+        hdf.setValue("page.image", imageUrl);
+      }
+      if (Doclava.META_DBG) System.out.println("Image (" + imgFrom + "): " + imageUrl);
+      if (Doclava.META_DBG) System.out.println("Summary (" + sumFrom + "): " + sum.length()
+          + " chars\n\n" + sum + "\n");
+      return;
+
+    } catch (Exception e) {
+      if (Doclava.META_DBG) System.out.println("    >>>>> Exception: " + e + "\n");
+    }
   }
 
   /**
@@ -126,25 +273,38 @@
   */
   public static String getPageTagsNormalized(Data hdf, String tag) {
 
+    String normTags = "";
     StringBuilder tags = new StringBuilder();
     String tagList = hdf.getValue(tag, "");
+    if (tag.equals("meta.tags") && (tagList.equals(""))) {
+      //use keywords as tags if no meta tags are available
+      tagList = hdf.getValue("page.tags", "");
+    }
     if (!tagList.equals("")) {
       tagList = tagList.replaceAll("\"", "");
       String[] tagParts = tagList.split(",");
       for (int iter = 0; iter < tagParts.length; iter++) {
-        tags.append("'");
+        tags.append("\"");
         if (tag.equals("meta.tags") && sLowercaseTags) {
           tagParts[iter] = tagParts[iter].toLowerCase();
         } else if (tag.equals("page.tags") && sLowercaseKeywords) {
           tagParts[iter] = tagParts[iter].toLowerCase();
         }
+        if (tag.equals("meta.tags")) {
+          //tags.append("#"); //to match hashtag format used with yt/blogger resources
+          tagParts[iter] = tagParts[iter].replaceAll(" ","");
+        }
         tags.append(tagParts[iter].trim());
-        tags.append("'");
+        tags.append("\"");
         if (iter < tagParts.length - 1) {
           tags.append(",");
         }
       }
     }
+    //write this back to hdf to expose through js
+    if (tag.equals("meta.tags")) {
+      hdf.setValue(tag, tags.toString());
+    }
     return tags.toString();
   }
 
@@ -161,8 +321,8 @@
   public static String getStringValueNormalized(Data hdf, String tag) {
     StringBuilder outString =  new StringBuilder();
     String tagList = hdf.getValue(tag, "");
+    tagList.replaceAll("\"", "");
     if (!tagList.isEmpty()) {
-      tagList.replaceAll("\"", "");
       int end = tagList.indexOf(",");
       if (end != -1) {
         tagList = tagList.substring(0,end);
@@ -188,7 +348,7 @@
     StringBuilder outTitle =  new StringBuilder();
     String title = hdf.getValue(tag, "");
     if (!title.isEmpty()) {
-      title = title.replaceAll("\"", "'");
+      title = escapeString(title);
       if (title.indexOf("<span") != -1) {
         String[] splitTitle = title.split("<span(.*?)</span>");
         title = splitTitle[0];
@@ -226,6 +386,99 @@
   }
 
   /**
+  * Normalize a page summary string and truncate as needed. Strings
+  * exceeding max_chars are truncated at the first word boundary
+  * following the max_chars marker. Strings smaller than min_chars
+  * are discarded (as they are assumed to be too little context).
+  *
+  * @param s String extracted from the page as its summary.
+  * @return A normalized string value.
+  */
+  public static String getSummaryNormalized(String s) {
+    String str = "";
+    int max_chars = 250;
+    int min_chars = 50;
+    int marker = 0;
+    if (s.length() < min_chars) {
+      return str;
+    } else {
+      str = s.replaceAll("^\"|\"$", "");
+      str = str.replaceAll("\\s+", " ");
+      str = JD_TAG_PATTERN.matcher(str).replaceAll("$1");
+      str = escapeString(str);
+      BreakIterator bi = BreakIterator.getWordInstance();
+      bi.setText(str);
+      if (str.length() > max_chars) {
+        marker = bi.following(max_chars);
+      } else {
+        marker = bi.last();
+      }
+      str = str.substring(0, marker);
+      str = str.concat("\u2026" );
+    }
+    return str;
+  }
+
+  public static String escapeString(String s) {
+    s = s.replaceAll("\"", "&quot;");
+    s = s.replaceAll("\'", "&#39;");
+    s = s.replaceAll("<", "&lt;");
+    s = s.replaceAll(">", "&gt;");
+    s = s.replaceAll("/", "&#47;");
+    return s;
+  }
+
+  //Disqualify img src urls that include these substrings
+  public static String[] IMAGE_EXCLUDE = {"/triangle-", "favicon","android-logo",
+      "icon_play.png", "robot-tiny"};
+
+  public static boolean inList(String s, String[] list) {
+    for (String t : list) {
+      if (s.contains(t)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+  * Normalize an img src url by removing docRoot and leading
+  * slash for local image references. These are added later
+  * in js to support offline mode and keep path reference
+  * format consistent with hrefs.
+  *
+  * @param url Abs or rel url sourced from img src.
+  * @return Normalized url if qualified, else empty
+  */
+  public static String getImageUrlNormalized(String url) {
+    String absUrl = "";
+    // validate to avoid choosing specific images
+    if ((url != null) && (!url.equals("")) && (!inList(url, IMAGE_EXCLUDE))) {
+      absUrl = url.replace("{@docRoot}", "");
+      absUrl = absUrl.replaceFirst("^/(?!/)", "");
+    }
+    return absUrl;
+  }
+
+  /**
+  * Normalize an href url by removing docRoot and leading
+  * slash for local image references. These are added later
+  * in js to support offline mode and keep path reference
+  * format consistent with hrefs.
+  *
+  * @param url Abs or rel page url sourced from href
+  * @return Normalized url, either abs or rel to root
+  */
+  public static String getPageUrlNormalized(String url) {
+    String absUrl = "";
+    if ((url !=null) && (!url.equals(""))) {
+      absUrl = url.replace("{@docRoot}", "");
+      absUrl = absUrl.replaceFirst("^/(?!/)", "");
+    }
+    return absUrl;
+  }
+
+  /**
   * Given a metadata node, add it as a child of a root node based on its
   * type. If there is no root node that matches the node's type, create one
   * and add the metadata node as a child node.
@@ -272,6 +525,7 @@
         for (String t : nodeTags) { //process each of the meta.tags
           for (Node n : rootTagNodesList) {
             if (n.getLabel().equals(t.toString())) {
+              n.getTags().add(String.valueOf(iter));
               matched = true;
               break; // add to the first root node only
             } // tag did not match
@@ -383,16 +637,16 @@
         final int n = list.size();
         for (int i = 0; i < n; i++) {
           buf.append("\n      {\n");
-          buf.append("        title:\"" + list.get(i).mLabel + "\",\n" );
-          buf.append("        titleFriendly:\"" + list.get(i).mTitleFriendly + "\",\n" );
-          buf.append("        summary:\"" + list.get(i).mSummary + "\",\n" );
-          buf.append("        url:\"" + list.get(i).mLink + "\",\n" );
-          buf.append("        group:\"" + list.get(i).mGroup + "\",\n" );
+          buf.append("        \"title\":\"" + list.get(i).mLabel + "\",\n" );
+          buf.append("        \"titleFriendly\":\"" + list.get(i).mTitleFriendly + "\",\n" );
+          buf.append("        \"summary\":\"" + list.get(i).mSummary + "\",\n" );
+          buf.append("        \"url\":\"" + list.get(i).mLink + "\",\n" );
+          buf.append("        \"group\":\"" + list.get(i).mGroup + "\",\n" );
           list.get(i).renderArrayType(buf, list.get(i).mKeywords, "keywords");
           list.get(i).renderArrayType(buf, list.get(i).mTags, "tags");
-          buf.append("        image:\"" + list.get(i).mImage + "\",\n" );
-          buf.append("        lang:\"" + list.get(i).mLang + "\",\n" );
-          buf.append("        type:\"" + list.get(i).mType + "\"");
+          buf.append("        \"image\":\"" + list.get(i).mImage + "\",\n" );
+          buf.append("        \"lang\":\"" + list.get(i).mLang + "\",\n" );
+          buf.append("        \"type\":\"" + list.get(i).mType + "\"");
           buf.append("\n      }");
           if (i != n - 1) {
             buf.append(", ");
@@ -434,7 +688,6 @@
       } else {
         final int n = list.size();
         for (int i = 0; i < n; i++) {
-
           buf.append("\n    " + list.get(i).mLabel + ":[");
           renderArrayValue(buf, list.get(i).mTags);
           buf.append("]");
@@ -452,7 +705,7 @@
     * @param key The key for the pair.
     */
     void renderArrayType(StringBuilder buf, List<String> type, String key) {
-      buf.append("        " + key + ": [");
+      buf.append("        \"" + key + "\": [");
       renderArrayValue(buf, type);
       buf.append("],\n");
     }
diff --git a/src/com/google/doclava/SampleCode.java b/src/com/google/doclava/SampleCode.java
index 45f9833..57f1c54 100644
--- a/src/com/google/doclava/SampleCode.java
+++ b/src/com/google/doclava/SampleCode.java
@@ -296,8 +296,6 @@
         ClearPage.write(hdf, "sampleindex.cs", mDest + "index" + Doclava.htmlExtension);
       } else {
         DocFile.writePage(filename, rel, mDest + "index" + Doclava.htmlExtension, hdf);
-        PageMetadata.setPageMetadata(f, rel, mDest + "index" + Doclava.htmlExtension,
-            hdf, Doclava.sTaglist);
       }
     } else if (f.isFile()) {
       //gather metadata for toc and jd_lists_unified