Add OpenJDK 8 java.util.stream package
Based on OpenJDK 8u60 source & iam@ stream prototype in
ag/872080
Uncommented all code that was waiting for java.util.stream
to show up
Differences from original sources:
- Removed unsignedDivision usage from LongStream; it's not currently
supported, and we don't expect such large workloads on mobile devices.
- Removed java.nio.file references.
- Removed not-yet-implemented stream-related methods from
other packages listed in package-info.java.
Bug: 27692239
Change-Id: Ie24e60e8248367b576ef91046837ccde152de373
diff --git a/jsr166-tests/src/test/java/jsr166/Collection8Test.java b/jsr166-tests/src/test/java/jsr166/Collection8Test.java
index 634182b..0204ce6 100644
--- a/jsr166-tests/src/test/java/jsr166/Collection8Test.java
+++ b/jsr166-tests/src/test/java/jsr166/Collection8Test.java
@@ -44,61 +44,60 @@
/**
* stream().forEach returns elements in the collection
*/
- // TODO(streams):
- // public void testForEach() throws Throwable {
- // final Collection c = impl.emptyCollection();
- // final AtomicLong count = new AtomicLong(0L);
- // final Object x = impl.makeElement(1);
- // final Object y = impl.makeElement(2);
- // final ArrayList found = new ArrayList();
- // Consumer<Object> spy = (o) -> { found.add(o); };
- // c.stream().forEach(spy);
- // assertTrue(found.isEmpty());
+ public void testForEach() throws Throwable {
+ final Collection c = impl.emptyCollection();
+ final AtomicLong count = new AtomicLong(0L);
+ final Object x = impl.makeElement(1);
+ final Object y = impl.makeElement(2);
+ final ArrayList found = new ArrayList();
+ Consumer<Object> spy = (o) -> { found.add(o); };
+ c.stream().forEach(spy);
+ assertTrue(found.isEmpty());
- // assertTrue(c.add(x));
- // c.stream().forEach(spy);
- // assertEquals(Collections.singletonList(x), found);
- // found.clear();
+ assertTrue(c.add(x));
+ c.stream().forEach(spy);
+ assertEquals(Collections.singletonList(x), found);
+ found.clear();
- // assertTrue(c.add(y));
- // c.stream().forEach(spy);
- // assertEquals(2, found.size());
- // assertTrue(found.contains(x));
- // assertTrue(found.contains(y));
- // found.clear();
+ assertTrue(c.add(y));
+ c.stream().forEach(spy);
+ assertEquals(2, found.size());
+ assertTrue(found.contains(x));
+ assertTrue(found.contains(y));
+ found.clear();
- // c.clear();
- // c.stream().forEach(spy);
- // assertTrue(found.isEmpty());
- // }
+ c.clear();
+ c.stream().forEach(spy);
+ assertTrue(found.isEmpty());
+ }
- // public void testForEachConcurrentStressTest() throws Throwable {
- // if (!impl.isConcurrent()) return;
- // final Collection c = impl.emptyCollection();
- // final long testDurationMillis = timeoutMillis();
- // final AtomicBoolean done = new AtomicBoolean(false);
- // final Object elt = impl.makeElement(1);
- // final Future<?> f1, f2;
- // final ExecutorService pool = Executors.newCachedThreadPool();
- // try (PoolCleaner cleaner = cleaner(pool, done)) {
- // final CountDownLatch threadsStarted = new CountDownLatch(2);
- // Runnable checkElt = () -> {
- // threadsStarted.countDown();
- // while (!done.get())
- // c.stream().forEach((x) -> { assertSame(x, elt); }); };
- // Runnable addRemove = () -> {
- // threadsStarted.countDown();
- // while (!done.get()) {
- // assertTrue(c.add(elt));
- // assertTrue(c.remove(elt));
- // }};
- // f1 = pool.submit(checkElt);
- // f2 = pool.submit(addRemove);
- // Thread.sleep(testDurationMillis);
- // }
- // assertNull(f1.get(0L, MILLISECONDS));
- // assertNull(f2.get(0L, MILLISECONDS));
- // }
+ public void testForEachConcurrentStressTest() throws Throwable {
+ if (!impl.isConcurrent()) return;
+ final Collection c = impl.emptyCollection();
+ final long testDurationMillis = timeoutMillis();
+ final AtomicBoolean done = new AtomicBoolean(false);
+ final Object elt = impl.makeElement(1);
+ final Future<?> f1, f2;
+ final ExecutorService pool = Executors.newCachedThreadPool();
+ try (PoolCleaner cleaner = cleaner(pool, done)) {
+ final CountDownLatch threadsStarted = new CountDownLatch(2);
+ Runnable checkElt = () -> {
+ threadsStarted.countDown();
+ while (!done.get())
+ c.stream().forEach((x) -> { assertSame(x, elt); }); };
+ Runnable addRemove = () -> {
+ threadsStarted.countDown();
+ while (!done.get()) {
+ assertTrue(c.add(elt));
+ assertTrue(c.remove(elt));
+ }};
+ f1 = pool.submit(checkElt);
+ f2 = pool.submit(addRemove);
+ Thread.sleep(testDurationMillis);
+ }
+ assertNull(f1.get(0L, MILLISECONDS));
+ assertNull(f2.get(0L, MILLISECONDS));
+ }
// public void testCollection8DebugFail() { fail(); }
}
diff --git a/jsr166-tests/src/test/java/jsr166/CompletableFutureTest.java b/jsr166-tests/src/test/java/jsr166/CompletableFutureTest.java
index 1372cc4..37bc285 100644
--- a/jsr166-tests/src/test/java/jsr166/CompletableFutureTest.java
+++ b/jsr166-tests/src/test/java/jsr166/CompletableFutureTest.java
@@ -15,9 +15,8 @@
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
-// TODO(streams):
-//import java.util.stream.Collectors;
-//import java.util.stream.Stream;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
import java.util.ArrayList;
import java.util.Arrays;
@@ -3725,66 +3724,65 @@
/**
* Minimal completion stages throw UOE for all non-CompletionStage methods
*/
- // TODO(streams):
- // public void testMinimalCompletionStage_minimality() {
- // if (!testImplementationDetails) return;
- // Function<Method, String> toSignature =
- // (method) -> method.getName() + Arrays.toString(method.getParameterTypes());
- // Predicate<Method> isNotStatic =
- // (method) -> (method.getModifiers() & Modifier.STATIC) == 0;
- // List<Method> minimalMethods =
- // Stream.of(Object.class, CompletionStage.class)
- // .flatMap((klazz) -> Stream.of(klazz.getMethods()))
- // .filter(isNotStatic)
- // .collect(Collectors.toList());
- // // Methods from CompletableFuture permitted NOT to throw UOE
- // String[] signatureWhitelist = {
- // "newIncompleteFuture[]",
- // "defaultExecutor[]",
- // "minimalCompletionStage[]",
- // "copy[]",
- // };
- // Set<String> permittedMethodSignatures =
- // Stream.concat(minimalMethods.stream().map(toSignature),
- // Stream.of(signatureWhitelist))
- // .collect(Collectors.toSet());
- // List<Method> allMethods = Stream.of(CompletableFuture.class.getMethods())
- // .filter(isNotStatic)
- // .filter((method) -> !permittedMethodSignatures.contains(toSignature.apply(method)))
- // .collect(Collectors.toList());
+ public void testMinimalCompletionStage_minimality() {
+ if (!testImplementationDetails) return;
+ Function<Method, String> toSignature =
+ (method) -> method.getName() + Arrays.toString(method.getParameterTypes());
+ Predicate<Method> isNotStatic =
+ (method) -> (method.getModifiers() & Modifier.STATIC) == 0;
+ List<Method> minimalMethods =
+ Stream.of(Object.class, CompletionStage.class)
+ .flatMap((klazz) -> Stream.of(klazz.getMethods()))
+ .filter(isNotStatic)
+ .collect(Collectors.toList());
+ // Methods from CompletableFuture permitted NOT to throw UOE
+ String[] signatureWhitelist = {
+ "newIncompleteFuture[]",
+ "defaultExecutor[]",
+ "minimalCompletionStage[]",
+ "copy[]",
+ };
+ Set<String> permittedMethodSignatures =
+ Stream.concat(minimalMethods.stream().map(toSignature),
+ Stream.of(signatureWhitelist))
+ .collect(Collectors.toSet());
+ List<Method> allMethods = Stream.of(CompletableFuture.class.getMethods())
+ .filter(isNotStatic)
+ .filter((method) -> !permittedMethodSignatures.contains(toSignature.apply(method)))
+ .collect(Collectors.toList());
- // CompletionStage<Integer> minimalStage =
- // new CompletableFuture<Integer>().minimalCompletionStage();
+ CompletionStage<Integer> minimalStage =
+ new CompletableFuture<Integer>().minimalCompletionStage();
- // List<Method> bugs = new ArrayList<>();
- // for (Method method : allMethods) {
- // Class<?>[] parameterTypes = method.getParameterTypes();
- // Object[] args = new Object[parameterTypes.length];
- // // Manufacture boxed primitives for primitive params
- // for (int i = 0; i < args.length; i++) {
- // Class<?> type = parameterTypes[i];
- // if (parameterTypes[i] == boolean.class)
- // args[i] = false;
- // else if (parameterTypes[i] == int.class)
- // args[i] = 0;
- // else if (parameterTypes[i] == long.class)
- // args[i] = 0L;
- // }
- // try {
- // method.invoke(minimalStage, args);
- // bugs.add(method);
- // }
- // catch (java.lang.reflect.InvocationTargetException expected) {
- // if (! (expected.getCause() instanceof UnsupportedOperationException)) {
- // bugs.add(method);
- // // expected.getCause().printStackTrace();
- // }
- // }
- // catch (ReflectiveOperationException bad) { throw new Error(bad); }
- // }
- // if (!bugs.isEmpty())
- // throw new Error("Methods did not throw UOE: " + bugs.toString());
- // }
+ List<Method> bugs = new ArrayList<>();
+ for (Method method : allMethods) {
+ Class<?>[] parameterTypes = method.getParameterTypes();
+ Object[] args = new Object[parameterTypes.length];
+ // Manufacture boxed primitives for primitive params
+ for (int i = 0; i < args.length; i++) {
+ Class<?> type = parameterTypes[i];
+ if (parameterTypes[i] == boolean.class)
+ args[i] = false;
+ else if (parameterTypes[i] == int.class)
+ args[i] = 0;
+ else if (parameterTypes[i] == long.class)
+ args[i] = 0L;
+ }
+ try {
+ method.invoke(minimalStage, args);
+ bugs.add(method);
+ }
+ catch (java.lang.reflect.InvocationTargetException expected) {
+ if (! (expected.getCause() instanceof UnsupportedOperationException)) {
+ bugs.add(method);
+ // expected.getCause().printStackTrace();
+ }
+ }
+ catch (ReflectiveOperationException bad) { throw new Error(bad); }
+ }
+ if (!bugs.isEmpty())
+ throw new Error("Methods did not throw UOE: " + bugs.toString());
+ }
static class Monad {
static class ZeroException extends RuntimeException {
diff --git a/jsr166-tests/src/test/java/jsr166/ThreadLocalRandom8Test.java b/jsr166-tests/src/test/java/jsr166/ThreadLocalRandom8Test.java
index 614af83..b98dc31 100644
--- a/jsr166-tests/src/test/java/jsr166/ThreadLocalRandom8Test.java
+++ b/jsr166-tests/src/test/java/jsr166/ThreadLocalRandom8Test.java
@@ -39,203 +39,202 @@
* Invoking sized ints, long, doubles, with negative sizes throws
* IllegalArgumentException
*/
- // TODO(streams):
- // public void testBadStreamSize() {
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // Runnable[] throwingActions = {
- // () -> r.ints(-1L),
- // () -> r.ints(-1L, 2, 3),
- // () -> r.longs(-1L),
- // () -> r.longs(-1L, -1L, 1L),
- // () -> r.doubles(-1L),
- // () -> r.doubles(-1L, .5, .6),
- // };
- // assertThrows(IllegalArgumentException.class, throwingActions);
- // }
+ public void testBadStreamSize() {
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ Runnable[] throwingActions = {
+ () -> r.ints(-1L),
+ () -> r.ints(-1L, 2, 3),
+ () -> r.longs(-1L),
+ () -> r.longs(-1L, -1L, 1L),
+ () -> r.doubles(-1L),
+ () -> r.doubles(-1L, .5, .6),
+ };
+ assertThrows(IllegalArgumentException.class, throwingActions);
+ }
- // /**
- // * Invoking bounded ints, long, doubles, with illegal bounds throws
- // * IllegalArgumentException
- // */
- // public void testBadStreamBounds() {
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // Runnable[] throwingActions = {
- // () -> r.ints(2, 1),
- // () -> r.ints(10, 42, 42),
- // () -> r.longs(-1L, -1L),
- // () -> r.longs(10, 1L, -2L),
- // () -> r.doubles(0.0, 0.0),
- // () -> r.doubles(10, .5, .4),
- // };
- // assertThrows(IllegalArgumentException.class, throwingActions);
- // }
+ /**
+ * Invoking bounded ints, long, doubles, with illegal bounds throws
+ * IllegalArgumentException
+ */
+ public void testBadStreamBounds() {
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ Runnable[] throwingActions = {
+ () -> r.ints(2, 1),
+ () -> r.ints(10, 42, 42),
+ () -> r.longs(-1L, -1L),
+ () -> r.longs(10, 1L, -2L),
+ () -> r.doubles(0.0, 0.0),
+ () -> r.doubles(10, .5, .4),
+ };
+ assertThrows(IllegalArgumentException.class, throwingActions);
+ }
- // /**
- // * A parallel sized stream of ints generates the given number of values
- // */
- // public void testIntsCount() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 0;
- // for (int reps = 0; reps < REPS; ++reps) {
- // counter.reset();
- // r.ints(size).parallel().forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // size += 524959;
- // }
- // }
+ /**
+ * A parallel sized stream of ints generates the given number of values
+ */
+ public void testIntsCount() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 0;
+ for (int reps = 0; reps < REPS; ++reps) {
+ counter.reset();
+ r.ints(size).parallel().forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ size += 524959;
+ }
+ }
- // /**
- // * A parallel sized stream of longs generates the given number of values
- // */
- // public void testLongsCount() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 0;
- // for (int reps = 0; reps < REPS; ++reps) {
- // counter.reset();
- // r.longs(size).parallel().forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // size += 524959;
- // }
- // }
+ /**
+ * A parallel sized stream of longs generates the given number of values
+ */
+ public void testLongsCount() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 0;
+ for (int reps = 0; reps < REPS; ++reps) {
+ counter.reset();
+ r.longs(size).parallel().forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ size += 524959;
+ }
+ }
- // /**
- // * A parallel sized stream of doubles generates the given number of values
- // */
- // public void testDoublesCount() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 0;
- // for (int reps = 0; reps < REPS; ++reps) {
- // counter.reset();
- // r.doubles(size).parallel().forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // size += 524959;
- // }
- // }
+ /**
+ * A parallel sized stream of doubles generates the given number of values
+ */
+ public void testDoublesCount() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 0;
+ for (int reps = 0; reps < REPS; ++reps) {
+ counter.reset();
+ r.doubles(size).parallel().forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ size += 524959;
+ }
+ }
- // /**
- // * Each of a parallel sized stream of bounded ints is within bounds
- // */
- // public void testBoundedInts() {
- // AtomicInteger fails = new AtomicInteger(0);
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 12345L;
- // for (int least = -15485867; least < MAX_INT_BOUND; least += 524959) {
- // for (int bound = least + 2; bound > least && bound < MAX_INT_BOUND; bound += 67867967) {
- // final int lo = least, hi = bound;
- // r.ints(size, lo, hi).parallel().forEach(
- // x -> {
- // if (x < lo || x >= hi)
- // fails.getAndIncrement(); });
- // }
- // }
- // assertEquals(0, fails.get());
- // }
+ /**
+ * Each of a parallel sized stream of bounded ints is within bounds
+ */
+ public void testBoundedInts() {
+ AtomicInteger fails = new AtomicInteger(0);
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 12345L;
+ for (int least = -15485867; least < MAX_INT_BOUND; least += 524959) {
+ for (int bound = least + 2; bound > least && bound < MAX_INT_BOUND; bound += 67867967) {
+ final int lo = least, hi = bound;
+ r.ints(size, lo, hi).parallel().forEach(
+ x -> {
+ if (x < lo || x >= hi)
+ fails.getAndIncrement(); });
+ }
+ }
+ assertEquals(0, fails.get());
+ }
- // /**
- // * Each of a parallel sized stream of bounded longs is within bounds
- // */
- // public void testBoundedLongs() {
- // AtomicInteger fails = new AtomicInteger(0);
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 123L;
- // for (long least = -86028121; least < MAX_LONG_BOUND; least += 1982451653L) {
- // for (long bound = least + 2; bound > least && bound < MAX_LONG_BOUND; bound += Math.abs(bound * 7919)) {
- // final long lo = least, hi = bound;
- // r.longs(size, lo, hi).parallel().forEach(
- // x -> {
- // if (x < lo || x >= hi)
- // fails.getAndIncrement(); });
- // }
- // }
- // assertEquals(0, fails.get());
- // }
+ /**
+ * Each of a parallel sized stream of bounded longs is within bounds
+ */
+ public void testBoundedLongs() {
+ AtomicInteger fails = new AtomicInteger(0);
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 123L;
+ for (long least = -86028121; least < MAX_LONG_BOUND; least += 1982451653L) {
+ for (long bound = least + 2; bound > least && bound < MAX_LONG_BOUND; bound += Math.abs(bound * 7919)) {
+ final long lo = least, hi = bound;
+ r.longs(size, lo, hi).parallel().forEach(
+ x -> {
+ if (x < lo || x >= hi)
+ fails.getAndIncrement(); });
+ }
+ }
+ assertEquals(0, fails.get());
+ }
- // /**
- // * Each of a parallel sized stream of bounded doubles is within bounds
- // */
- // public void testBoundedDoubles() {
- // AtomicInteger fails = new AtomicInteger(0);
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 456;
- // for (double least = 0.00011; least < 1.0e20; least *= 9) {
- // for (double bound = least * 1.0011; bound < 1.0e20; bound *= 17) {
- // final double lo = least, hi = bound;
- // r.doubles(size, lo, hi).parallel().forEach(
- // x -> {
- // if (x < lo || x >= hi)
- // fails.getAndIncrement(); });
- // }
- // }
- // assertEquals(0, fails.get());
- // }
+ /**
+ * Each of a parallel sized stream of bounded doubles is within bounds
+ */
+ public void testBoundedDoubles() {
+ AtomicInteger fails = new AtomicInteger(0);
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 456;
+ for (double least = 0.00011; least < 1.0e20; least *= 9) {
+ for (double bound = least * 1.0011; bound < 1.0e20; bound *= 17) {
+ final double lo = least, hi = bound;
+ r.doubles(size, lo, hi).parallel().forEach(
+ x -> {
+ if (x < lo || x >= hi)
+ fails.getAndIncrement(); });
+ }
+ }
+ assertEquals(0, fails.get());
+ }
- // /**
- // * A parallel unsized stream of ints generates at least 100 values
- // */
- // public void testUnsizedIntsCount() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 100;
- // r.ints().limit(size).parallel().forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // }
+ /**
+ * A parallel unsized stream of ints generates at least 100 values
+ */
+ public void testUnsizedIntsCount() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 100;
+ r.ints().limit(size).parallel().forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ }
- // /**
- // * A parallel unsized stream of longs generates at least 100 values
- // */
- // public void testUnsizedLongsCount() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 100;
- // r.longs().limit(size).parallel().forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // }
+ /**
+ * A parallel unsized stream of longs generates at least 100 values
+ */
+ public void testUnsizedLongsCount() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 100;
+ r.longs().limit(size).parallel().forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ }
- // /**
- // * A parallel unsized stream of doubles generates at least 100 values
- // */
- // public void testUnsizedDoublesCount() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 100;
- // r.doubles().limit(size).parallel().forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // }
+ /**
+ * A parallel unsized stream of doubles generates at least 100 values
+ */
+ public void testUnsizedDoublesCount() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 100;
+ r.doubles().limit(size).parallel().forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ }
- // /**
- // * A sequential unsized stream of ints generates at least 100 values
- // */
- // public void testUnsizedIntsCountSeq() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 100;
- // r.ints().limit(size).forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // }
+ /**
+ * A sequential unsized stream of ints generates at least 100 values
+ */
+ public void testUnsizedIntsCountSeq() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 100;
+ r.ints().limit(size).forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ }
- // /**
- // * A sequential unsized stream of longs generates at least 100 values
- // */
- // public void testUnsizedLongsCountSeq() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 100;
- // r.longs().limit(size).forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // }
+ /**
+ * A sequential unsized stream of longs generates at least 100 values
+ */
+ public void testUnsizedLongsCountSeq() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 100;
+ r.longs().limit(size).forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ }
- // /**
- // * A sequential unsized stream of doubles generates at least 100 values
- // */
- // public void testUnsizedDoublesCountSeq() {
- // LongAdder counter = new LongAdder();
- // ThreadLocalRandom r = ThreadLocalRandom.current();
- // long size = 100;
- // r.doubles().limit(size).forEach(x -> counter.increment());
- // assertEquals(size, counter.sum());
- // }
+ /**
+ * A sequential unsized stream of doubles generates at least 100 values
+ */
+ public void testUnsizedDoublesCountSeq() {
+ LongAdder counter = new LongAdder();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ long size = 100;
+ r.doubles().limit(size).forEach(x -> counter.increment());
+ assertEquals(size, counter.sum());
+ }
}
diff --git a/luni/src/main/java/java/util/concurrent/CompletionStage.java b/luni/src/main/java/java/util/concurrent/CompletionStage.java
index ccb1aa4..4a94cb3 100644
--- a/luni/src/main/java/java/util/concurrent/CompletionStage.java
+++ b/luni/src/main/java/java/util/concurrent/CompletionStage.java
@@ -128,7 +128,7 @@
*
* <p>This method is analogous to
* {@link java.util.Optional#map Optional.map} and
- * TODO(streams): make a link to java.util.stream.Stream#map Stream.map.
+ * {@link java.util.stream.Stream#map Stream.map}.
*
* <p>See the {@link CompletionStage} documentation for rules
* covering exceptional completion.
@@ -606,7 +606,7 @@
*
* <p>This method is analogous to
* {@link java.util.Optional#flatMap Optional.flatMap} and
- * TODO(streams): make a link to java.util.stream.Stream#flatMap Stream.flatMap.
+ * {@link java.util.stream.Stream#flatMap Stream.flatMap}.
*
* <p>See the {@link CompletionStage} documentation for rules
* covering exceptional completion.
diff --git a/luni/src/main/java/java/util/concurrent/ConcurrentHashMap.java b/luni/src/main/java/java/util/concurrent/ConcurrentHashMap.java
index b4fa8aa..2581230 100644
--- a/luni/src/main/java/java/util/concurrent/ConcurrentHashMap.java
+++ b/luni/src/main/java/java/util/concurrent/ConcurrentHashMap.java
@@ -38,8 +38,7 @@
import java.util.function.ToIntFunction;
import java.util.function.ToLongBiFunction;
import java.util.function.ToLongFunction;
-// TODO(streams):
-//import java.util.stream.Stream;
+import java.util.stream.Stream;
// BEGIN android-note
// removed link to collections framework docs
@@ -121,7 +120,7 @@
* does <em>not</em> allow {@code null} to be used as a key or value.
*
* <p>ConcurrentHashMaps support a set of sequential and parallel bulk
- * operations that, unlike most (TODO(streams): link to Stream) methods, are designed
+ * operations that, unlike most {@link Stream} methods, are designed
* to be safely, and often sensibly, applied even with maps that are
* being concurrently updated by other threads; for example, when
* computing a snapshot summary of the values in a shared registry.
diff --git a/luni/src/main/java/java/util/concurrent/ThreadLocalRandom.java b/luni/src/main/java/java/util/concurrent/ThreadLocalRandom.java
index 8c65bde..e13c329 100644
--- a/luni/src/main/java/java/util/concurrent/ThreadLocalRandom.java
+++ b/luni/src/main/java/java/util/concurrent/ThreadLocalRandom.java
@@ -14,11 +14,10 @@
import java.util.function.DoubleConsumer;
import java.util.function.IntConsumer;
import java.util.function.LongConsumer;
-// TODO(streams):
-// import java.util.stream.DoubleStream;
-// import java.util.stream.IntStream;
-// import java.util.stream.LongStream;
-// import java.util.stream.StreamSupport;
+import java.util.stream.DoubleStream;
+import java.util.stream.IntStream;
+import java.util.stream.LongStream;
+import java.util.stream.StreamSupport;
/**
* A random number generator isolated to the current thread. Like the
@@ -433,274 +432,272 @@
// stream methods, coded in a way intended to better isolate for
// maintenance purposes the small differences across forms.
+ /**
+ * Returns a stream producing the given {@code streamSize} number of
+ * pseudorandom {@code int} values.
+ *
+ * @param streamSize the number of values to generate
+ * @return a stream of pseudorandom {@code int} values
+ * @throws IllegalArgumentException if {@code streamSize} is
+ * less than zero
+ * @since 1.8
+ */
+ public IntStream ints(long streamSize) {
+ if (streamSize < 0L)
+ throw new IllegalArgumentException(BAD_SIZE);
+ return StreamSupport.intStream
+ (new RandomIntsSpliterator
+ (0L, streamSize, Integer.MAX_VALUE, 0),
+ false);
+ }
- // TODO(streams):
- // /**
- // * Returns a stream producing the given {@code streamSize} number of
- // * pseudorandom {@code int} values.
- // *
- // * @param streamSize the number of values to generate
- // * @return a stream of pseudorandom {@code int} values
- // * @throws IllegalArgumentException if {@code streamSize} is
- // * less than zero
- // * @since 1.8
- // */
- // public IntStream ints(long streamSize) {
- // if (streamSize < 0L)
- // throw new IllegalArgumentException(BAD_SIZE);
- // return StreamSupport.intStream
- // (new RandomIntsSpliterator
- // (0L, streamSize, Integer.MAX_VALUE, 0),
- // false);
- // }
+ /**
+ * Returns an effectively unlimited stream of pseudorandom {@code int}
+ * values.
+ *
+ * @implNote This method is implemented to be equivalent to {@code
+ * ints(Long.MAX_VALUE)}.
+ *
+ * @return a stream of pseudorandom {@code int} values
+ * @since 1.8
+ */
+ public IntStream ints() {
+ return StreamSupport.intStream
+ (new RandomIntsSpliterator
+ (0L, Long.MAX_VALUE, Integer.MAX_VALUE, 0),
+ false);
+ }
- // /**
- // * Returns an effectively unlimited stream of pseudorandom {@code int}
- // * values.
- // *
- // * @implNote This method is implemented to be equivalent to {@code
- // * ints(Long.MAX_VALUE)}.
- // *
- // * @return a stream of pseudorandom {@code int} values
- // * @since 1.8
- // */
- // public IntStream ints() {
- // return StreamSupport.intStream
- // (new RandomIntsSpliterator
- // (0L, Long.MAX_VALUE, Integer.MAX_VALUE, 0),
- // false);
- // }
+ /**
+ * Returns a stream producing the given {@code streamSize} number
+ * of pseudorandom {@code int} values, each conforming to the given
+ * origin (inclusive) and bound (exclusive).
+ *
+ * @param streamSize the number of values to generate
+ * @param randomNumberOrigin the origin (inclusive) of each random value
+ * @param randomNumberBound the bound (exclusive) of each random value
+ * @return a stream of pseudorandom {@code int} values,
+ * each with the given origin (inclusive) and bound (exclusive)
+ * @throws IllegalArgumentException if {@code streamSize} is
+ * less than zero, or {@code randomNumberOrigin}
+ * is greater than or equal to {@code randomNumberBound}
+ * @since 1.8
+ */
+ public IntStream ints(long streamSize, int randomNumberOrigin,
+ int randomNumberBound) {
+ if (streamSize < 0L)
+ throw new IllegalArgumentException(BAD_SIZE);
+ if (randomNumberOrigin >= randomNumberBound)
+ throw new IllegalArgumentException(BAD_RANGE);
+ return StreamSupport.intStream
+ (new RandomIntsSpliterator
+ (0L, streamSize, randomNumberOrigin, randomNumberBound),
+ false);
+ }
- // /**
- // * Returns a stream producing the given {@code streamSize} number
- // * of pseudorandom {@code int} values, each conforming to the given
- // * origin (inclusive) and bound (exclusive).
- // *
- // * @param streamSize the number of values to generate
- // * @param randomNumberOrigin the origin (inclusive) of each random value
- // * @param randomNumberBound the bound (exclusive) of each random value
- // * @return a stream of pseudorandom {@code int} values,
- // * each with the given origin (inclusive) and bound (exclusive)
- // * @throws IllegalArgumentException if {@code streamSize} is
- // * less than zero, or {@code randomNumberOrigin}
- // * is greater than or equal to {@code randomNumberBound}
- // * @since 1.8
- // */
- // public IntStream ints(long streamSize, int randomNumberOrigin,
- // int randomNumberBound) {
- // if (streamSize < 0L)
- // throw new IllegalArgumentException(BAD_SIZE);
- // if (randomNumberOrigin >= randomNumberBound)
- // throw new IllegalArgumentException(BAD_RANGE);
- // return StreamSupport.intStream
- // (new RandomIntsSpliterator
- // (0L, streamSize, randomNumberOrigin, randomNumberBound),
- // false);
- // }
+ /**
+ * Returns an effectively unlimited stream of pseudorandom {@code
+ * int} values, each conforming to the given origin (inclusive) and bound
+ * (exclusive).
+ *
+ * @implNote This method is implemented to be equivalent to {@code
+ * ints(Long.MAX_VALUE, randomNumberOrigin, randomNumberBound)}.
+ *
+ * @param randomNumberOrigin the origin (inclusive) of each random value
+ * @param randomNumberBound the bound (exclusive) of each random value
+ * @return a stream of pseudorandom {@code int} values,
+ * each with the given origin (inclusive) and bound (exclusive)
+ * @throws IllegalArgumentException if {@code randomNumberOrigin}
+ * is greater than or equal to {@code randomNumberBound}
+ * @since 1.8
+ */
+ public IntStream ints(int randomNumberOrigin, int randomNumberBound) {
+ if (randomNumberOrigin >= randomNumberBound)
+ throw new IllegalArgumentException(BAD_RANGE);
+ return StreamSupport.intStream
+ (new RandomIntsSpliterator
+ (0L, Long.MAX_VALUE, randomNumberOrigin, randomNumberBound),
+ false);
+ }
- // /**
- // * Returns an effectively unlimited stream of pseudorandom {@code
- // * int} values, each conforming to the given origin (inclusive) and bound
- // * (exclusive).
- // *
- // * @implNote This method is implemented to be equivalent to {@code
- // * ints(Long.MAX_VALUE, randomNumberOrigin, randomNumberBound)}.
- // *
- // * @param randomNumberOrigin the origin (inclusive) of each random value
- // * @param randomNumberBound the bound (exclusive) of each random value
- // * @return a stream of pseudorandom {@code int} values,
- // * each with the given origin (inclusive) and bound (exclusive)
- // * @throws IllegalArgumentException if {@code randomNumberOrigin}
- // * is greater than or equal to {@code randomNumberBound}
- // * @since 1.8
- // */
- // public IntStream ints(int randomNumberOrigin, int randomNumberBound) {
- // if (randomNumberOrigin >= randomNumberBound)
- // throw new IllegalArgumentException(BAD_RANGE);
- // return StreamSupport.intStream
- // (new RandomIntsSpliterator
- // (0L, Long.MAX_VALUE, randomNumberOrigin, randomNumberBound),
- // false);
- // }
+ /**
+ * Returns a stream producing the given {@code streamSize} number of
+ * pseudorandom {@code long} values.
+ *
+ * @param streamSize the number of values to generate
+ * @return a stream of pseudorandom {@code long} values
+ * @throws IllegalArgumentException if {@code streamSize} is
+ * less than zero
+ * @since 1.8
+ */
+ public LongStream longs(long streamSize) {
+ if (streamSize < 0L)
+ throw new IllegalArgumentException(BAD_SIZE);
+ return StreamSupport.longStream
+ (new RandomLongsSpliterator
+ (0L, streamSize, Long.MAX_VALUE, 0L),
+ false);
+ }
- // /**
- // * Returns a stream producing the given {@code streamSize} number of
- // * pseudorandom {@code long} values.
- // *
- // * @param streamSize the number of values to generate
- // * @return a stream of pseudorandom {@code long} values
- // * @throws IllegalArgumentException if {@code streamSize} is
- // * less than zero
- // * @since 1.8
- // */
- // public LongStream longs(long streamSize) {
- // if (streamSize < 0L)
- // throw new IllegalArgumentException(BAD_SIZE);
- // return StreamSupport.longStream
- // (new RandomLongsSpliterator
- // (0L, streamSize, Long.MAX_VALUE, 0L),
- // false);
- // }
+ /**
+ * Returns an effectively unlimited stream of pseudorandom {@code long}
+ * values.
+ *
+ * @implNote This method is implemented to be equivalent to {@code
+ * longs(Long.MAX_VALUE)}.
+ *
+ * @return a stream of pseudorandom {@code long} values
+ * @since 1.8
+ */
+ public LongStream longs() {
+ return StreamSupport.longStream
+ (new RandomLongsSpliterator
+ (0L, Long.MAX_VALUE, Long.MAX_VALUE, 0L),
+ false);
+ }
- // /**
- // * Returns an effectively unlimited stream of pseudorandom {@code long}
- // * values.
- // *
- // * @implNote This method is implemented to be equivalent to {@code
- // * longs(Long.MAX_VALUE)}.
- // *
- // * @return a stream of pseudorandom {@code long} values
- // * @since 1.8
- // */
- // public LongStream longs() {
- // return StreamSupport.longStream
- // (new RandomLongsSpliterator
- // (0L, Long.MAX_VALUE, Long.MAX_VALUE, 0L),
- // false);
- // }
+ /**
+ * Returns a stream producing the given {@code streamSize} number of
+ * pseudorandom {@code long} values, each conforming to the given origin
+ * (inclusive) and bound (exclusive).
+ *
+ * @param streamSize the number of values to generate
+ * @param randomNumberOrigin the origin (inclusive) of each random value
+ * @param randomNumberBound the bound (exclusive) of each random value
+ * @return a stream of pseudorandom {@code long} values,
+ * each with the given origin (inclusive) and bound (exclusive)
+ * @throws IllegalArgumentException if {@code streamSize} is
+ * less than zero, or {@code randomNumberOrigin}
+ * is greater than or equal to {@code randomNumberBound}
+ * @since 1.8
+ */
+ public LongStream longs(long streamSize, long randomNumberOrigin,
+ long randomNumberBound) {
+ if (streamSize < 0L)
+ throw new IllegalArgumentException(BAD_SIZE);
+ if (randomNumberOrigin >= randomNumberBound)
+ throw new IllegalArgumentException(BAD_RANGE);
+ return StreamSupport.longStream
+ (new RandomLongsSpliterator
+ (0L, streamSize, randomNumberOrigin, randomNumberBound),
+ false);
+ }
- // /**
- // * Returns a stream producing the given {@code streamSize} number of
- // * pseudorandom {@code long}, each conforming to the given origin
- // * (inclusive) and bound (exclusive).
- // *
- // * @param streamSize the number of values to generate
- // * @param randomNumberOrigin the origin (inclusive) of each random value
- // * @param randomNumberBound the bound (exclusive) of each random value
- // * @return a stream of pseudorandom {@code long} values,
- // * each with the given origin (inclusive) and bound (exclusive)
- // * @throws IllegalArgumentException if {@code streamSize} is
- // * less than zero, or {@code randomNumberOrigin}
- // * is greater than or equal to {@code randomNumberBound}
- // * @since 1.8
- // */
- // public LongStream longs(long streamSize, long randomNumberOrigin,
- // long randomNumberBound) {
- // if (streamSize < 0L)
- // throw new IllegalArgumentException(BAD_SIZE);
- // if (randomNumberOrigin >= randomNumberBound)
- // throw new IllegalArgumentException(BAD_RANGE);
- // return StreamSupport.longStream
- // (new RandomLongsSpliterator
- // (0L, streamSize, randomNumberOrigin, randomNumberBound),
- // false);
- // }
+ /**
+ * Returns an effectively unlimited stream of pseudorandom {@code
+ * long} values, each conforming to the given origin (inclusive) and bound
+ * (exclusive).
+ *
+ * @implNote This method is implemented to be equivalent to {@code
+ * longs(Long.MAX_VALUE, randomNumberOrigin, randomNumberBound)}.
+ *
+ * @param randomNumberOrigin the origin (inclusive) of each random value
+ * @param randomNumberBound the bound (exclusive) of each random value
+ * @return a stream of pseudorandom {@code long} values,
+ * each with the given origin (inclusive) and bound (exclusive)
+ * @throws IllegalArgumentException if {@code randomNumberOrigin}
+ * is greater than or equal to {@code randomNumberBound}
+ * @since 1.8
+ */
+ public LongStream longs(long randomNumberOrigin, long randomNumberBound) {
+ if (randomNumberOrigin >= randomNumberBound)
+ throw new IllegalArgumentException(BAD_RANGE);
+ return StreamSupport.longStream
+ (new RandomLongsSpliterator
+ (0L, Long.MAX_VALUE, randomNumberOrigin, randomNumberBound),
+ false);
+ }
- // /**
- // * Returns an effectively unlimited stream of pseudorandom {@code
- // * long} values, each conforming to the given origin (inclusive) and bound
- // * (exclusive).
- // *
- // * @implNote This method is implemented to be equivalent to {@code
- // * longs(Long.MAX_VALUE, randomNumberOrigin, randomNumberBound)}.
- // *
- // * @param randomNumberOrigin the origin (inclusive) of each random value
- // * @param randomNumberBound the bound (exclusive) of each random value
- // * @return a stream of pseudorandom {@code long} values,
- // * each with the given origin (inclusive) and bound (exclusive)
- // * @throws IllegalArgumentException if {@code randomNumberOrigin}
- // * is greater than or equal to {@code randomNumberBound}
- // * @since 1.8
- // */
- // public LongStream longs(long randomNumberOrigin, long randomNumberBound) {
- // if (randomNumberOrigin >= randomNumberBound)
- // throw new IllegalArgumentException(BAD_RANGE);
- // return StreamSupport.longStream
- // (new RandomLongsSpliterator
- // (0L, Long.MAX_VALUE, randomNumberOrigin, randomNumberBound),
- // false);
- // }
+ /**
+ * Returns a stream producing the given {@code streamSize} number of
+ * pseudorandom {@code double} values, each between zero
+ * (inclusive) and one (exclusive).
+ *
+ * @param streamSize the number of values to generate
+ * @return a stream of {@code double} values
+ * @throws IllegalArgumentException if {@code streamSize} is
+ * less than zero
+ * @since 1.8
+ */
+ public DoubleStream doubles(long streamSize) {
+ if (streamSize < 0L)
+ throw new IllegalArgumentException(BAD_SIZE);
+ return StreamSupport.doubleStream
+ (new RandomDoublesSpliterator
+ (0L, streamSize, Double.MAX_VALUE, 0.0),
+ false);
+ }
- // /**
- // * Returns a stream producing the given {@code streamSize} number of
- // * pseudorandom {@code double} values, each between zero
- // * (inclusive) and one (exclusive).
- // *
- // * @param streamSize the number of values to generate
- // * @return a stream of {@code double} values
- // * @throws IllegalArgumentException if {@code streamSize} is
- // * less than zero
- // * @since 1.8
- // */
- // public DoubleStream doubles(long streamSize) {
- // if (streamSize < 0L)
- // throw new IllegalArgumentException(BAD_SIZE);
- // return StreamSupport.doubleStream
- // (new RandomDoublesSpliterator
- // (0L, streamSize, Double.MAX_VALUE, 0.0),
- // false);
- // }
+ /**
+ * Returns an effectively unlimited stream of pseudorandom {@code
+ * double} values, each between zero (inclusive) and one
+ * (exclusive).
+ *
+ * @implNote This method is implemented to be equivalent to {@code
+ * doubles(Long.MAX_VALUE)}.
+ *
+ * @return a stream of pseudorandom {@code double} values
+ * @since 1.8
+ */
+ public DoubleStream doubles() {
+ return StreamSupport.doubleStream
+ (new RandomDoublesSpliterator
+ (0L, Long.MAX_VALUE, Double.MAX_VALUE, 0.0),
+ false);
+ }
- // /**
- // * Returns an effectively unlimited stream of pseudorandom {@code
- // * double} values, each between zero (inclusive) and one
- // * (exclusive).
- // *
- // * @implNote This method is implemented to be equivalent to {@code
- // * doubles(Long.MAX_VALUE)}.
- // *
- // * @return a stream of pseudorandom {@code double} values
- // * @since 1.8
- // */
- // public DoubleStream doubles() {
- // return StreamSupport.doubleStream
- // (new RandomDoublesSpliterator
- // (0L, Long.MAX_VALUE, Double.MAX_VALUE, 0.0),
- // false);
- // }
+ /**
+ * Returns a stream producing the given {@code streamSize} number of
+ * pseudorandom {@code double} values, each conforming to the given origin
+ * (inclusive) and bound (exclusive).
+ *
+ * @param streamSize the number of values to generate
+ * @param randomNumberOrigin the origin (inclusive) of each random value
+ * @param randomNumberBound the bound (exclusive) of each random value
+ * @return a stream of pseudorandom {@code double} values,
+ * each with the given origin (inclusive) and bound (exclusive)
+ * @throws IllegalArgumentException if {@code streamSize} is
+ * less than zero
+ * @throws IllegalArgumentException if {@code randomNumberOrigin}
+ * is greater than or equal to {@code randomNumberBound}
+ * @since 1.8
+ */
+ public DoubleStream doubles(long streamSize, double randomNumberOrigin,
+ double randomNumberBound) {
+ if (streamSize < 0L)
+ throw new IllegalArgumentException(BAD_SIZE);
+ if (!(randomNumberOrigin < randomNumberBound))
+ throw new IllegalArgumentException(BAD_RANGE);
+ return StreamSupport.doubleStream
+ (new RandomDoublesSpliterator
+ (0L, streamSize, randomNumberOrigin, randomNumberBound),
+ false);
+ }
- // /**
- // * Returns a stream producing the given {@code streamSize} number of
- // * pseudorandom {@code double} values, each conforming to the given origin
- // * (inclusive) and bound (exclusive).
- // *
- // * @param streamSize the number of values to generate
- // * @param randomNumberOrigin the origin (inclusive) of each random value
- // * @param randomNumberBound the bound (exclusive) of each random value
- // * @return a stream of pseudorandom {@code double} values,
- // * each with the given origin (inclusive) and bound (exclusive)
- // * @throws IllegalArgumentException if {@code streamSize} is
- // * less than zero
- // * @throws IllegalArgumentException if {@code randomNumberOrigin}
- // * is greater than or equal to {@code randomNumberBound}
- // * @since 1.8
- // */
- // public DoubleStream doubles(long streamSize, double randomNumberOrigin,
- // double randomNumberBound) {
- // if (streamSize < 0L)
- // throw new IllegalArgumentException(BAD_SIZE);
- // if (!(randomNumberOrigin < randomNumberBound))
- // throw new IllegalArgumentException(BAD_RANGE);
- // return StreamSupport.doubleStream
- // (new RandomDoublesSpliterator
- // (0L, streamSize, randomNumberOrigin, randomNumberBound),
- // false);
- // }
-
- // /**
- // * Returns an effectively unlimited stream of pseudorandom {@code
- // * double} values, each conforming to the given origin (inclusive) and bound
- // * (exclusive).
- // *
- // * @implNote This method is implemented to be equivalent to {@code
- // * doubles(Long.MAX_VALUE, randomNumberOrigin, randomNumberBound)}.
- // *
- // * @param randomNumberOrigin the origin (inclusive) of each random value
- // * @param randomNumberBound the bound (exclusive) of each random value
- // * @return a stream of pseudorandom {@code double} values,
- // * each with the given origin (inclusive) and bound (exclusive)
- // * @throws IllegalArgumentException if {@code randomNumberOrigin}
- // * is greater than or equal to {@code randomNumberBound}
- // * @since 1.8
- // */
- // public DoubleStream doubles(double randomNumberOrigin, double randomNumberBound) {
- // if (!(randomNumberOrigin < randomNumberBound))
- // throw new IllegalArgumentException(BAD_RANGE);
- // return StreamSupport.doubleStream
- // (new RandomDoublesSpliterator
- // (0L, Long.MAX_VALUE, randomNumberOrigin, randomNumberBound),
- // false);
- // }
+ /**
+ * Returns an effectively unlimited stream of pseudorandom {@code
+ * double} values, each conforming to the given origin (inclusive) and bound
+ * (exclusive).
+ *
+ * @implNote This method is implemented to be equivalent to {@code
+ * doubles(Long.MAX_VALUE, randomNumberOrigin, randomNumberBound)}.
+ *
+ * @param randomNumberOrigin the origin (inclusive) of each random value
+ * @param randomNumberBound the bound (exclusive) of each random value
+ * @return a stream of pseudorandom {@code double} values,
+ * each with the given origin (inclusive) and bound (exclusive)
+ * @throws IllegalArgumentException if {@code randomNumberOrigin}
+ * is greater than or equal to {@code randomNumberBound}
+ * @since 1.8
+ */
+ public DoubleStream doubles(double randomNumberOrigin, double randomNumberBound) {
+ if (!(randomNumberOrigin < randomNumberBound))
+ throw new IllegalArgumentException(BAD_RANGE);
+ return StreamSupport.doubleStream
+ (new RandomDoublesSpliterator
+ (0L, Long.MAX_VALUE, randomNumberOrigin, randomNumberBound),
+ false);
+ }
/**
* Spliterator for int streams. We multiplex the four int
diff --git a/ojluni/src/main/java/java/util/ArrayList.java b/ojluni/src/main/java/java/util/ArrayList.java
index fec492c..da2689de 100755
--- a/ojluni/src/main/java/java/util/ArrayList.java
+++ b/ojluni/src/main/java/java/util/ArrayList.java
@@ -1410,4 +1410,14 @@
return anyToRemove;
}
+
+ @SuppressWarnings("unchecked")
+ public void sort(Comparator<? super E> c) {
+ final int expectedModCount = modCount;
+ Arrays.sort((E[]) elementData, 0, size, c);
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
+ }
}
diff --git a/ojluni/src/main/java/java/util/Arrays.java b/ojluni/src/main/java/java/util/Arrays.java
index 3478662..fc6ae95 100755
--- a/ojluni/src/main/java/java/util/Arrays.java
+++ b/ojluni/src/main/java/java/util/Arrays.java
@@ -33,6 +33,11 @@
import java.util.function.IntToDoubleFunction;
import java.util.function.IntToLongFunction;
import java.util.function.IntUnaryOperator;
+import java.util.stream.DoubleStream;
+import java.util.stream.IntStream;
+import java.util.stream.LongStream;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
/**
* This class contains various methods for manipulating arrays (such as
@@ -4734,4 +4739,127 @@
return Spliterators.spliterator(array, startInclusive, endExclusive,
Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
+
+
+ /**
+ * Returns a sequential {@link Stream} with the specified array as its
+ * source.
+ *
+ * @param <T> The type of the array elements
+ * @param array The array, assumed to be unmodified during use
+ * @return a {@code Stream} for the array
+ * @since 1.8
+ */
+ public static <T> Stream<T> stream(T[] array) {
+ return stream(array, 0, array.length);
+ }
+
+ /**
+ * Returns a sequential {@link Stream} with the specified range of the
+ * specified array as its source.
+ *
+ * @param <T> the type of the array elements
+ * @param array the array, assumed to be unmodified during use
+ * @param startInclusive the first index to cover, inclusive
+ * @param endExclusive index immediately past the last index to cover
+ * @return a {@code Stream} for the array range
+ * @throws ArrayIndexOutOfBoundsException if {@code startInclusive} is
+ * negative, {@code endExclusive} is less than
+ * {@code startInclusive}, or {@code endExclusive} is greater than
+ * the array size
+ * @since 1.8
+ */
+ public static <T> Stream<T> stream(T[] array, int startInclusive, int endExclusive) {
+ return StreamSupport.stream(spliterator(array, startInclusive, endExclusive), false);
+ }
+
+ /**
+ * Returns a sequential {@link IntStream} with the specified array as its
+ * source.
+ *
+ * @param array the array, assumed to be unmodified during use
+ * @return an {@code IntStream} for the array
+ * @since 1.8
+ */
+ public static IntStream stream(int[] array) {
+ return stream(array, 0, array.length);
+ }
+
+ /**
+ * Returns a sequential {@link IntStream} with the specified range of the
+ * specified array as its source.
+ *
+ * @param array the array, assumed to be unmodified during use
+ * @param startInclusive the first index to cover, inclusive
+ * @param endExclusive index immediately past the last index to cover
+ * @return an {@code IntStream} for the array range
+ * @throws ArrayIndexOutOfBoundsException if {@code startInclusive} is
+ * negative, {@code endExclusive} is less than
+ * {@code startInclusive}, or {@code endExclusive} is greater than
+ * the array size
+ * @since 1.8
+ */
+ public static IntStream stream(int[] array, int startInclusive, int endExclusive) {
+ return StreamSupport.intStream(spliterator(array, startInclusive, endExclusive), false);
+ }
+
+ /**
+ * Returns a sequential {@link LongStream} with the specified array as its
+ * source.
+ *
+ * @param array the array, assumed to be unmodified during use
+ * @return a {@code LongStream} for the array
+ * @since 1.8
+ */
+ public static LongStream stream(long[] array) {
+ return stream(array, 0, array.length);
+ }
+
+ /**
+ * Returns a sequential {@link LongStream} with the specified range of the
+ * specified array as its source.
+ *
+ * @param array the array, assumed to be unmodified during use
+ * @param startInclusive the first index to cover, inclusive
+ * @param endExclusive index immediately past the last index to cover
+ * @return a {@code LongStream} for the array range
+ * @throws ArrayIndexOutOfBoundsException if {@code startInclusive} is
+ * negative, {@code endExclusive} is less than
+ * {@code startInclusive}, or {@code endExclusive} is greater than
+ * the array size
+ * @since 1.8
+ */
+ public static LongStream stream(long[] array, int startInclusive, int endExclusive) {
+ return StreamSupport.longStream(spliterator(array, startInclusive, endExclusive), false);
+ }
+
+ /**
+ * Returns a sequential {@link DoubleStream} with the specified array as its
+ * source.
+ *
+ * @param array the array, assumed to be unmodified during use
+ * @return a {@code DoubleStream} for the array
+ * @since 1.8
+ */
+ public static DoubleStream stream(double[] array) {
+ return stream(array, 0, array.length);
+ }
+
+ /**
+ * Returns a sequential {@link DoubleStream} with the specified range of the
+ * specified array as its source.
+ *
+ * @param array the array, assumed to be unmodified during use
+ * @param startInclusive the first index to cover, inclusive
+ * @param endExclusive index immediately past the last index to cover
+ * @return a {@code DoubleStream} for the array range
+ * @throws ArrayIndexOutOfBoundsException if {@code startInclusive} is
+ * negative, {@code endExclusive} is less than
+ * {@code startInclusive}, or {@code endExclusive} is greater than
+ * the array size
+ * @since 1.8
+ */
+ public static DoubleStream stream(double[] array, int startInclusive, int endExclusive) {
+ return StreamSupport.doubleStream(spliterator(array, startInclusive, endExclusive), false);
+ }
}
diff --git a/ojluni/src/main/java/java/util/Collection.java b/ojluni/src/main/java/java/util/Collection.java
index 9f0227f..d3542d9 100755
--- a/ojluni/src/main/java/java/util/Collection.java
+++ b/ojluni/src/main/java/java/util/Collection.java
@@ -26,6 +26,8 @@
package java.util;
import java.util.function.Predicate;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
/**
* The root interface in the <i>collection hierarchy</i>. A collection
@@ -547,4 +549,44 @@
default Spliterator<E> spliterator() {
return Spliterators.spliterator(this, 0);
}
+
+ /**
+ * Returns a sequential {@code Stream} with this collection as its source.
+ *
+ * <p>This method should be overridden when the {@link #spliterator()}
+ * method cannot return a spliterator that is {@code IMMUTABLE},
+ * {@code CONCURRENT}, or <em>late-binding</em>. (See {@link #spliterator()}
+ * for details.)
+ *
+ * @implSpec
+ * The default implementation creates a sequential {@code Stream} from the
+ * collection's {@code Spliterator}.
+ *
+ * @return a sequential {@code Stream} over the elements in this collection
+ * @since 1.8
+ */
+ default Stream<E> stream() {
+ return StreamSupport.stream(spliterator(), false);
+ }
+
+ /**
+ * Returns a possibly parallel {@code Stream} with this collection as its
+ * source. It is allowable for this method to return a sequential stream.
+ *
+ * <p>This method should be overridden when the {@link #spliterator()}
+ * method cannot return a spliterator that is {@code IMMUTABLE},
+ * {@code CONCURRENT}, or <em>late-binding</em>. (See {@link #spliterator()}
+ * for details.)
+ *
+ * @implSpec
+ * The default implementation creates a parallel {@code Stream} from the
+ * collection's {@code Spliterator}.
+ *
+ * @return a possibly parallel {@code Stream} over the elements in this
+ * collection
+ * @since 1.8
+ */
+ default Stream<E> parallelStream() {
+ return StreamSupport.stream(spliterator(), true);
+ }
}
diff --git a/ojluni/src/main/java/java/util/SplittableRandom.java b/ojluni/src/main/java/java/util/SplittableRandom.java
index b2a07ec..f17c37a 100644
--- a/ojluni/src/main/java/java/util/SplittableRandom.java
+++ b/ojluni/src/main/java/java/util/SplittableRandom.java
@@ -35,7 +35,6 @@
import java.util.stream.StreamSupport;
-// TODO(streams): Include in openjdk_java_files.mk
/**
* A generator of uniform pseudorandom values applicable for use in
* (among other contexts) isolated parallel computations that may
diff --git a/ojluni/src/main/java/java/util/stream/AbstractPipeline.java b/ojluni/src/main/java/java/util/stream/AbstractPipeline.java
new file mode 100644
index 0000000..d3ccdac
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/AbstractPipeline.java
@@ -0,0 +1,706 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.function.IntFunction;
+import java.util.function.Supplier;
+
+/**
+ * Abstract base class for "pipeline" classes, which are the core
+ * implementations of the Stream interface and its primitive specializations.
+ * Manages construction and evaluation of stream pipelines.
+ *
+ * <p>An {@code AbstractPipeline} represents an initial portion of a stream
+ * pipeline, encapsulating a stream source and zero or more intermediate
+ * operations. The individual {@code AbstractPipeline} objects are often
+ * referred to as <em>stages</em>, where each stage describes either the stream
+ * source or an intermediate operation.
+ *
+ * <p>A concrete intermediate stage is generally built from an
+ * {@code AbstractPipeline}, a shape-specific pipeline class which extends it
+ * (e.g., {@code IntPipeline}) which is also abstract, and an operation-specific
+ * concrete class which extends that. {@code AbstractPipeline} contains most of
+ * the mechanics of evaluating the pipeline, and implements methods that will be
+ * used by the operation; the shape-specific classes add helper methods for
+ * dealing with collection of results into the appropriate shape-specific
+ * containers.
+ *
+ * <p>After chaining a new intermediate operation, or executing a terminal
+ * operation, the stream is considered to be consumed, and no more intermediate
+ * or terminal operations are permitted on this stream instance.
+ *
+ * @implNote
+ * <p>For sequential streams, and parallel streams without
+ * <a href="package-summary.html#StreamOps">stateful intermediate
+ * operations</a>, pipeline evaluation is done in a single
+ * pass that "jams" all the operations together. For parallel streams with
+ * stateful operations, execution is divided into segments, where each
+ * stateful operation marks the end of a segment, and each segment is
+ * evaluated separately and the result used as the input to the next
+ * segment. In all cases, the source data is not consumed until a terminal
+ * operation begins.
+ *
+ * @param <E_IN> type of input elements
+ * @param <E_OUT> type of output elements
+ * @param <S> type of the subclass implementing {@code BaseStream}
+ * @since 1.8
+ */
+abstract class AbstractPipeline<E_IN, E_OUT, S extends BaseStream<E_OUT, S>>
+ extends PipelineHelper<E_OUT> implements BaseStream<E_OUT, S> {
+ private static final String MSG_STREAM_LINKED = "stream has already been operated upon or closed";
+ private static final String MSG_CONSUMED = "source already consumed or closed";
+
+ /**
+ * Backlink to the head of the pipeline chain (self if this is the source
+ * stage).
+ */
+ @SuppressWarnings("rawtypes")
+ private final AbstractPipeline sourceStage;
+
+ /**
+ * The "upstream" pipeline, or null if this is the source stage.
+ */
+ @SuppressWarnings("rawtypes")
+ private final AbstractPipeline previousStage;
+
+ /**
+ * The operation flags for the intermediate operation represented by this
+ * pipeline object.
+ */
+ protected final int sourceOrOpFlags;
+
+ /**
+ * The next stage in the pipeline, or null if this is the last stage.
+ * Effectively final at the point of linking to the next pipeline.
+ */
+ @SuppressWarnings("rawtypes")
+ private AbstractPipeline nextStage;
+
+ /**
+ * The number of intermediate operations between this pipeline object
+ * and the stream source if sequential, or the previous stateful operation if parallel.
+ * Valid at the point of pipeline preparation for evaluation.
+ */
+ private int depth;
+
+ /**
+ * The combined source and operation flags for the source and all operations
+ * up to and including the operation represented by this pipeline object.
+ * Valid at the point of pipeline preparation for evaluation.
+ */
+ private int combinedFlags;
+
+ /**
+ * The source spliterator. Only valid for the head pipeline.
+ * Before the pipeline is consumed if non-null then {@code sourceSupplier}
+ * must be null. After the pipeline is consumed, if non-null, it is set to
+ * null.
+ */
+ private Spliterator<?> sourceSpliterator;
+
+ /**
+ * The source supplier. Only valid for the head pipeline. Before the
+ * pipeline is consumed if non-null then {@code sourceSpliterator} must be
+ * null. After the pipeline is consumed, if non-null, it is set to null.
+ */
+ private Supplier<? extends Spliterator<?>> sourceSupplier;
+
+ /**
+ * True if this pipeline has been linked or consumed
+ */
+ private boolean linkedOrConsumed;
+
+ /**
+ * True if there are any stateful ops in the pipeline; only valid for the
+ * source stage.
+ */
+ private boolean sourceAnyStateful;
+
+ private Runnable sourceCloseAction;
+
+ /**
+ * True if pipeline is parallel, otherwise the pipeline is sequential; only
+ * valid for the source stage.
+ */
+ private boolean parallel;
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Supplier<Spliterator>} describing the stream source
+ * @param sourceFlags The source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ * @param parallel True if the pipeline is parallel
+ */
+ AbstractPipeline(Supplier<? extends Spliterator<?>> source,
+ int sourceFlags, boolean parallel) {
+ this.previousStage = null;
+ this.sourceSupplier = source;
+ this.sourceStage = this;
+ this.sourceOrOpFlags = sourceFlags & StreamOpFlag.STREAM_MASK;
+ // The following is an optimization of:
+ // StreamOpFlag.combineOpFlags(sourceOrOpFlags, StreamOpFlag.INITIAL_OPS_VALUE);
+ this.combinedFlags = (~(sourceOrOpFlags << 1)) & StreamOpFlag.INITIAL_OPS_VALUE;
+ this.depth = 0;
+ this.parallel = parallel;
+ }
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Spliterator} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ AbstractPipeline(Spliterator<?> source,
+ int sourceFlags, boolean parallel) {
+ this.previousStage = null;
+ this.sourceSpliterator = source;
+ this.sourceStage = this;
+ this.sourceOrOpFlags = sourceFlags & StreamOpFlag.STREAM_MASK;
+ // The following is an optimization of:
+ // StreamOpFlag.combineOpFlags(sourceOrOpFlags, StreamOpFlag.INITIAL_OPS_VALUE);
+ this.combinedFlags = (~(sourceOrOpFlags << 1)) & StreamOpFlag.INITIAL_OPS_VALUE;
+ this.depth = 0;
+ this.parallel = parallel;
+ }
+
+ /**
+ * Constructor for appending an intermediate operation stage onto an
+ * existing pipeline.
+ *
+ * @param previousStage the upstream pipeline stage
+ * @param opFlags the operation flags for the new stage, described in
+ * {@link StreamOpFlag}
+ */
+ AbstractPipeline(AbstractPipeline<?, E_IN, ?> previousStage, int opFlags) {
+ if (previousStage.linkedOrConsumed)
+ throw new IllegalStateException(MSG_STREAM_LINKED);
+ previousStage.linkedOrConsumed = true;
+ previousStage.nextStage = this;
+
+ this.previousStage = previousStage;
+ this.sourceOrOpFlags = opFlags & StreamOpFlag.OP_MASK;
+ this.combinedFlags = StreamOpFlag.combineOpFlags(opFlags, previousStage.combinedFlags);
+ this.sourceStage = previousStage.sourceStage;
+ if (opIsStateful())
+ sourceStage.sourceAnyStateful = true;
+ this.depth = previousStage.depth + 1;
+ }
+
+
+ // Terminal evaluation methods
+
+ /**
+ * Evaluate the pipeline with a terminal operation to produce a result.
+ *
+ * @param <R> the type of result
+ * @param terminalOp the terminal operation to be applied to the pipeline.
+ * @return the result
+ */
+ final <R> R evaluate(TerminalOp<E_OUT, R> terminalOp) {
+ assert getOutputShape() == terminalOp.inputShape();
+ if (linkedOrConsumed)
+ throw new IllegalStateException(MSG_STREAM_LINKED);
+ linkedOrConsumed = true;
+
+ return isParallel()
+ ? terminalOp.evaluateParallel(this, sourceSpliterator(terminalOp.getOpFlags()))
+ : terminalOp.evaluateSequential(this, sourceSpliterator(terminalOp.getOpFlags()));
+ }
+
+ /**
+ * Collect the elements output from the pipeline stage.
+ *
+ * @param generator the array generator to be used to create array instances
+ * @return a flat array-backed Node that holds the collected output elements
+ */
+ @SuppressWarnings("unchecked")
+ final Node<E_OUT> evaluateToArrayNode(IntFunction<E_OUT[]> generator) {
+ if (linkedOrConsumed)
+ throw new IllegalStateException(MSG_STREAM_LINKED);
+ linkedOrConsumed = true;
+
+ // If the last intermediate operation is stateful then
+ // evaluate directly to avoid an extra collection step
+ if (isParallel() && previousStage != null && opIsStateful()) {
+ // Set the depth of this, last, pipeline stage to zero to slice the
+ // pipeline such that this operation will not be included in the
+ // upstream slice and upstream operations will not be included
+ // in this slice
+ depth = 0;
+ return opEvaluateParallel(previousStage, previousStage.sourceSpliterator(0), generator);
+ }
+ else {
+ return evaluate(sourceSpliterator(0), true, generator);
+ }
+ }
+
+ /**
+ * Gets the source stage spliterator if this pipeline stage is the source
+ * stage. The pipeline is consumed after this method is called and
+ * returns successfully.
+ *
+ * @return the source stage spliterator
+ * @throws IllegalStateException if this pipeline stage is not the source
+ * stage.
+ */
+ @SuppressWarnings("unchecked")
+ final Spliterator<E_OUT> sourceStageSpliterator() {
+ if (this != sourceStage)
+ throw new IllegalStateException();
+
+ if (linkedOrConsumed)
+ throw new IllegalStateException(MSG_STREAM_LINKED);
+ linkedOrConsumed = true;
+
+ if (sourceStage.sourceSpliterator != null) {
+ @SuppressWarnings("unchecked")
+ Spliterator<E_OUT> s = sourceStage.sourceSpliterator;
+ sourceStage.sourceSpliterator = null;
+ return s;
+ }
+ else if (sourceStage.sourceSupplier != null) {
+ @SuppressWarnings("unchecked")
+ Spliterator<E_OUT> s = (Spliterator<E_OUT>) sourceStage.sourceSupplier.get();
+ sourceStage.sourceSupplier = null;
+ return s;
+ }
+ else {
+ throw new IllegalStateException(MSG_CONSUMED);
+ }
+ }
+
+ // BaseStream
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public final S sequential() {
+ sourceStage.parallel = false;
+ return (S) this;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public final S parallel() {
+ sourceStage.parallel = true;
+ return (S) this;
+ }
+
+ @Override
+ public void close() {
+ linkedOrConsumed = true;
+ sourceSupplier = null;
+ sourceSpliterator = null;
+ if (sourceStage.sourceCloseAction != null) {
+ Runnable closeAction = sourceStage.sourceCloseAction;
+ sourceStage.sourceCloseAction = null;
+ closeAction.run();
+ }
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public S onClose(Runnable closeHandler) {
+ Runnable existingHandler = sourceStage.sourceCloseAction;
+ sourceStage.sourceCloseAction =
+ (existingHandler == null)
+ ? closeHandler
+ : Streams.composeWithExceptions(existingHandler, closeHandler);
+ return (S) this;
+ }
+
+ // Primitive specialization use co-variant overrides, hence is not final
+ @Override
+ @SuppressWarnings("unchecked")
+ public Spliterator<E_OUT> spliterator() {
+ if (linkedOrConsumed)
+ throw new IllegalStateException(MSG_STREAM_LINKED);
+ linkedOrConsumed = true;
+
+ if (this == sourceStage) {
+ if (sourceStage.sourceSpliterator != null) {
+ @SuppressWarnings("unchecked")
+ Spliterator<E_OUT> s = (Spliterator<E_OUT>) sourceStage.sourceSpliterator;
+ sourceStage.sourceSpliterator = null;
+ return s;
+ }
+ else if (sourceStage.sourceSupplier != null) {
+ @SuppressWarnings("unchecked")
+ Supplier<Spliterator<E_OUT>> s = (Supplier<Spliterator<E_OUT>>) sourceStage.sourceSupplier;
+ sourceStage.sourceSupplier = null;
+ return lazySpliterator(s);
+ }
+ else {
+ throw new IllegalStateException(MSG_CONSUMED);
+ }
+ }
+ else {
+ return wrap(this, () -> sourceSpliterator(0), isParallel());
+ }
+ }
+
+ @Override
+ public final boolean isParallel() {
+ return sourceStage.parallel;
+ }
+
+
+ /**
+ * Returns the composition of stream flags of the stream source and all
+ * intermediate operations.
+ *
+ * @return the composition of stream flags of the stream source and all
+ * intermediate operations
+ * @see StreamOpFlag
+ */
+ final int getStreamFlags() {
+ return StreamOpFlag.toStreamFlags(combinedFlags);
+ }
+
+ /**
+ * Get the source spliterator for this pipeline stage. For a sequential or
+ * stateless parallel pipeline, this is the source spliterator. For a
+ * stateful parallel pipeline, this is a spliterator describing the results
+ * of all computations up to and including the most recent stateful
+ * operation.
+ */
+ @SuppressWarnings("unchecked")
+ private Spliterator<?> sourceSpliterator(int terminalFlags) {
+ // Get the source spliterator of the pipeline
+ Spliterator<?> spliterator = null;
+ if (sourceStage.sourceSpliterator != null) {
+ spliterator = sourceStage.sourceSpliterator;
+ sourceStage.sourceSpliterator = null;
+ }
+ else if (sourceStage.sourceSupplier != null) {
+ spliterator = (Spliterator<?>) sourceStage.sourceSupplier.get();
+ sourceStage.sourceSupplier = null;
+ }
+ else {
+ throw new IllegalStateException(MSG_CONSUMED);
+ }
+
+ if (isParallel() && sourceStage.sourceAnyStateful) {
+ // Adapt the source spliterator, evaluating each stateful op
+ // in the pipeline up to and including this pipeline stage.
+ // The depth and flags of each pipeline stage are adjusted accordingly.
+ int depth = 1;
+ for (@SuppressWarnings("rawtypes") AbstractPipeline u = sourceStage, p = sourceStage.nextStage, e = this;
+ u != e;
+ u = p, p = p.nextStage) {
+
+ int thisOpFlags = p.sourceOrOpFlags;
+ if (p.opIsStateful()) {
+ depth = 0;
+
+ if (StreamOpFlag.SHORT_CIRCUIT.isKnown(thisOpFlags)) {
+ // Clear the short circuit flag for next pipeline stage
+ // This stage encapsulates short-circuiting, the next
+ // stage may not have any short-circuit operations, and
+ // if so spliterator.forEachRemaining should be used
+ // for traversal
+ thisOpFlags = thisOpFlags & ~StreamOpFlag.IS_SHORT_CIRCUIT;
+ }
+
+ spliterator = p.opEvaluateParallelLazy(u, spliterator);
+
+ // Inject or clear SIZED on the source pipeline stage
+ // based on the stage's spliterator
+ thisOpFlags = spliterator.hasCharacteristics(Spliterator.SIZED)
+ ? (thisOpFlags & ~StreamOpFlag.NOT_SIZED) | StreamOpFlag.IS_SIZED
+ : (thisOpFlags & ~StreamOpFlag.IS_SIZED) | StreamOpFlag.NOT_SIZED;
+ }
+ p.depth = depth++;
+ p.combinedFlags = StreamOpFlag.combineOpFlags(thisOpFlags, u.combinedFlags);
+ }
+ }
+
+ if (terminalFlags != 0) {
+ // Apply flags from the terminal operation to last pipeline stage
+ combinedFlags = StreamOpFlag.combineOpFlags(terminalFlags, combinedFlags);
+ }
+
+ return spliterator;
+ }
+
+ // PipelineHelper
+
+ @Override
+ final StreamShape getSourceShape() {
+ @SuppressWarnings("rawtypes")
+ AbstractPipeline p = AbstractPipeline.this;
+ while (p.depth > 0) {
+ p = p.previousStage;
+ }
+ return p.getOutputShape();
+ }
+
+ @Override
+ final <P_IN> long exactOutputSizeIfKnown(Spliterator<P_IN> spliterator) {
+ return StreamOpFlag.SIZED.isKnown(getStreamAndOpFlags()) ? spliterator.getExactSizeIfKnown() : -1;
+ }
+
+ @Override
+ final <P_IN, S extends Sink<E_OUT>> S wrapAndCopyInto(S sink, Spliterator<P_IN> spliterator) {
+ copyInto(wrapSink(Objects.requireNonNull(sink)), spliterator);
+ return sink;
+ }
+
+ @Override
+ final <P_IN> void copyInto(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator) {
+ Objects.requireNonNull(wrappedSink);
+
+ if (!StreamOpFlag.SHORT_CIRCUIT.isKnown(getStreamAndOpFlags())) {
+ wrappedSink.begin(spliterator.getExactSizeIfKnown());
+ spliterator.forEachRemaining(wrappedSink);
+ wrappedSink.end();
+ }
+ else {
+ copyIntoWithCancel(wrappedSink, spliterator);
+ }
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ final <P_IN> void copyIntoWithCancel(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator) {
+ @SuppressWarnings({"rawtypes","unchecked"})
+ AbstractPipeline p = AbstractPipeline.this;
+ while (p.depth > 0) {
+ p = p.previousStage;
+ }
+ wrappedSink.begin(spliterator.getExactSizeIfKnown());
+ p.forEachWithCancel(spliterator, wrappedSink);
+ wrappedSink.end();
+ }
+
+ @Override
+ final int getStreamAndOpFlags() {
+ return combinedFlags;
+ }
+
+ final boolean isOrdered() {
+ return StreamOpFlag.ORDERED.isKnown(combinedFlags);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ final <P_IN> Sink<P_IN> wrapSink(Sink<E_OUT> sink) {
+ Objects.requireNonNull(sink);
+
+ for ( @SuppressWarnings("rawtypes") AbstractPipeline p=AbstractPipeline.this; p.depth > 0; p=p.previousStage) {
+ sink = p.opWrapSink(p.previousStage.combinedFlags, sink);
+ }
+ return (Sink<P_IN>) sink;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ final <P_IN> Spliterator<E_OUT> wrapSpliterator(Spliterator<P_IN> sourceSpliterator) {
+ if (depth == 0) {
+ return (Spliterator<E_OUT>) sourceSpliterator;
+ }
+ else {
+ return wrap(this, () -> sourceSpliterator, isParallel());
+ }
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ final <P_IN> Node<E_OUT> evaluate(Spliterator<P_IN> spliterator,
+ boolean flatten,
+ IntFunction<E_OUT[]> generator) {
+ if (isParallel()) {
+ // @@@ Optimize if op of this pipeline stage is a stateful op
+ return evaluateToNode(this, spliterator, flatten, generator);
+ }
+ else {
+ Node.Builder<E_OUT> nb = makeNodeBuilder(
+ exactOutputSizeIfKnown(spliterator), generator);
+ return wrapAndCopyInto(nb, spliterator).build();
+ }
+ }
+
+
+ // Shape-specific abstract methods, implemented by XxxPipeline classes
+
+ /**
+ * Get the output shape of the pipeline. If the pipeline is the head,
+ * then it's output shape corresponds to the shape of the source.
+ * Otherwise, it's output shape corresponds to the output shape of the
+ * associated operation.
+ *
+ * @return the output shape
+ */
+ abstract StreamShape getOutputShape();
+
+ /**
+ * Collect elements output from a pipeline into a Node that holds elements
+ * of this shape.
+ *
+ * @param helper the pipeline helper describing the pipeline stages
+ * @param spliterator the source spliterator
+ * @param flattenTree true if the returned node should be flattened
+ * @param generator the array generator
+ * @return a Node holding the output of the pipeline
+ */
+ abstract <P_IN> Node<E_OUT> evaluateToNode(PipelineHelper<E_OUT> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree,
+ IntFunction<E_OUT[]> generator);
+
+ /**
+ * Create a spliterator that wraps a source spliterator, compatible with
+ * this stream shape, and operations associated with a {@link
+ * PipelineHelper}.
+ *
+ * @param ph the pipeline helper describing the pipeline stages
+ * @param supplier the supplier of a spliterator
+ * @return a wrapping spliterator compatible with this shape
+ */
+ abstract <P_IN> Spliterator<E_OUT> wrap(PipelineHelper<E_OUT> ph,
+ Supplier<Spliterator<P_IN>> supplier,
+ boolean isParallel);
+
+ /**
+ * Create a lazy spliterator that wraps and obtains the supplied
+ * spliterator when a method is invoked on the lazy spliterator.
+ * @param supplier the supplier of a spliterator
+ */
+ abstract Spliterator<E_OUT> lazySpliterator(Supplier<? extends Spliterator<E_OUT>> supplier);
+
+ /**
+ * Traverse the elements of a spliterator compatible with this stream shape,
+ * pushing those elements into a sink. If the sink requests cancellation,
+ * no further elements will be pulled or pushed.
+ *
+ * @param spliterator the spliterator to pull elements from
+ * @param sink the sink to push elements to
+ */
+ abstract void forEachWithCancel(Spliterator<E_OUT> spliterator, Sink<E_OUT> sink);
+
+ /**
+ * Make a node builder compatible with this stream shape.
+ *
+ * @param exactSizeIfKnown if {@literal >=0}, then a node builder will be
+ * created that has a fixed capacity of at most sizeIfKnown elements. If
+ * {@literal < 0}, then the node builder has an unfixed capacity. A fixed
+ * capacity node builder will throw exceptions if an element is added after
+ * builder has reached capacity, or is built before the builder has reached
+ * capacity.
+ *
+ * @param generator the array generator to be used to create instances of a
+ * T[] array. For implementations supporting primitive nodes, this parameter
+ * may be ignored.
+ * @return a node builder
+ */
+ @Override
+ abstract Node.Builder<E_OUT> makeNodeBuilder(long exactSizeIfKnown,
+ IntFunction<E_OUT[]> generator);
+
+
+ // Op-specific abstract methods, implemented by the operation class
+
+ /**
+ * Returns whether this operation is stateful or not. If it is stateful,
+ * then the method
+ * {@link #opEvaluateParallel(PipelineHelper, java.util.Spliterator, java.util.function.IntFunction)}
+ * must be overridden.
+ *
+ * @return {@code true} if this operation is stateful
+ */
+ abstract boolean opIsStateful();
+
+ /**
+ * Accepts a {@code Sink} which will receive the results of this operation,
+ * and return a {@code Sink} which accepts elements of the input type of
+ * this operation and which performs the operation, passing the results to
+ * the provided {@code Sink}.
+ *
+ * @apiNote
+ * The implementation may use the {@code flags} parameter to optimize the
+ * sink wrapping. For example, if the input is already {@code DISTINCT},
+ * the implementation for the {@code Stream#distinct()} method could just
+ * return the sink it was passed.
+ *
+ * @param flags The combined stream and operation flags up to, but not
+ * including, this operation
+ * @param sink sink to which elements should be sent after processing
+ * @return a sink which accepts elements, performs the operation upon
+ * each element, and passes the results (if any) to the provided
+ * {@code Sink}.
+ */
+ abstract Sink<E_IN> opWrapSink(int flags, Sink<E_OUT> sink);
+
+ /**
+ * Performs a parallel evaluation of the operation using the specified
+ * {@code PipelineHelper} which describes the upstream intermediate
+ * operations. Only called on stateful operations. If {@link
+ * #opIsStateful()} returns true then implementations must override the
+ * default implementation.
+ *
+ * @implSpec The default implementation always throws
+ * {@code UnsupportedOperationException}.
+ *
+ * @param helper the pipeline helper describing the pipeline stages
+ * @param spliterator the source {@code Spliterator}
+ * @param generator the array generator
+ * @return a {@code Node} describing the result of the evaluation
+ */
+ <P_IN> Node<E_OUT> opEvaluateParallel(PipelineHelper<E_OUT> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<E_OUT[]> generator) {
+ throw new UnsupportedOperationException("Parallel evaluation is not supported");
+ }
+
+ /**
+ * Returns a {@code Spliterator} describing a parallel evaluation of the
+ * operation, using the specified {@code PipelineHelper} which describes the
+ * upstream intermediate operations. Only called on stateful operations.
+ * It is not necessary (though acceptable) to do a full computation of the
+ * result here; it is preferable, if possible, to describe the result via a
+ * lazily evaluated spliterator.
+ *
+ * @implSpec The default implementation behaves as if:
+ * <pre>{@code
+ * return evaluateParallel(helper, i -> (E_OUT[]) new
+ * Object[i]).spliterator();
+ * }</pre>
+ * and is suitable for implementations that cannot do better than a full
+ * synchronous evaluation.
+ *
+ * @param helper the pipeline helper
+ * @param spliterator the source {@code Spliterator}
+ * @return a {@code Spliterator} describing the result of the evaluation
+ */
+ @SuppressWarnings("unchecked")
+ <P_IN> Spliterator<E_OUT> opEvaluateParallelLazy(PipelineHelper<E_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ return opEvaluateParallel(helper, spliterator, i -> (E_OUT[]) new Object[i]).spliterator();
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/AbstractShortCircuitTask.java b/ojluni/src/main/java/java/util/stream/AbstractShortCircuitTask.java
new file mode 100644
index 0000000..bbf09f0
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/AbstractShortCircuitTask.java
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Abstract class for fork-join tasks used to implement short-circuiting
+ * stream ops, which can produce a result without processing all elements of the
+ * stream.
+ *
+ * @param <P_IN> type of input elements to the pipeline
+ * @param <P_OUT> type of output elements from the pipeline
+ * @param <R> type of intermediate result, may be different from operation
+ * result type
+ * @param <K> type of child and sibling tasks
+ * @since 1.8
+ */
+@SuppressWarnings("serial")
+abstract class AbstractShortCircuitTask<P_IN, P_OUT, R,
+ K extends AbstractShortCircuitTask<P_IN, P_OUT, R, K>>
+ extends AbstractTask<P_IN, P_OUT, R, K> {
+ /**
+ * The result for this computation; this is shared among all tasks and set
+ * exactly once
+ */
+ protected final AtomicReference<R> sharedResult;
+
+ /**
+ * Indicates whether this task has been canceled. Tasks may cancel other
+ * tasks in the computation under various conditions, such as in a
+ * find-first operation, a task that finds a value will cancel all tasks
+ * that are later in the encounter order.
+ */
+ protected volatile boolean canceled;
+
+ /**
+ * Constructor for root tasks.
+ *
+ * @param helper the {@code PipelineHelper} describing the stream pipeline
+ * up to this operation
+ * @param spliterator the {@code Spliterator} describing the source for this
+ * pipeline
+ */
+ protected AbstractShortCircuitTask(PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ super(helper, spliterator);
+ sharedResult = new AtomicReference<>(null);
+ }
+
+ /**
+ * Constructor for non-root nodes.
+ *
+ * @param parent parent task in the computation tree
+ * @param spliterator the {@code Spliterator} for the portion of the
+ * computation tree described by this task
+ */
+ protected AbstractShortCircuitTask(K parent,
+ Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ sharedResult = parent.sharedResult;
+ }
+
+ /**
+ * Returns the value indicating the computation completed with no task
+ * finding a short-circuitable result. For example, for a "find" operation,
+ * this might be null or an empty {@code Optional}.
+ *
+ * @return the result to return when no task finds a result
+ */
+ protected abstract R getEmptyResult();
+
+ /**
+ * Overrides AbstractTask version to include checks for early
+ * exits while splitting or computing.
+ */
+ @Override
+ public void compute() {
+ Spliterator<P_IN> rs = spliterator, ls;
+ long sizeEstimate = rs.estimateSize();
+ long sizeThreshold = getTargetSize(sizeEstimate);
+ boolean forkRight = false;
+ @SuppressWarnings("unchecked") K task = (K) this;
+ AtomicReference<R> sr = sharedResult;
+ R result;
+ while ((result = sr.get()) == null) {
+ if (task.taskCanceled()) {
+ result = task.getEmptyResult();
+ break;
+ }
+ if (sizeEstimate <= sizeThreshold || (ls = rs.trySplit()) == null) {
+ result = task.doLeaf();
+ break;
+ }
+ K leftChild, rightChild, taskToFork;
+ task.leftChild = leftChild = task.makeChild(ls);
+ task.rightChild = rightChild = task.makeChild(rs);
+ task.setPendingCount(1);
+ if (forkRight) {
+ forkRight = false;
+ rs = ls;
+ task = leftChild;
+ taskToFork = rightChild;
+ }
+ else {
+ forkRight = true;
+ task = rightChild;
+ taskToFork = leftChild;
+ }
+ taskToFork.fork();
+ sizeEstimate = rs.estimateSize();
+ }
+ task.setLocalResult(result);
+ task.tryComplete();
+ }
+
+
+ /**
+ * Declares that a globally valid result has been found. If another task has
+ * not already found the answer, the result is installed in
+ * {@code sharedResult}. The {@code compute()} method will check
+ * {@code sharedResult} before proceeding with computation, so this causes
+ * the computation to terminate early.
+ *
+ * @param result the result found
+ */
+ protected void shortCircuit(R result) {
+ if (result != null)
+ sharedResult.compareAndSet(null, result);
+ }
+
+ /**
+ * Sets a local result for this task. If this task is the root, set the
+ * shared result instead (if not already set).
+ *
+ * @param localResult The result to set for this task
+ */
+ @Override
+ protected void setLocalResult(R localResult) {
+ if (isRoot()) {
+ if (localResult != null)
+ sharedResult.compareAndSet(null, localResult);
+ }
+ else
+ super.setLocalResult(localResult);
+ }
+
+ /**
+ * Retrieves the local result for this task
+ */
+ @Override
+ public R getRawResult() {
+ return getLocalResult();
+ }
+
+ /**
+ * Retrieves the local result for this task. If this task is the root,
+ * retrieves the shared result instead.
+ */
+ @Override
+ public R getLocalResult() {
+ if (isRoot()) {
+ R answer = sharedResult.get();
+ return (answer == null) ? getEmptyResult() : answer;
+ }
+ else
+ return super.getLocalResult();
+ }
+
+ /**
+ * Mark this task as canceled
+ */
+ protected void cancel() {
+ canceled = true;
+ }
+
+ /**
+ * Queries whether this task is canceled. A task is considered canceled if
+ * it or any of its parents have been canceled.
+ *
+ * @return {@code true} if this task or any parent is canceled.
+ */
+ protected boolean taskCanceled() {
+ boolean cancel = canceled;
+ if (!cancel) {
+ for (K parent = getParent(); !cancel && parent != null; parent = parent.getParent())
+ cancel = parent.canceled;
+ }
+
+ return cancel;
+ }
+
+ /**
+ * Cancels all tasks which succeed this one in the encounter order. This
+ * includes canceling all the current task's right sibling, as well as the
+ * later right siblings of all its parents.
+ */
+ protected void cancelLaterNodes() {
+ // Go up the tree, cancel right siblings of this node and all parents
+ for (@SuppressWarnings("unchecked") K parent = getParent(), node = (K) this;
+ parent != null;
+ node = parent, parent = parent.getParent()) {
+ // If node is a left child of parent, then has a right sibling
+ if (parent.leftChild == node) {
+ K rightSibling = parent.rightChild;
+ if (!rightSibling.canceled)
+ rightSibling.cancel();
+ }
+ }
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/AbstractSpinedBuffer.java b/ojluni/src/main/java/java/util/stream/AbstractSpinedBuffer.java
new file mode 100644
index 0000000..46fdf4d
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/AbstractSpinedBuffer.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
/**
 * Base class for a data structure that gathers elements into a buffer and then
 * iterates over them. It maintains a "spine": an array of progressively larger
 * chunks, so growing the structure never requires copying already-stored
 * elements.
 *
 * @since 1.8
 */
abstract class AbstractSpinedBuffer {
    /** Minimum power-of-two for the first chunk. */
    public static final int MIN_CHUNK_POWER = 4;

    /** Minimum size for the first chunk. */
    public static final int MIN_CHUNK_SIZE = 1 << MIN_CHUNK_POWER;

    /** Max power-of-two for chunks. */
    public static final int MAX_CHUNK_POWER = 30;

    /** Minimum array size for the array-of-chunks. */
    public static final int MIN_SPINE_SIZE = 8;

    /** log2 of the size of the first chunk. */
    protected final int initialChunkPower;

    /**
     * Index of the *next* element to write; may point into, or just outside
     * of, the current chunk.
     */
    protected int elementIndex;

    /**
     * Index of the *current* chunk in the spine array, if the spine array is
     * non-null.
     */
    protected int spineIndex;

    /** Count of elements in all prior chunks. */
    protected long[] priorElementCount;

    /**
     * Construct with an initial capacity of 16 ({@code MIN_CHUNK_SIZE}).
     */
    protected AbstractSpinedBuffer() {
        initialChunkPower = MIN_CHUNK_POWER;
    }

    /**
     * Construct with a specified initial capacity.
     *
     * @param initialCapacity the minimum expected number of elements
     * @throws IllegalArgumentException if {@code initialCapacity} is negative
     */
    protected AbstractSpinedBuffer(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal Capacity: "+ initialCapacity);

        // Smallest power of two covering the capacity, but never below the minimum.
        int neededPower = Integer.SIZE - Integer.numberOfLeadingZeros(initialCapacity - 1);
        initialChunkPower = Math.max(MIN_CHUNK_POWER, neededPower);
    }

    /**
     * Is the buffer currently empty?
     */
    public boolean isEmpty() {
        return spineIndex == 0 && elementIndex == 0;
    }

    /**
     * How many elements are currently in the buffer?
     */
    public long count() {
        if (spineIndex == 0)
            return elementIndex;
        return priorElementCount[spineIndex] + elementIndex;
    }

    /**
     * How big should the nth chunk be? Chunks 0 and 1 share the initial size;
     * each later chunk doubles, capped at 2^MAX_CHUNK_POWER.
     */
    protected int chunkSize(int n) {
        if (n == 0 || n == 1)
            return 1 << initialChunkPower;
        return 1 << Math.min(initialChunkPower + n - 1, AbstractSpinedBuffer.MAX_CHUNK_POWER);
    }

    /**
     * Remove all data from the buffer.
     */
    public abstract void clear();
}
diff --git a/ojluni/src/main/java/java/util/stream/AbstractTask.java b/ojluni/src/main/java/java/util/stream/AbstractTask.java
new file mode 100644
index 0000000..33de7d5
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/AbstractTask.java
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.concurrent.CountedCompleter;
+import java.util.concurrent.ForkJoinPool;
+
/**
 * Abstract base class for most fork-join tasks used to implement stream ops.
 * Manages splitting logic, tracking of child tasks, and intermediate results.
 * Each task is associated with a {@link Spliterator} that describes the portion
 * of the input associated with the subtree rooted at this task.
 * Tasks may be leaf nodes (which will traverse the elements of
 * the {@code Spliterator}) or internal nodes (which split the
 * {@code Spliterator} into multiple child tasks).
 *
 * @implNote
 * <p>This class is based on {@link CountedCompleter}, a form of fork-join task
 * where each task has a semaphore-like count of uncompleted children, and the
 * task is implicitly completed and notified when its last child completes.
 * Internal node tasks will likely override the {@code onCompletion} method from
 * {@code CountedCompleter} to merge the results from child tasks into the
 * current task's result.
 *
 * <p>Splitting and setting up the child task links is done by {@code compute()}
 * for internal nodes. At {@code compute()} time for leaf nodes, it is
 * guaranteed that the parent's child-related fields (including sibling links
 * for the parent's children) will be set up for all children.
 *
 * <p>For example, a task that performs a reduce would override {@code doLeaf()}
 * to perform a reduction on that leaf node's chunk using the
 * {@code Spliterator}, and override {@code onCompletion()} to merge the results
 * of the child tasks for internal nodes:
 *
 * <pre>{@code
 *     protected S doLeaf() {
 *         spliterator.forEach(...);
 *         return localReductionResult;
 *     }
 *
 *     public void onCompletion(CountedCompleter caller) {
 *         if (!isLeaf()) {
 *             ReduceTask<P_IN, P_OUT, T, R> child = children;
 *             R result = child.getLocalResult();
 *             child = child.nextSibling;
 *             for (; child != null; child = child.nextSibling)
 *                 result = combine(result, child.getLocalResult());
 *             setLocalResult(result);
 *         }
 *     }
 * }</pre>
 *
 * <p>Serialization is not supported as there is no intention to serialize
 * tasks managed by stream ops.
 *
 * @param <P_IN> Type of elements input to the pipeline
 * @param <P_OUT> Type of elements output from the pipeline
 * @param <R> Type of intermediate result, which may be different from operation
 *        result type
 * @param <K> Type of parent, child and sibling tasks
 * @since 1.8
 */
@SuppressWarnings("serial")
abstract class AbstractTask<P_IN, P_OUT, R,
                            K extends AbstractTask<P_IN, P_OUT, R, K>>
        extends CountedCompleter<R> {

    /**
     * Default target factor of leaf tasks for parallel decomposition.
     * To allow load balancing, we over-partition, currently to approximately
     * four tasks per processor, which enables others to help out
     * if leaf tasks are uneven or some processors are otherwise busy.
     */
    static final int LEAF_TARGET = ForkJoinPool.getCommonPoolParallelism() << 2;

    /** The pipeline helper, common to all tasks in a computation */
    protected final PipelineHelper<P_OUT> helper;

    /**
     * The spliterator for the portion of the input associated with the subtree
     * rooted at this task
     */
    protected Spliterator<P_IN> spliterator;

    /** Target leaf size, common to all tasks in a computation */
    protected long targetSize; // may be lazily initialized; 0 means "not yet computed"

    /**
     * The left child.
     * null if no children
     * if non-null rightChild is non-null
     */
    protected K leftChild;

    /**
     * The right child.
     * null if no children
     * if non-null leftChild is non-null
     */
    protected K rightChild;

    /** The result of this node, if completed */
    private R localResult;

    /**
     * Constructor for root nodes.
     *
     * @param helper The {@code PipelineHelper} describing the stream pipeline
     *               up to this operation
     * @param spliterator The {@code Spliterator} describing the source for this
     *                    pipeline
     */
    protected AbstractTask(PipelineHelper<P_OUT> helper,
                           Spliterator<P_IN> spliterator) {
        super(null);
        this.helper = helper;
        this.spliterator = spliterator;
        this.targetSize = 0L;
    }

    /**
     * Constructor for non-root nodes.
     *
     * @param parent this node's parent task
     * @param spliterator {@code Spliterator} describing the subtree rooted at
     *        this node, obtained by splitting the parent {@code Spliterator}
     */
    protected AbstractTask(K parent,
                           Spliterator<P_IN> spliterator) {
        super(parent);
        this.spliterator = spliterator;
        this.helper = parent.helper;
        this.targetSize = parent.targetSize;
    }

    /**
     * Constructs a new node of type T whose parent is the receiver; must call
     * the AbstractTask(T, Spliterator) constructor with the receiver and the
     * provided Spliterator.
     *
     * @param spliterator {@code Spliterator} describing the subtree rooted at
     *        this node, obtained by splitting the parent {@code Spliterator}
     * @return newly constructed child node
     */
    protected abstract K makeChild(Spliterator<P_IN> spliterator);

    /**
     * Computes the result associated with a leaf node. Will be called by
     * {@code compute()} and the result passed to {@code setLocalResult()}
     *
     * @return the computed result of a leaf node
     */
    protected abstract R doLeaf();

    /**
     * Returns a suggested target leaf size based on the initial size estimate.
     *
     * @param sizeEstimate estimated size of the source
     * @return suggested target leaf size (always at least 1)
     */
    public static long suggestTargetSize(long sizeEstimate) {
        long est = sizeEstimate / LEAF_TARGET;
        return est > 0L ? est : 1L;
    }

    /**
     * Returns the targetSize, initializing it via the supplied
     * size estimate if not already initialized.
     *
     * @param sizeEstimate estimated size of the source, used only on first call
     * @return the (possibly just-computed) target leaf size
     */
    protected final long getTargetSize(long sizeEstimate) {
        long s;
        return ((s = targetSize) != 0 ? s :
                (targetSize = suggestTargetSize(sizeEstimate)));
    }

    /**
     * Returns the local result, if any. Subclasses should use
     * {@link #setLocalResult(Object)} and {@link #getLocalResult()} to manage
     * results. This returns the local result so that calls from within the
     * fork-join framework will return the correct result.
     *
     * @return local result for this node previously stored with
     * {@link #setLocalResult}
     */
    @Override
    public R getRawResult() {
        return localResult;
    }

    /**
     * Does nothing; instead, subclasses should use
     * {@link #setLocalResult(Object)} to manage results.
     *
     * @param result must be null, or an exception is thrown (this is a safety
     *        tripwire to detect when {@code setRawResult()} is being used
     *        instead of {@code setLocalResult()})
     */
    @Override
    protected void setRawResult(R result) {
        if (result != null)
            throw new IllegalStateException();
    }

    /**
     * Retrieves a result previously stored with {@link #setLocalResult}
     *
     * @return local result for this node previously stored with
     * {@link #setLocalResult}
     */
    protected R getLocalResult() {
        return localResult;
    }

    /**
     * Associates the result with the task, can be retrieved with
     * {@link #getLocalResult}
     *
     * @param localResult local result for this node
     */
    protected void setLocalResult(R localResult) {
        this.localResult = localResult;
    }

    /**
     * Indicates whether this task is a leaf node. (Only valid after
     * {@link #compute} has been called on this node). If the node is not a
     * leaf node, then children will be non-null and numChildren will be
     * positive.
     *
     * @return {@code true} if this task is a leaf node
     */
    protected boolean isLeaf() {
        return leftChild == null;
    }

    /**
     * Indicates whether this task is the root node
     *
     * @return {@code true} if this task is the root node.
     */
    protected boolean isRoot() {
        return getParent() == null;
    }

    /**
     * Returns the parent of this task, or null if this task is the root
     *
     * @return the parent of this task, or null if this task is the root
     */
    @SuppressWarnings("unchecked")
    protected K getParent() {
        return (K) getCompleter();
    }

    /**
     * Decides whether or not to split a task further or compute it
     * directly. If computing directly, calls {@code doLeaf} and passes
     * the result to {@code setLocalResult}. Otherwise splits off
     * subtasks, forking one and continuing as the other.
     *
     * <p> The method is structured to conserve resources across a
     * range of uses. The loop continues with one of the child tasks
     * when split, to avoid deep recursion. To cope with spliterators
     * that may be systematically biased toward left-heavy or
     * right-heavy splits, we alternate which child is forked versus
     * continued in the loop.
     */
    @Override
    public void compute() {
        Spliterator<P_IN> rs = spliterator, ls; // right, left spliterators
        long sizeEstimate = rs.estimateSize();
        long sizeThreshold = getTargetSize(sizeEstimate);
        boolean forkRight = false;
        @SuppressWarnings("unchecked") K task = (K) this;
        while (sizeEstimate > sizeThreshold && (ls = rs.trySplit()) != null) {
            K leftChild, rightChild, taskToFork;
            task.leftChild = leftChild = task.makeChild(ls);
            task.rightChild = rightChild = task.makeChild(rs);
            task.setPendingCount(1);
            if (forkRight) {
                forkRight = false;
                rs = ls;
                task = leftChild;
                taskToFork = rightChild;
            }
            else {
                forkRight = true;
                task = rightChild;
                taskToFork = leftChild;
            }
            taskToFork.fork();
            sizeEstimate = rs.estimateSize();
        }
        task.setLocalResult(task.doLeaf());
        task.tryComplete();
    }

    /**
     * {@inheritDoc}
     *
     * @implNote
     * Clears spliterator and children fields. Overriders MUST call
     * {@code super.onCompletion} as the last thing they do if they want these
     * cleared.
     */
    @Override
    public void onCompletion(CountedCompleter<?> caller) {
        spliterator = null;
        leftChild = rightChild = null;
    }

    /**
     * Returns whether this node is a "leftmost" node -- whether the path from
     * the root to this node involves only traversing leftmost child links. For
     * a leaf node, this means it is the first leaf node in the encounter order.
     *
     * @return {@code true} if this node is a "leftmost" node
     */
    protected boolean isLeftmostNode() {
        @SuppressWarnings("unchecked")
        K node = (K) this;
        while (node != null) {
            K parent = node.getParent();
            if (parent != null && parent.leftChild != node)
                return false;
            node = parent;
        }
        return true;
    }
}
diff --git a/ojluni/src/main/java/java/util/stream/BaseStream.java b/ojluni/src/main/java/java/util/stream/BaseStream.java
new file mode 100644
index 0000000..b0be000
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/BaseStream.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.nio.charset.Charset;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Spliterator;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.IntConsumer;
+import java.util.function.Predicate;
+
/**
 * Base interface for streams, which are sequences of elements supporting
 * sequential and parallel aggregate operations.  The following example
 * illustrates an aggregate operation using the stream types {@link Stream}
 * and {@link IntStream}, computing the sum of the weights of the red widgets:
 *
 * <pre>{@code
 *     int sum = widgets.stream()
 *                      .filter(w -> w.getColor() == RED)
 *                      .mapToInt(w -> w.getWeight())
 *                      .sum();
 * }</pre>
 *
 * See the class documentation for {@link Stream} and the package documentation
 * for <a href="package-summary.html">java.util.stream</a> for additional
 * specification of streams, stream operations, stream pipelines, and
 * parallelism, which governs the behavior of all stream types.
 *
 * @param <T> the type of the stream elements
 * @param <S> the type of the stream implementing {@code BaseStream}
 * @since 1.8
 * @see Stream
 * @see IntStream
 * @see LongStream
 * @see DoubleStream
 * @see <a href="package-summary.html">java.util.stream</a>
 */
public interface BaseStream<T, S extends BaseStream<T, S>>
        extends AutoCloseable {
    /**
     * Returns an iterator for the elements of this stream.
     *
     * <p>This is a <a href="package-summary.html#StreamOps">terminal
     * operation</a>.
     *
     * @return the element iterator for this stream
     */
    Iterator<T> iterator();

    /**
     * Returns a spliterator for the elements of this stream.
     *
     * <p>This is a <a href="package-summary.html#StreamOps">terminal
     * operation</a>.
     *
     * @return the element spliterator for this stream
     */
    Spliterator<T> spliterator();

    /**
     * Returns whether this stream, if a terminal operation were to be executed,
     * would execute in parallel.  Calling this method after invoking a
     * terminal stream operation method may yield unpredictable results.
     *
     * @return {@code true} if this stream would execute in parallel if executed
     */
    boolean isParallel();

    /**
     * Returns an equivalent stream that is sequential.  May return
     * itself, either because the stream was already sequential, or because
     * the underlying stream state was modified to be sequential.
     *
     * <p>This is an <a href="package-summary.html#StreamOps">intermediate
     * operation</a>.
     *
     * @return a sequential stream
     */
    S sequential();

    /**
     * Returns an equivalent stream that is parallel.  May return
     * itself, either because the stream was already parallel, or because
     * the underlying stream state was modified to be parallel.
     *
     * <p>This is an <a href="package-summary.html#StreamOps">intermediate
     * operation</a>.
     *
     * @return a parallel stream
     */
    S parallel();

    /**
     * Returns an equivalent stream that is
     * <a href="package-summary.html#Ordering">unordered</a>.  May return
     * itself, either because the stream was already unordered, or because
     * the underlying stream state was modified to be unordered.
     *
     * <p>This is an <a href="package-summary.html#StreamOps">intermediate
     * operation</a>.
     *
     * @return an unordered stream
     */
    S unordered();

    /**
     * Returns an equivalent stream with an additional close handler.  Close
     * handlers are run when the {@link #close()} method
     * is called on the stream, and are executed in the order they were
     * added.  All close handlers are run, even if earlier close handlers throw
     * exceptions.  If any close handler throws an exception, the first
     * exception thrown will be relayed to the caller of {@code close()}, with
     * any remaining exceptions added to that exception as suppressed exceptions
     * (unless one of the remaining exceptions is the same exception as the
     * first exception, since an exception cannot suppress itself.)  May
     * return itself.
     *
     * <p>This is an <a href="package-summary.html#StreamOps">intermediate
     * operation</a>.
     *
     * @param closeHandler A task to execute when the stream is closed
     * @return a stream with a handler that is run if the stream is closed
     */
    S onClose(Runnable closeHandler);

    /**
     * Closes this stream, causing all close handlers for this stream pipeline
     * to be called.
     *
     * @see AutoCloseable#close()
     */
    @Override
    void close();
}
diff --git a/ojluni/src/main/java/java/util/stream/Collector.java b/ojluni/src/main/java/java/util/stream/Collector.java
new file mode 100644
index 0000000..e409d56
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/Collector.java
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.function.BinaryOperator;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+/**
+ * A <a href="package-summary.html#Reduction">mutable reduction operation</a> that
+ * accumulates input elements into a mutable result container, optionally transforming
+ * the accumulated result into a final representation after all input elements
+ * have been processed. Reduction operations can be performed either sequentially
+ * or in parallel.
+ *
+ * <p>Examples of mutable reduction operations include:
+ * accumulating elements into a {@code Collection}; concatenating
+ * strings using a {@code StringBuilder}; computing summary information about
+ * elements such as sum, min, max, or average; computing "pivot table" summaries
+ * such as "maximum valued transaction by seller", etc. The class {@link Collectors}
+ * provides implementations of many common mutable reductions.
+ *
+ * <p>A {@code Collector} is specified by four functions that work together to
+ * accumulate entries into a mutable result container, and optionally perform
+ * a final transform on the result. They are: <ul>
+ * <li>creation of a new result container ({@link #supplier()})</li>
+ * <li>incorporating a new data element into a result container ({@link #accumulator()})</li>
+ * <li>combining two result containers into one ({@link #combiner()})</li>
+ * <li>performing an optional final transform on the container ({@link #finisher()})</li>
+ * </ul>
+ *
+ * <p>Collectors also have a set of characteristics, such as
+ * {@link Characteristics#CONCURRENT}, that provide hints that can be used by a
+ * reduction implementation to provide better performance.
+ *
+ * <p>A sequential implementation of a reduction using a collector would
+ * create a single result container using the supplier function, and invoke the
+ * accumulator function once for each input element. A parallel implementation
+ * would partition the input, create a result container for each partition,
+ * accumulate the contents of each partition into a subresult for that partition,
+ * and then use the combiner function to merge the subresults into a combined
+ * result.
+ *
+ * <p>To ensure that sequential and parallel executions produce equivalent
+ * results, the collector functions must satisfy an <em>identity</em> and an
+ * <a href="package-summary.html#Associativity">associativity</a> constraint.
+ *
+ * <p>The identity constraint says that for any partially accumulated result,
+ * combining it with an empty result container must produce an equivalent
+ * result. That is, for a partially accumulated result {@code a} that is the
+ * result of any series of accumulator and combiner invocations, {@code a} must
+ * be equivalent to {@code combiner.apply(a, supplier.get())}.
+ *
+ * <p>The associativity constraint says that splitting the computation must
+ * produce an equivalent result. That is, for any input elements {@code t1}
+ * and {@code t2}, the results {@code r1} and {@code r2} in the computation
+ * below must be equivalent:
+ * <pre>{@code
+ * A a1 = supplier.get();
+ * accumulator.accept(a1, t1);
+ * accumulator.accept(a1, t2);
+ * R r1 = finisher.apply(a1); // result without splitting
+ *
+ * A a2 = supplier.get();
+ * accumulator.accept(a2, t1);
+ * A a3 = supplier.get();
+ * accumulator.accept(a3, t2);
+ * R r2 = finisher.apply(combiner.apply(a2, a3)); // result with splitting
+ * } </pre>
+ *
+ * <p>For collectors that do not have the {@code UNORDERED} characteristic,
+ * two accumulated results {@code a1} and {@code a2} are equivalent if
+ * {@code finisher.apply(a1).equals(finisher.apply(a2))}. For unordered
+ * collectors, equivalence is relaxed to allow for non-equality related to
+ * differences in order. (For example, an unordered collector that accumulated
+ * elements to a {@code List} would consider two lists equivalent if they
+ * contained the same elements, ignoring order.)
+ *
+ * <p>Libraries that implement reduction based on {@code Collector}, such as
+ * {@link Stream#collect(Collector)}, must adhere to the following constraints:
+ * <ul>
+ * <li>The first argument passed to the accumulator function, both
+ * arguments passed to the combiner function, and the argument passed to the
+ * finisher function must be the result of a previous invocation of the
+ * result supplier, accumulator, or combiner functions.</li>
+ * <li>The implementation should not do anything with the result of any of
+ * the result supplier, accumulator, or combiner functions other than to
+ * pass them again to the accumulator, combiner, or finisher functions,
+ * or return them to the caller of the reduction operation.</li>
+ * <li>If a result is passed to the combiner or finisher
+ * function, and the same object is not returned from that function, it is
+ * never used again.</li>
+ * <li>Once a result is passed to the combiner or finisher function, it
+ * is never passed to the accumulator function again.</li>
+ * <li>For non-concurrent collectors, any result returned from the result
+ * supplier, accumulator, or combiner functions must be serially
+ * thread-confined. This enables collection to occur in parallel without
+ * the {@code Collector} needing to implement any additional synchronization.
+ * The reduction implementation must manage that the input is properly
+ * partitioned, that partitions are processed in isolation, and combining
+ * happens only after accumulation is complete.</li>
+ * <li>For concurrent collectors, an implementation is free to (but not
+ * required to) implement reduction concurrently. A concurrent reduction
+ * is one where the accumulator function is called concurrently from
+ * multiple threads, using the same concurrently-modifiable result container,
+ * rather than keeping the result isolated during accumulation.
+ * A concurrent reduction should only be applied if the collector has the
+ * {@link Characteristics#UNORDERED} characteristics or if the
+ * originating data is unordered.</li>
+ * </ul>
+ *
+ * <p>In addition to the predefined implementations in {@link Collectors}, the
+ * static factory methods {@link #of(Supplier, BiConsumer, BinaryOperator, Characteristics...)}
+ * can be used to construct collectors. For example, you could create a collector
+ * that accumulates widgets into a {@code TreeSet} with:
+ *
+ * <pre>{@code
+ * Collector<Widget, ?, TreeSet<Widget>> intoSet =
+ * Collector.of(TreeSet::new, TreeSet::add,
+ * (left, right) -> { left.addAll(right); return left; });
+ * }</pre>
+ *
+ * (This behavior is also implemented by the predefined collector
+ * {@link Collectors#toCollection(Supplier)}).
+ *
+ * @apiNote
+ * Performing a reduction operation with a {@code Collector} should produce a
+ * result equivalent to:
+ * <pre>{@code
+ * R container = collector.supplier().get();
+ * for (T t : data)
+ * collector.accumulator().accept(container, t);
+ * return collector.finisher().apply(container);
+ * }</pre>
+ *
+ * <p>However, the library is free to partition the input, perform the reduction
+ * on the partitions, and then use the combiner function to combine the partial
+ * results to achieve a parallel reduction. (Depending on the specific reduction
+ * operation, this may perform better or worse, depending on the relative cost
+ * of the accumulator and combiner functions.)
+ *
+ * <p>Collectors are designed to be <em>composed</em>; many of the methods
+ * in {@link Collectors} are functions that take a collector and produce
+ * a new collector. For example, given the following collector that computes
+ * the sum of the salaries of a stream of employees:
+ *
+ * <pre>{@code
+ * Collector<Employee, ?, Integer> summingSalaries
+ * = Collectors.summingInt(Employee::getSalary))
+ * }</pre>
+ *
+ * If we wanted to create a collector to tabulate the sum of salaries by
+ * department, we could reuse the "sum of salaries" logic using
+ * {@link Collectors#groupingBy(Function, Collector)}:
+ *
+ * <pre>{@code
+ * Collector<Employee, ?, Map<Department, Integer>> summingSalariesByDept
+ * = Collectors.groupingBy(Employee::getDepartment, summingSalaries);
+ * }</pre>
+ *
+ * @see Stream#collect(Collector)
+ * @see Collectors
+ *
+ * @param <T> the type of input elements to the reduction operation
+ * @param <A> the mutable accumulation type of the reduction operation (often
+ * hidden as an implementation detail)
+ * @param <R> the result type of the reduction operation
+ * @since 1.8
+ */
+public interface Collector<T, A, R> {
+ /**
+ * A function that creates and returns a new mutable result container.
+ *
+ * @return a function which returns a new, mutable result container
+ */
+ Supplier<A> supplier();
+
+ /**
+ * A function that folds a value into a mutable result container.
+ *
+ * @return a function which folds a value into a mutable result container
+ */
+ BiConsumer<A, T> accumulator();
+
+ /**
+ * A function that accepts two partial results and merges them. The
+ * combiner function may fold state from one argument into the other and
+ * return that, or may return a new result container.
+ *
+ * @return a function which combines two partial results into a combined
+ * result
+ */
+ BinaryOperator<A> combiner();
+
+ /**
+ * Perform the final transformation from the intermediate accumulation type
+ * {@code A} to the final result type {@code R}.
+ *
+ * <p>If the characteristic {@code IDENTITY_TRANSFORM} is
+ * set, this function may be presumed to be an identity transform with an
+ * unchecked cast from {@code A} to {@code R}.
+ *
+ * @return a function which transforms the intermediate result to the final
+ * result
+ */
+ Function<A, R> finisher();
+
+ /**
+ * Returns a {@code Set} of {@code Collector.Characteristics} indicating
+ * the characteristics of this Collector. This set should be immutable.
+ *
+ * @return an immutable set of collector characteristics
+ */
+ Set<Characteristics> characteristics();
+
+ /**
+ * Returns a new {@code Collector} described by the given {@code supplier},
+ * {@code accumulator}, and {@code combiner} functions. The resulting
+ * {@code Collector} has the {@code Collector.Characteristics.IDENTITY_FINISH}
+ * characteristic.
+ *
+ * @param supplier The supplier function for the new collector
+ * @param accumulator The accumulator function for the new collector
+ * @param combiner The combiner function for the new collector
+ * @param characteristics The collector characteristics for the new
+ * collector
+ * @param <T> The type of input elements for the new collector
+ * @param <R> The type of intermediate accumulation result, and final result,
+ * for the new collector
+ * @throws NullPointerException if any argument is null
+ * @return the new {@code Collector}
+ */
+ public static<T, R> Collector<T, R, R> of(Supplier<R> supplier,
+ BiConsumer<R, T> accumulator,
+ BinaryOperator<R> combiner,
+ Characteristics... characteristics) {
+ Objects.requireNonNull(supplier);
+ Objects.requireNonNull(accumulator);
+ Objects.requireNonNull(combiner);
+ Objects.requireNonNull(characteristics);
+ Set<Characteristics> cs = (characteristics.length == 0)
+ ? Collectors.CH_ID
+ : Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.IDENTITY_FINISH,
+ characteristics));
+ return new Collectors.CollectorImpl<>(supplier, accumulator, combiner, cs);
+ }
+
+ /**
+ * Returns a new {@code Collector} described by the given {@code supplier},
+ * {@code accumulator}, {@code combiner}, and {@code finisher} functions.
+ *
+ * @param supplier The supplier function for the new collector
+ * @param accumulator The accumulator function for the new collector
+ * @param combiner The combiner function for the new collector
+ * @param finisher The finisher function for the new collector
+ * @param characteristics The collector characteristics for the new
+ * collector
+ * @param <T> The type of input elements for the new collector
+ * @param <A> The intermediate accumulation type of the new collector
+ * @param <R> The final result type of the new collector
+ * @throws NullPointerException if any argument is null
+ * @return the new {@code Collector}
+ */
+ public static<T, A, R> Collector<T, A, R> of(Supplier<A> supplier,
+ BiConsumer<A, T> accumulator,
+ BinaryOperator<A> combiner,
+ Function<A, R> finisher,
+ Characteristics... characteristics) {
+ Objects.requireNonNull(supplier);
+ Objects.requireNonNull(accumulator);
+ Objects.requireNonNull(combiner);
+ Objects.requireNonNull(finisher);
+ Objects.requireNonNull(characteristics);
+ Set<Characteristics> cs = Collectors.CH_NOID;
+ if (characteristics.length > 0) {
+ cs = EnumSet.noneOf(Characteristics.class);
+ Collections.addAll(cs, characteristics);
+ cs = Collections.unmodifiableSet(cs);
+ }
+ return new Collectors.CollectorImpl<>(supplier, accumulator, combiner, finisher, cs);
+ }
+
+ /**
+ * Characteristics indicating properties of a {@code Collector}, which can
+ * be used to optimize reduction implementations.
+ */
+ enum Characteristics {
+ /**
+ * Indicates that this collector is <em>concurrent</em>, meaning that
+ * the result container can support the accumulator function being
+ * called concurrently with the same result container from multiple
+ * threads.
+ *
+ * <p>If a {@code CONCURRENT} collector is not also {@code UNORDERED},
+ * then it should only be evaluated concurrently if applied to an
+ * unordered data source.
+ */
+ CONCURRENT,
+
+ /**
+ * Indicates that the collection operation does not commit to preserving
+ * the encounter order of input elements. (This might be true if the
+ * result container has no intrinsic order, such as a {@link Set}.)
+ */
+ UNORDERED,
+
+ /**
+ * Indicates that the finisher function is the identity function and
+ * can be elided. If set, it must be the case that an unchecked cast
+ * from A to R will succeed.
+ */
+ IDENTITY_FINISH
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/Collectors.java b/ojluni/src/main/java/java/util/stream/Collectors.java
new file mode 100644
index 0000000..a338ec2
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/Collectors.java
@@ -0,0 +1,1568 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.AbstractMap;
+import java.util.AbstractSet;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.DoubleSummaryStatistics;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IntSummaryStatistics;
+import java.util.Iterator;
+import java.util.List;
+import java.util.LongSummaryStatistics;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.StringJoiner;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+import java.util.function.ToDoubleFunction;
+import java.util.function.ToIntFunction;
+import java.util.function.ToLongFunction;
+
+/**
+ * Implementations of {@link Collector} that implement various useful reduction
+ * operations, such as accumulating elements into collections, summarizing
+ * elements according to various criteria, etc.
+ *
+ * <p>The following are examples of using the predefined collectors to perform
+ * common mutable reduction tasks:
+ *
+ * <pre>{@code
+ * // Accumulate names into a List
+ * List<String> list = people.stream().map(Person::getName).collect(Collectors.toList());
+ *
+ * // Accumulate names into a TreeSet
+ * Set<String> set = people.stream().map(Person::getName).collect(Collectors.toCollection(TreeSet::new));
+ *
+ * // Convert elements to strings and concatenate them, separated by commas
+ * String joined = things.stream()
+ * .map(Object::toString)
+ * .collect(Collectors.joining(", "));
+ *
+ * // Compute sum of salaries of employee
+ * int total = employees.stream()
+ *                          .collect(Collectors.summingInt(Employee::getSalary));
+ *
+ * // Group employees by department
+ * Map<Department, List<Employee>> byDept
+ * = employees.stream()
+ * .collect(Collectors.groupingBy(Employee::getDepartment));
+ *
+ * // Compute sum of salaries by department
+ * Map<Department, Integer> totalByDept
+ * = employees.stream()
+ * .collect(Collectors.groupingBy(Employee::getDepartment,
+ * Collectors.summingInt(Employee::getSalary)));
+ *
+ * // Partition students into passing and failing
+ * Map<Boolean, List<Student>> passingFailing =
+ * students.stream()
+ * .collect(Collectors.partitioningBy(s -> s.getGrade() >= PASS_THRESHOLD));
+ *
+ * }</pre>
+ *
+ * @since 1.8
+ */
+public final class Collectors {
+
    // Pre-built, immutable characteristic sets shared by the factory methods
    // below, so each collector can hand one out directly from
    // characteristics() without allocating.

    // CONCURRENT + UNORDERED + IDENTITY_FINISH
    static final Set<Collector.Characteristics> CH_CONCURRENT_ID
            = Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.CONCURRENT,
                                                     Collector.Characteristics.UNORDERED,
                                                     Collector.Characteristics.IDENTITY_FINISH));
    // CONCURRENT + UNORDERED (an explicit finisher is required)
    static final Set<Collector.Characteristics> CH_CONCURRENT_NOID
            = Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.CONCURRENT,
                                                     Collector.Characteristics.UNORDERED));
    // IDENTITY_FINISH only
    static final Set<Collector.Characteristics> CH_ID
            = Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.IDENTITY_FINISH));
    // UNORDERED + IDENTITY_FINISH
    static final Set<Collector.Characteristics> CH_UNORDERED_ID
            = Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.UNORDERED,
                                                     Collector.Characteristics.IDENTITY_FINISH));
    // No characteristics at all
    static final Set<Collector.Characteristics> CH_NOID = Collections.emptySet();

    // Non-instantiable: this class is purely a namespace for static factories.
    private Collectors() { }
+
+ /**
+ * Returns a merge function, suitable for use in
+ * {@link Map#merge(Object, Object, BiFunction) Map.merge()} or
+ * {@link #toMap(Function, Function, BinaryOperator) toMap()}, which always
+ * throws {@code IllegalStateException}. This can be used to enforce the
+ * assumption that the elements being collected are distinct.
+ *
+ * @param <T> the type of input arguments to the merge function
+ * @return a merge function which always throw {@code IllegalStateException}
+ */
+ private static <T> BinaryOperator<T> throwingMerger() {
+ return (u,v) -> { throw new IllegalStateException(String.format("Duplicate key %s", u)); };
+ }
+
+ @SuppressWarnings("unchecked")
+ private static <I, R> Function<I, R> castingIdentity() {
+ return i -> (R) i;
+ }
+
+ /**
+ * Simple implementation class for {@code Collector}.
+ *
+ * @param <T> the type of elements to be collected
+ * @param <R> the type of the result
+ */
+ static class CollectorImpl<T, A, R> implements Collector<T, A, R> {
+ private final Supplier<A> supplier;
+ private final BiConsumer<A, T> accumulator;
+ private final BinaryOperator<A> combiner;
+ private final Function<A, R> finisher;
+ private final Set<Characteristics> characteristics;
+
+ CollectorImpl(Supplier<A> supplier,
+ BiConsumer<A, T> accumulator,
+ BinaryOperator<A> combiner,
+ Function<A,R> finisher,
+ Set<Characteristics> characteristics) {
+ this.supplier = supplier;
+ this.accumulator = accumulator;
+ this.combiner = combiner;
+ this.finisher = finisher;
+ this.characteristics = characteristics;
+ }
+
+ CollectorImpl(Supplier<A> supplier,
+ BiConsumer<A, T> accumulator,
+ BinaryOperator<A> combiner,
+ Set<Characteristics> characteristics) {
+ this(supplier, accumulator, combiner, castingIdentity(), characteristics);
+ }
+
+ @Override
+ public BiConsumer<A, T> accumulator() {
+ return accumulator;
+ }
+
+ @Override
+ public Supplier<A> supplier() {
+ return supplier;
+ }
+
+ @Override
+ public BinaryOperator<A> combiner() {
+ return combiner;
+ }
+
+ @Override
+ public Function<A, R> finisher() {
+ return finisher;
+ }
+
+ @Override
+ public Set<Characteristics> characteristics() {
+ return characteristics;
+ }
+ }
+
+ /**
+ * Returns a {@code Collector} that accumulates the input elements into a
+ * new {@code Collection}, in encounter order. The {@code Collection} is
+ * created by the provided factory.
+ *
+ * @param <T> the type of the input elements
+ * @param <C> the type of the resulting {@code Collection}
+ * @param collectionFactory a {@code Supplier} which returns a new, empty
+ * {@code Collection} of the appropriate type
+ * @return a {@code Collector} which collects all the input elements into a
+ * {@code Collection}, in encounter order
+ */
+ public static <T, C extends Collection<T>>
+ Collector<T, ?, C> toCollection(Supplier<C> collectionFactory) {
+ return new CollectorImpl<>(collectionFactory, Collection<T>::add,
+ (r1, r2) -> { r1.addAll(r2); return r1; },
+ CH_ID);
+ }
+
+ /**
+ * Returns a {@code Collector} that accumulates the input elements into a
+ * new {@code List}. There are no guarantees on the type, mutability,
+ * serializability, or thread-safety of the {@code List} returned; if more
+ * control over the returned {@code List} is required, use {@link #toCollection(Supplier)}.
+ *
+ * @param <T> the type of the input elements
+ * @return a {@code Collector} which collects all the input elements into a
+ * {@code List}, in encounter order
+ */
+ public static <T>
+ Collector<T, ?, List<T>> toList() {
+ return new CollectorImpl<>((Supplier<List<T>>) ArrayList::new, List::add,
+ (left, right) -> { left.addAll(right); return left; },
+ CH_ID);
+ }
+
+ /**
+ * Returns a {@code Collector} that accumulates the input elements into a
+ * new {@code Set}. There are no guarantees on the type, mutability,
+ * serializability, or thread-safety of the {@code Set} returned; if more
+ * control over the returned {@code Set} is required, use
+ * {@link #toCollection(Supplier)}.
+ *
+ * <p>This is an {@link Collector.Characteristics#UNORDERED unordered}
+ * Collector.
+ *
+ * @param <T> the type of the input elements
+ * @return a {@code Collector} which collects all the input elements into a
+ * {@code Set}
+ */
+ public static <T>
+ Collector<T, ?, Set<T>> toSet() {
+ return new CollectorImpl<>((Supplier<Set<T>>) HashSet::new, Set::add,
+ (left, right) -> { left.addAll(right); return left; },
+ CH_UNORDERED_ID);
+ }
+
+ /**
+ * Returns a {@code Collector} that concatenates the input elements into a
+ * {@code String}, in encounter order.
+ *
+ * @return a {@code Collector} that concatenates the input elements into a
+ * {@code String}, in encounter order
+ */
+ public static Collector<CharSequence, ?, String> joining() {
+ return new CollectorImpl<CharSequence, StringBuilder, String>(
+ StringBuilder::new, StringBuilder::append,
+ (r1, r2) -> { r1.append(r2); return r1; },
+ StringBuilder::toString, CH_NOID);
+ }
+
+ /**
+ * Returns a {@code Collector} that concatenates the input elements,
+ * separated by the specified delimiter, in encounter order.
+ *
+ * @param delimiter the delimiter to be used between each element
+ * @return A {@code Collector} which concatenates CharSequence elements,
+ * separated by the specified delimiter, in encounter order
+ */
+ public static Collector<CharSequence, ?, String> joining(CharSequence delimiter) {
+ return joining(delimiter, "", "");
+ }
+
+ /**
+ * Returns a {@code Collector} that concatenates the input elements,
+ * separated by the specified delimiter, with the specified prefix and
+ * suffix, in encounter order.
+ *
+ * @param delimiter the delimiter to be used between each element
+ * @param prefix the sequence of characters to be used at the beginning
+ * of the joined result
+ * @param suffix the sequence of characters to be used at the end
+ * of the joined result
+ * @return A {@code Collector} which concatenates CharSequence elements,
+ * separated by the specified delimiter, in encounter order
+ */
+ public static Collector<CharSequence, ?, String> joining(CharSequence delimiter,
+ CharSequence prefix,
+ CharSequence suffix) {
+ return new CollectorImpl<>(
+ () -> new StringJoiner(delimiter, prefix, suffix),
+ StringJoiner::add, StringJoiner::merge,
+ StringJoiner::toString, CH_NOID);
+ }
+
+ /**
+ * {@code BinaryOperator<Map>} that merges the contents of its right
+ * argument into its left argument, using the provided merge function to
+ * handle duplicate keys.
+ *
+ * @param <K> type of the map keys
+ * @param <V> type of the map values
+ * @param <M> type of the map
+ * @param mergeFunction A merge function suitable for
+ * {@link Map#merge(Object, Object, BiFunction) Map.merge()}
+ * @return a merge function for two maps
+ */
+ private static <K, V, M extends Map<K,V>>
+ BinaryOperator<M> mapMerger(BinaryOperator<V> mergeFunction) {
+ return (m1, m2) -> {
+ for (Map.Entry<K,V> e : m2.entrySet())
+ m1.merge(e.getKey(), e.getValue(), mergeFunction);
+ return m1;
+ };
+ }
+
+ /**
+ * Adapts a {@code Collector} accepting elements of type {@code U} to one
+ * accepting elements of type {@code T} by applying a mapping function to
+ * each input element before accumulation.
+ *
+ * @apiNote
+ * The {@code mapping()} collectors are most useful when used in a
+ * multi-level reduction, such as downstream of a {@code groupingBy} or
+ * {@code partitioningBy}. For example, given a stream of
+ * {@code Person}, to accumulate the set of last names in each city:
+ * <pre>{@code
+ * Map<City, Set<String>> lastNamesByCity
+ * = people.stream().collect(groupingBy(Person::getCity,
+ * mapping(Person::getLastName, toSet())));
+ * }</pre>
+ *
+ * @param <T> the type of the input elements
+ * @param <U> type of elements accepted by downstream collector
+ * @param <A> intermediate accumulation type of the downstream collector
+ * @param <R> result type of collector
+ * @param mapper a function to be applied to the input elements
+ * @param downstream a collector which will accept mapped values
+ * @return a collector which applies the mapping function to the input
+ * elements and provides the mapped results to the downstream collector
+ */
+ public static <T, U, A, R>
+ Collector<T, ?, R> mapping(Function<? super T, ? extends U> mapper,
+ Collector<? super U, A, R> downstream) {
+ BiConsumer<A, ? super U> downstreamAccumulator = downstream.accumulator();
+ return new CollectorImpl<>(downstream.supplier(),
+ (r, t) -> downstreamAccumulator.accept(r, mapper.apply(t)),
+ downstream.combiner(), downstream.finisher(),
+ downstream.characteristics());
+ }
+
+ /**
+ * Adapts a {@code Collector} to perform an additional finishing
+ * transformation. For example, one could adapt the {@link #toList()}
+ * collector to always produce an immutable list with:
+ * <pre>{@code
+ * List<String> people
+ * = people.stream().collect(collectingAndThen(toList(), Collections::unmodifiableList));
+ * }</pre>
+ *
+ * @param <T> the type of the input elements
+ * @param <A> intermediate accumulation type of the downstream collector
+ * @param <R> result type of the downstream collector
+ * @param <RR> result type of the resulting collector
+ * @param downstream a collector
+ * @param finisher a function to be applied to the final result of the downstream collector
+ * @return a collector which performs the action of the downstream collector,
+ * followed by an additional finishing step
+ */
+ public static<T,A,R,RR> Collector<T,A,RR> collectingAndThen(Collector<T,A,R> downstream,
+ Function<R,RR> finisher) {
+ Set<Collector.Characteristics> characteristics = downstream.characteristics();
+ if (characteristics.contains(Collector.Characteristics.IDENTITY_FINISH)) {
+ if (characteristics.size() == 1)
+ characteristics = Collectors.CH_NOID;
+ else {
+ characteristics = EnumSet.copyOf(characteristics);
+ characteristics.remove(Collector.Characteristics.IDENTITY_FINISH);
+ characteristics = Collections.unmodifiableSet(characteristics);
+ }
+ }
+ return new CollectorImpl<>(downstream.supplier(),
+ downstream.accumulator(),
+ downstream.combiner(),
+ downstream.finisher().andThen(finisher),
+ characteristics);
+ }
+
+ /**
+ * Returns a {@code Collector} accepting elements of type {@code T} that
+ * counts the number of input elements. If no elements are present, the
+ * result is 0.
+ *
+ * @implSpec
+ * This produces a result equivalent to:
+ * <pre>{@code
+ * reducing(0L, e -> 1L, Long::sum)
+ * }</pre>
+ *
+ * @param <T> the type of the input elements
+ * @return a {@code Collector} that counts the input elements
+ */
+ public static <T> Collector<T, ?, Long>
+ counting() {
+ return reducing(0L, e -> 1L, Long::sum);
+ }
+
+ /**
+ * Returns a {@code Collector} that produces the minimal element according
+ * to a given {@code Comparator}, described as an {@code Optional<T>}.
+ *
+ * @implSpec
+ * This produces a result equivalent to:
+ * <pre>{@code
+ * reducing(BinaryOperator.minBy(comparator))
+ * }</pre>
+ *
+ * @param <T> the type of the input elements
+ * @param comparator a {@code Comparator} for comparing elements
+ * @return a {@code Collector} that produces the minimal value
+ */
+ public static <T> Collector<T, ?, Optional<T>>
+ minBy(Comparator<? super T> comparator) {
+ return reducing(BinaryOperator.minBy(comparator));
+ }
+
+ /**
+ * Returns a {@code Collector} that produces the maximal element according
+ * to a given {@code Comparator}, described as an {@code Optional<T>}.
+ *
+ * @implSpec
+ * This produces a result equivalent to:
+ * <pre>{@code
+ * reducing(BinaryOperator.maxBy(comparator))
+ * }</pre>
+ *
+ * @param <T> the type of the input elements
+ * @param comparator a {@code Comparator} for comparing elements
+ * @return a {@code Collector} that produces the maximal value
+ */
+ public static <T> Collector<T, ?, Optional<T>>
+ maxBy(Comparator<? super T> comparator) {
+ return reducing(BinaryOperator.maxBy(comparator));
+ }
+
+ /**
+ * Returns a {@code Collector} that produces the sum of a integer-valued
+ * function applied to the input elements. If no elements are present,
+ * the result is 0.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper a function extracting the property to be summed
+ * @return a {@code Collector} that produces the sum of a derived property
+ */
+ public static <T> Collector<T, ?, Integer>
+ summingInt(ToIntFunction<? super T> mapper) {
+ return new CollectorImpl<>(
+ () -> new int[1],
+ (a, t) -> { a[0] += mapper.applyAsInt(t); },
+ (a, b) -> { a[0] += b[0]; return a; },
+ a -> a[0], CH_NOID);
+ }
+
+ /**
+ * Returns a {@code Collector} that produces the sum of a long-valued
+ * function applied to the input elements. If no elements are present,
+ * the result is 0.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper a function extracting the property to be summed
+ * @return a {@code Collector} that produces the sum of a derived property
+ */
+ public static <T> Collector<T, ?, Long>
+ summingLong(ToLongFunction<? super T> mapper) {
+ return new CollectorImpl<>(
+ () -> new long[1],
+ (a, t) -> { a[0] += mapper.applyAsLong(t); },
+ (a, b) -> { a[0] += b[0]; return a; },
+ a -> a[0], CH_NOID);
+ }
+
+ /**
+ * Returns a {@code Collector} that produces the sum of a double-valued
+ * function applied to the input elements. If no elements are present,
+ * the result is 0.
+ *
+ * <p>The sum returned can vary depending upon the order in which
+ * values are recorded, due to accumulated rounding error in
+ * addition of values of differing magnitudes. Values sorted by increasing
+ * absolute magnitude tend to yield more accurate results. If any recorded
+ * value is a {@code NaN} or the sum is at any point a {@code NaN} then the
+ * sum will be {@code NaN}.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper a function extracting the property to be summed
+ * @return a {@code Collector} that produces the sum of a derived property
+ */
+ public static <T> Collector<T, ?, Double>
+ summingDouble(ToDoubleFunction<? super T> mapper) {
+ /*
+ * In the arrays allocated for the collect operation, index 0
+ * holds the high-order bits of the running sum, index 1 holds
+ * the low-order bits of the sum computed via compensated
+ * summation, and index 2 holds the simple sum used to compute
+ * the proper result if the stream contains infinite values of
+ * the same sign.
+ */
+ return new CollectorImpl<>(
+ () -> new double[3],
+ (a, t) -> { sumWithCompensation(a, mapper.applyAsDouble(t));
+ a[2] += mapper.applyAsDouble(t);},
+ (a, b) -> { sumWithCompensation(a, b[0]);
+ a[2] += b[2];
+ return sumWithCompensation(a, b[1]); },
+ a -> computeFinalSum(a),
+ CH_NOID);
+ }
+
+ /**
+ * Incorporate a new double value using Kahan summation /
+ * compensation summation.
+ *
+ * High-order bits of the sum are in intermediateSum[0], low-order
+ * bits of the sum are in intermediateSum[1], any additional
+ * elements are application-specific.
+ *
+ * @param intermediateSum the high-order and low-order words of the intermediate sum
+ * @param value the name value to be included in the running sum
+ */
+ static double[] sumWithCompensation(double[] intermediateSum, double value) {
+ double tmp = value - intermediateSum[1];
+ double sum = intermediateSum[0];
+ double velvel = sum + tmp; // Little wolf of rounding error
+ intermediateSum[1] = (velvel - sum) - tmp;
+ intermediateSum[0] = velvel;
+ return intermediateSum;
+ }
+
+ /**
+ * If the compensated sum is spuriously NaN from accumulating one
+ * or more same-signed infinite values, return the
+ * correctly-signed infinity stored in the simple sum.
+ */
+ static double computeFinalSum(double[] summands) {
+ // Better error bounds to add both terms as the final sum
+ double tmp = summands[0] + summands[1];
+ double simpleSum = summands[summands.length - 1];
+ if (Double.isNaN(tmp) && Double.isInfinite(simpleSum))
+ return simpleSum;
+ else
+ return tmp;
+ }
+
+ /**
+ * Returns a {@code Collector} that produces the arithmetic mean of an integer-valued
+ * function applied to the input elements. If no elements are present,
+ * the result is 0.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper a function extracting the property to be summed
+ * @return a {@code Collector} that produces the sum of a derived property
+ */
+ public static <T> Collector<T, ?, Double>
+ averagingInt(ToIntFunction<? super T> mapper) {
+ return new CollectorImpl<>(
+ () -> new long[2],
+ (a, t) -> { a[0] += mapper.applyAsInt(t); a[1]++; },
+ (a, b) -> { a[0] += b[0]; a[1] += b[1]; return a; },
+ a -> (a[1] == 0) ? 0.0d : (double) a[0] / a[1], CH_NOID);
+ }
+
+ /**
+ * Returns a {@code Collector} that produces the arithmetic mean of a long-valued
+ * function applied to the input elements. If no elements are present,
+ * the result is 0.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper a function extracting the property to be summed
+ * @return a {@code Collector} that produces the sum of a derived property
+ */
+ public static <T> Collector<T, ?, Double>
+ averagingLong(ToLongFunction<? super T> mapper) {
+ return new CollectorImpl<>(
+ () -> new long[2],
+ (a, t) -> { a[0] += mapper.applyAsLong(t); a[1]++; },
+ (a, b) -> { a[0] += b[0]; a[1] += b[1]; return a; },
+ a -> (a[1] == 0) ? 0.0d : (double) a[0] / a[1], CH_NOID);
+ }
+
+ /**
+ * Returns a {@code Collector} that produces the arithmetic mean of a double-valued
+ * function applied to the input elements. If no elements are present,
+ * the result is 0.
+ *
+ * <p>The average returned can vary depending upon the order in which
+ * values are recorded, due to accumulated rounding error in
+ * addition of values of differing magnitudes. Values sorted by increasing
+ * absolute magnitude tend to yield more accurate results. If any recorded
+ * value is a {@code NaN} or the sum is at any point a {@code NaN} then the
+ * average will be {@code NaN}.
+ *
+ * @implNote The {@code double} format can represent all
+ * consecutive integers in the range -2<sup>53</sup> to
+ * 2<sup>53</sup>. If the pipeline has more than 2<sup>53</sup>
+ * values, the divisor in the average computation will saturate at
+ * 2<sup>53</sup>, leading to additional numerical errors.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper a function extracting the property to be summed
+ * @return a {@code Collector} that produces the sum of a derived property
+ */
+ public static <T> Collector<T, ?, Double>
+ averagingDouble(ToDoubleFunction<? super T> mapper) {
+ /*
+ * In the arrays allocated for the collect operation, index 0
+ * holds the high-order bits of the running sum, index 1 holds
+ * the low-order bits of the sum computed via compensated
+ * summation, and index 2 holds the number of values seen.
+ */
+ return new CollectorImpl<>(
+ () -> new double[4],
+ (a, t) -> { sumWithCompensation(a, mapper.applyAsDouble(t)); a[2]++; a[3]+= mapper.applyAsDouble(t);},
+ (a, b) -> { sumWithCompensation(a, b[0]); sumWithCompensation(a, b[1]); a[2] += b[2]; a[3] += b[3]; return a; },
+ a -> (a[2] == 0) ? 0.0d : (computeFinalSum(a) / a[2]),
+ CH_NOID);
+ }
+
+ /**
+ * Returns a {@code Collector} which performs a reduction of its
+ * input elements under a specified {@code BinaryOperator} using the
+ * provided identity.
+ *
+ * @apiNote
+ * The {@code reducing()} collectors are most useful when used in a
+ * multi-level reduction, downstream of {@code groupingBy} or
+ * {@code partitioningBy}. To perform a simple reduction on a stream,
+ * use {@link Stream#reduce(Object, BinaryOperator)}} instead.
+ *
+ * @param <T> element type for the input and output of the reduction
+ * @param identity the identity value for the reduction (also, the value
+ * that is returned when there are no input elements)
+ * @param op a {@code BinaryOperator<T>} used to reduce the input elements
+ * @return a {@code Collector} which implements the reduction operation
+ *
+ * @see #reducing(BinaryOperator)
+ * @see #reducing(Object, Function, BinaryOperator)
+ */
+ public static <T> Collector<T, ?, T>
+ reducing(T identity, BinaryOperator<T> op) {
+ return new CollectorImpl<>(
+ boxSupplier(identity),
+ (a, t) -> { a[0] = op.apply(a[0], t); },
+ (a, b) -> { a[0] = op.apply(a[0], b[0]); return a; },
+ a -> a[0],
+ CH_NOID);
+ }
+
+ @SuppressWarnings("unchecked")
+ private static <T> Supplier<T[]> boxSupplier(T identity) {
+ return () -> (T[]) new Object[] { identity };
+ }
+
+ /**
+ * Returns a {@code Collector} which performs a reduction of its
+ * input elements under a specified {@code BinaryOperator}. The result
+ * is described as an {@code Optional<T>}.
+ *
+ * @apiNote
+ * The {@code reducing()} collectors are most useful when used in a
+ * multi-level reduction, downstream of {@code groupingBy} or
+ * {@code partitioningBy}. To perform a simple reduction on a stream,
+ * use {@link Stream#reduce(BinaryOperator)} instead.
+ *
+ * <p>For example, given a stream of {@code Person}, to calculate tallest
+ * person in each city:
+ * <pre>{@code
+ * Comparator<Person> byHeight = Comparator.comparing(Person::getHeight);
+ * Map<City, Person> tallestByCity
+ * = people.stream().collect(groupingBy(Person::getCity, reducing(BinaryOperator.maxBy(byHeight))));
+ * }</pre>
+ *
+ * @param <T> element type for the input and output of the reduction
+ * @param op a {@code BinaryOperator<T>} used to reduce the input elements
+ * @return a {@code Collector} which implements the reduction operation
+ *
+ * @see #reducing(Object, BinaryOperator)
+ * @see #reducing(Object, Function, BinaryOperator)
+ */
+    public static <T> Collector<T, ?, Optional<T>>
+    reducing(BinaryOperator<T> op) {
+        // Mutable container tracking whether any element has been seen, so a
+        // null first element can be distinguished from "no elements at all".
+        class OptionalBox implements Consumer<T> {
+            T value = null;
+            boolean present = false;
+
+            @Override
+            public void accept(T t) {
+                if (present) {
+                    value = op.apply(value, t);
+                }
+                else {
+                    // First element: seed the box instead of applying op.
+                    value = t;
+                    present = true;
+                }
+            }
+        }
+
+        return new CollectorImpl<T, OptionalBox, Optional<T>>(
+                OptionalBox::new, OptionalBox::accept,
+                // Parallel combine: fold the right box's value into the left box.
+                (a, b) -> { if (b.present) a.accept(b.value); return a; },
+                a -> Optional.ofNullable(a.value), CH_NOID);
+    }
+
+ /**
+ * Returns a {@code Collector} which performs a reduction of its
+ * input elements under a specified mapping function and
+ * {@code BinaryOperator}. This is a generalization of
+ * {@link #reducing(Object, BinaryOperator)} which allows a transformation
+ * of the elements before reduction.
+ *
+ * @apiNote
+ * The {@code reducing()} collectors are most useful when used in a
+ * multi-level reduction, downstream of {@code groupingBy} or
+ * {@code partitioningBy}. To perform a simple map-reduce on a stream,
+ * use {@link Stream#map(Function)} and {@link Stream#reduce(Object, BinaryOperator)}
+ * instead.
+ *
+ * <p>For example, given a stream of {@code Person}, to calculate the longest
+ * last name of residents in each city:
+ * <pre>{@code
+ * Comparator<String> byLength = Comparator.comparing(String::length);
+ * Map<City, String> longestLastNameByCity
+ * = people.stream().collect(groupingBy(Person::getCity,
+     *                                              reducing("", Person::getLastName, BinaryOperator.maxBy(byLength))));
+ * }</pre>
+ *
+ * @param <T> the type of the input elements
+ * @param <U> the type of the mapped values
+ * @param identity the identity value for the reduction (also, the value
+ * that is returned when there are no input elements)
+ * @param mapper a mapping function to apply to each input value
+ * @param op a {@code BinaryOperator<U>} used to reduce the mapped values
+ * @return a {@code Collector} implementing the map-reduce operation
+ *
+ * @see #reducing(Object, BinaryOperator)
+ * @see #reducing(BinaryOperator)
+ */
+    public static <T, U>
+    Collector<T, ?, U> reducing(U identity,
+                                Function<? super T, ? extends U> mapper,
+                                BinaryOperator<U> op) {
+        return new CollectorImpl<>(
+                boxSupplier(identity),
+                // Map each element to U before folding it into the boxed value.
+                (a, t) -> { a[0] = op.apply(a[0], mapper.apply(t)); },
+                (a, b) -> { a[0] = op.apply(a[0], b[0]); return a; },
+                a -> a[0], CH_NOID);
+    }
+
+ /**
+ * Returns a {@code Collector} implementing a "group by" operation on
+ * input elements of type {@code T}, grouping elements according to a
+ * classification function, and returning the results in a {@code Map}.
+ *
+ * <p>The classification function maps elements to some key type {@code K}.
+ * The collector produces a {@code Map<K, List<T>>} whose keys are the
+ * values resulting from applying the classification function to the input
+ * elements, and whose corresponding values are {@code List}s containing the
+ * input elements which map to the associated key under the classification
+ * function.
+ *
+ * <p>There are no guarantees on the type, mutability, serializability, or
+ * thread-safety of the {@code Map} or {@code List} objects returned.
+ * @implSpec
+ * This produces a result similar to:
+ * <pre>{@code
+ * groupingBy(classifier, toList());
+ * }</pre>
+ *
+ * @implNote
+ * The returned {@code Collector} is not concurrent. For parallel stream
+ * pipelines, the {@code combiner} function operates by merging the keys
+ * from one map into another, which can be an expensive operation. If
+ * preservation of the order in which elements appear in the resulting {@code Map}
+ * collector is not required, using {@link #groupingByConcurrent(Function)}
+ * may offer better parallel performance.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the type of the keys
+ * @param classifier the classifier function mapping input elements to keys
+ * @return a {@code Collector} implementing the group-by operation
+ *
+ * @see #groupingBy(Function, Collector)
+ * @see #groupingBy(Function, Supplier, Collector)
+ * @see #groupingByConcurrent(Function)
+ */
+    public static <T, K> Collector<T, ?, Map<K, List<T>>>
+    groupingBy(Function<? super T, ? extends K> classifier) {
+        // Delegate to the general form, collecting each group into a List.
+        return groupingBy(classifier, toList());
+    }
+
+ /**
+ * Returns a {@code Collector} implementing a cascaded "group by" operation
+ * on input elements of type {@code T}, grouping elements according to a
+ * classification function, and then performing a reduction operation on
+ * the values associated with a given key using the specified downstream
+ * {@code Collector}.
+ *
+ * <p>The classification function maps elements to some key type {@code K}.
+ * The downstream collector operates on elements of type {@code T} and
+ * produces a result of type {@code D}. The resulting collector produces a
+ * {@code Map<K, D>}.
+ *
+ * <p>There are no guarantees on the type, mutability,
+ * serializability, or thread-safety of the {@code Map} returned.
+ *
+ * <p>For example, to compute the set of last names of people in each city:
+ * <pre>{@code
+ * Map<City, Set<String>> namesByCity
+ * = people.stream().collect(groupingBy(Person::getCity,
+ * mapping(Person::getLastName, toSet())));
+ * }</pre>
+ *
+ * @implNote
+ * The returned {@code Collector} is not concurrent. For parallel stream
+ * pipelines, the {@code combiner} function operates by merging the keys
+ * from one map into another, which can be an expensive operation. If
+ * preservation of the order in which elements are presented to the downstream
+ * collector is not required, using {@link #groupingByConcurrent(Function, Collector)}
+ * may offer better parallel performance.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the type of the keys
+ * @param <A> the intermediate accumulation type of the downstream collector
+ * @param <D> the result type of the downstream reduction
+ * @param classifier a classifier function mapping input elements to keys
+ * @param downstream a {@code Collector} implementing the downstream reduction
+ * @return a {@code Collector} implementing the cascaded group-by operation
+ * @see #groupingBy(Function)
+ *
+ * @see #groupingBy(Function, Supplier, Collector)
+ * @see #groupingByConcurrent(Function, Collector)
+ */
+    public static <T, K, A, D>
+    Collector<T, ?, Map<K, D>> groupingBy(Function<? super T, ? extends K> classifier,
+                                          Collector<? super T, A, D> downstream) {
+        // Delegate to the general form, using HashMap as the default map type.
+        return groupingBy(classifier, HashMap::new, downstream);
+    }
+
+ /**
+ * Returns a {@code Collector} implementing a cascaded "group by" operation
+ * on input elements of type {@code T}, grouping elements according to a
+ * classification function, and then performing a reduction operation on
+ * the values associated with a given key using the specified downstream
+ * {@code Collector}. The {@code Map} produced by the Collector is created
+ * with the supplied factory function.
+ *
+ * <p>The classification function maps elements to some key type {@code K}.
+ * The downstream collector operates on elements of type {@code T} and
+ * produces a result of type {@code D}. The resulting collector produces a
+ * {@code Map<K, D>}.
+ *
+ * <p>For example, to compute the set of last names of people in each city,
+ * where the city names are sorted:
+ * <pre>{@code
+ * Map<City, Set<String>> namesByCity
+ * = people.stream().collect(groupingBy(Person::getCity, TreeMap::new,
+ * mapping(Person::getLastName, toSet())));
+ * }</pre>
+ *
+ * @implNote
+ * The returned {@code Collector} is not concurrent. For parallel stream
+ * pipelines, the {@code combiner} function operates by merging the keys
+ * from one map into another, which can be an expensive operation. If
+ * preservation of the order in which elements are presented to the downstream
+ * collector is not required, using {@link #groupingByConcurrent(Function, Supplier, Collector)}
+ * may offer better parallel performance.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the type of the keys
+ * @param <A> the intermediate accumulation type of the downstream collector
+ * @param <D> the result type of the downstream reduction
+ * @param <M> the type of the resulting {@code Map}
+ * @param classifier a classifier function mapping input elements to keys
+ * @param downstream a {@code Collector} implementing the downstream reduction
+ * @param mapFactory a function which, when called, produces a new empty
+ * {@code Map} of the desired type
+ * @return a {@code Collector} implementing the cascaded group-by operation
+ *
+ * @see #groupingBy(Function, Collector)
+ * @see #groupingBy(Function)
+ * @see #groupingByConcurrent(Function, Supplier, Collector)
+ */
+    public static <T, K, D, A, M extends Map<K, D>>
+    Collector<T, ?, M> groupingBy(Function<? super T, ? extends K> classifier,
+                                  Supplier<M> mapFactory,
+                                  Collector<? super T, A, D> downstream) {
+        Supplier<A> downstreamSupplier = downstream.supplier();
+        BiConsumer<A, ? super T> downstreamAccumulator = downstream.accumulator();
+        BiConsumer<Map<K, A>, T> accumulator = (m, t) -> {
+            K key = Objects.requireNonNull(classifier.apply(t), "element cannot be mapped to a null key");
+            // Lazily create the per-key downstream container on first sighting.
+            A container = m.computeIfAbsent(key, k -> downstreamSupplier.get());
+            downstreamAccumulator.accept(container, t);
+        };
+        // Parallel combine: merge per-key containers using the downstream combiner.
+        BinaryOperator<Map<K, A>> merger = Collectors.<K, A, Map<K, A>>mapMerger(downstream.combiner());
+        // Cast is safe: the map holds A-values only until the finisher replaces
+        // them with D-values, and it is never exposed to callers before finishing.
+        @SuppressWarnings("unchecked")
+        Supplier<Map<K, A>> mangledFactory = (Supplier<Map<K, A>>) mapFactory;
+
+        if (downstream.characteristics().contains(Collector.Characteristics.IDENTITY_FINISH)) {
+            // A == D for identity-finish downstreams, so the intermediate map
+            // is already the final result.
+            return new CollectorImpl<>(mangledFactory, accumulator, merger, CH_ID);
+        }
+        else {
+            @SuppressWarnings("unchecked")
+            Function<A, A> downstreamFinisher = (Function<A, A>) downstream.finisher();
+            Function<Map<K, A>, M> finisher = intermediate -> {
+                // Finish each group's container in place (A -> D), then cast the map.
+                intermediate.replaceAll((k, v) -> downstreamFinisher.apply(v));
+                @SuppressWarnings("unchecked")
+                M castResult = (M) intermediate;
+                return castResult;
+            };
+            return new CollectorImpl<>(mangledFactory, accumulator, merger, finisher, CH_NOID);
+        }
+    }
+
+ /**
+ * Returns a concurrent {@code Collector} implementing a "group by"
+ * operation on input elements of type {@code T}, grouping elements
+ * according to a classification function.
+ *
+ * <p>This is a {@link Collector.Characteristics#CONCURRENT concurrent} and
+ * {@link Collector.Characteristics#UNORDERED unordered} Collector.
+ *
+ * <p>The classification function maps elements to some key type {@code K}.
+ * The collector produces a {@code ConcurrentMap<K, List<T>>} whose keys are the
+ * values resulting from applying the classification function to the input
+ * elements, and whose corresponding values are {@code List}s containing the
+ * input elements which map to the associated key under the classification
+ * function.
+ *
+ * <p>There are no guarantees on the type, mutability, or serializability
+ * of the {@code Map} or {@code List} objects returned, or of the
+ * thread-safety of the {@code List} objects returned.
+ * @implSpec
+ * This produces a result similar to:
+ * <pre>{@code
+ * groupingByConcurrent(classifier, toList());
+ * }</pre>
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the type of the keys
+ * @param classifier a classifier function mapping input elements to keys
+ * @return a concurrent, unordered {@code Collector} implementing the group-by operation
+ *
+ * @see #groupingBy(Function)
+ * @see #groupingByConcurrent(Function, Collector)
+ * @see #groupingByConcurrent(Function, Supplier, Collector)
+ */
+    public static <T, K>
+    Collector<T, ?, ConcurrentMap<K, List<T>>>
+    groupingByConcurrent(Function<? super T, ? extends K> classifier) {
+        // Delegate to the general concurrent form with a ConcurrentHashMap and
+        // a List downstream.
+        return groupingByConcurrent(classifier, ConcurrentHashMap::new, toList());
+    }
+
+ /**
+ * Returns a concurrent {@code Collector} implementing a cascaded "group by"
+ * operation on input elements of type {@code T}, grouping elements
+ * according to a classification function, and then performing a reduction
+ * operation on the values associated with a given key using the specified
+ * downstream {@code Collector}.
+ *
+ * <p>This is a {@link Collector.Characteristics#CONCURRENT concurrent} and
+ * {@link Collector.Characteristics#UNORDERED unordered} Collector.
+ *
+ * <p>The classification function maps elements to some key type {@code K}.
+ * The downstream collector operates on elements of type {@code T} and
+ * produces a result of type {@code D}. The resulting collector produces a
+ * {@code Map<K, D>}.
+ *
+ * <p>For example, to compute the set of last names of people in each city,
+ * where the city names are sorted:
+ * <pre>{@code
+ * ConcurrentMap<City, Set<String>> namesByCity
+ * = people.stream().collect(groupingByConcurrent(Person::getCity,
+ * mapping(Person::getLastName, toSet())));
+ * }</pre>
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the type of the keys
+ * @param <A> the intermediate accumulation type of the downstream collector
+ * @param <D> the result type of the downstream reduction
+ * @param classifier a classifier function mapping input elements to keys
+ * @param downstream a {@code Collector} implementing the downstream reduction
+ * @return a concurrent, unordered {@code Collector} implementing the cascaded group-by operation
+ *
+ * @see #groupingBy(Function, Collector)
+ * @see #groupingByConcurrent(Function)
+ * @see #groupingByConcurrent(Function, Supplier, Collector)
+ */
+    public static <T, K, A, D>
+    Collector<T, ?, ConcurrentMap<K, D>> groupingByConcurrent(Function<? super T, ? extends K> classifier,
+                                                              Collector<? super T, A, D> downstream) {
+        // Delegate to the general concurrent form with ConcurrentHashMap as the
+        // default map type.
+        return groupingByConcurrent(classifier, ConcurrentHashMap::new, downstream);
+    }
+
+ /**
+ * Returns a concurrent {@code Collector} implementing a cascaded "group by"
+ * operation on input elements of type {@code T}, grouping elements
+ * according to a classification function, and then performing a reduction
+ * operation on the values associated with a given key using the specified
+ * downstream {@code Collector}. The {@code ConcurrentMap} produced by the
+ * Collector is created with the supplied factory function.
+ *
+ * <p>This is a {@link Collector.Characteristics#CONCURRENT concurrent} and
+ * {@link Collector.Characteristics#UNORDERED unordered} Collector.
+ *
+ * <p>The classification function maps elements to some key type {@code K}.
+ * The downstream collector operates on elements of type {@code T} and
+ * produces a result of type {@code D}. The resulting collector produces a
+ * {@code Map<K, D>}.
+ *
+ * <p>For example, to compute the set of last names of people in each city,
+ * where the city names are sorted:
+ * <pre>{@code
+ * ConcurrentMap<City, Set<String>> namesByCity
+     *   = people.stream().collect(groupingByConcurrent(Person::getCity, ConcurrentSkipListMap::new,
+     *                                                  mapping(Person::getLastName, toSet())));
+ * }</pre>
+ *
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the type of the keys
+ * @param <A> the intermediate accumulation type of the downstream collector
+ * @param <D> the result type of the downstream reduction
+ * @param <M> the type of the resulting {@code ConcurrentMap}
+ * @param classifier a classifier function mapping input elements to keys
+ * @param downstream a {@code Collector} implementing the downstream reduction
+ * @param mapFactory a function which, when called, produces a new empty
+ * {@code ConcurrentMap} of the desired type
+ * @return a concurrent, unordered {@code Collector} implementing the cascaded group-by operation
+ *
+ * @see #groupingByConcurrent(Function)
+ * @see #groupingByConcurrent(Function, Collector)
+ * @see #groupingBy(Function, Supplier, Collector)
+ */
+    public static <T, K, A, D, M extends ConcurrentMap<K, D>>
+    Collector<T, ?, M> groupingByConcurrent(Function<? super T, ? extends K> classifier,
+                                            Supplier<M> mapFactory,
+                                            Collector<? super T, A, D> downstream) {
+        Supplier<A> downstreamSupplier = downstream.supplier();
+        BiConsumer<A, ? super T> downstreamAccumulator = downstream.accumulator();
+        BinaryOperator<ConcurrentMap<K, A>> merger = Collectors.<K, A, ConcurrentMap<K, A>>mapMerger(downstream.combiner());
+        // Cast is safe: the map holds A-values only until the finisher replaces
+        // them with D-values, and it is never exposed to callers before finishing.
+        @SuppressWarnings("unchecked")
+        Supplier<ConcurrentMap<K, A>> mangledFactory = (Supplier<ConcurrentMap<K, A>>) mapFactory;
+        BiConsumer<ConcurrentMap<K, A>, T> accumulator;
+        if (downstream.characteristics().contains(Collector.Characteristics.CONCURRENT)) {
+            // Downstream container is itself thread-safe; accumulate without locking.
+            accumulator = (m, t) -> {
+                K key = Objects.requireNonNull(classifier.apply(t), "element cannot be mapped to a null key");
+                A resultContainer = m.computeIfAbsent(key, k -> downstreamSupplier.get());
+                downstreamAccumulator.accept(resultContainer, t);
+            };
+        }
+        else {
+            // Downstream container is not thread-safe: multiple threads may land
+            // on the same key concurrently, so serialize access per container.
+            accumulator = (m, t) -> {
+                K key = Objects.requireNonNull(classifier.apply(t), "element cannot be mapped to a null key");
+                A resultContainer = m.computeIfAbsent(key, k -> downstreamSupplier.get());
+                synchronized (resultContainer) {
+                    downstreamAccumulator.accept(resultContainer, t);
+                }
+            };
+        }
+
+        if (downstream.characteristics().contains(Collector.Characteristics.IDENTITY_FINISH)) {
+            // A == D for identity-finish downstreams, so the intermediate map
+            // is already the final result.
+            return new CollectorImpl<>(mangledFactory, accumulator, merger, CH_CONCURRENT_ID);
+        }
+        else {
+            @SuppressWarnings("unchecked")
+            Function<A, A> downstreamFinisher = (Function<A, A>) downstream.finisher();
+            Function<ConcurrentMap<K, A>, M> finisher = intermediate -> {
+                // Finish each group's container in place (A -> D), then cast the map.
+                intermediate.replaceAll((k, v) -> downstreamFinisher.apply(v));
+                @SuppressWarnings("unchecked")
+                M castResult = (M) intermediate;
+                return castResult;
+            };
+            return new CollectorImpl<>(mangledFactory, accumulator, merger, finisher, CH_CONCURRENT_NOID);
+        }
+    }
+
+ /**
+ * Returns a {@code Collector} which partitions the input elements according
+ * to a {@code Predicate}, and organizes them into a
+ * {@code Map<Boolean, List<T>>}.
+ *
+ * There are no guarantees on the type, mutability,
+ * serializability, or thread-safety of the {@code Map} returned.
+ *
+ * @param <T> the type of the input elements
+ * @param predicate a predicate used for classifying input elements
+ * @return a {@code Collector} implementing the partitioning operation
+ *
+ * @see #partitioningBy(Predicate, Collector)
+ */
+    public static <T>
+    Collector<T, ?, Map<Boolean, List<T>>> partitioningBy(Predicate<? super T> predicate) {
+        // Delegate to the general form, collecting each partition into a List.
+        return partitioningBy(predicate, toList());
+    }
+
+ /**
+ * Returns a {@code Collector} which partitions the input elements according
+ * to a {@code Predicate}, reduces the values in each partition according to
+ * another {@code Collector}, and organizes them into a
+ * {@code Map<Boolean, D>} whose values are the result of the downstream
+ * reduction.
+ *
+ * <p>There are no guarantees on the type, mutability,
+ * serializability, or thread-safety of the {@code Map} returned.
+ *
+ * @param <T> the type of the input elements
+ * @param <A> the intermediate accumulation type of the downstream collector
+ * @param <D> the result type of the downstream reduction
+ * @param predicate a predicate used for classifying input elements
+ * @param downstream a {@code Collector} implementing the downstream
+ * reduction
+ * @return a {@code Collector} implementing the cascaded partitioning
+ * operation
+ *
+ * @see #partitioningBy(Predicate)
+ */
+    public static <T, D, A>
+    Collector<T, ?, Map<Boolean, D>> partitioningBy(Predicate<? super T> predicate,
+                                                    Collector<? super T, A, D> downstream) {
+        BiConsumer<A, ? super T> downstreamAccumulator = downstream.accumulator();
+        // Route each element into the true-side or false-side container.
+        BiConsumer<Partition<A>, T> accumulator = (result, t) ->
+                downstreamAccumulator.accept(predicate.test(t) ? result.forTrue : result.forFalse, t);
+        BinaryOperator<A> op = downstream.combiner();
+        // Parallel combine: merge each side with the downstream combiner.
+        BinaryOperator<Partition<A>> merger = (left, right) ->
+                new Partition<>(op.apply(left.forTrue, right.forTrue),
+                                op.apply(left.forFalse, right.forFalse));
+        Supplier<Partition<A>> supplier = () ->
+                new Partition<>(downstream.supplier().get(),
+                                downstream.supplier().get());
+        if (downstream.characteristics().contains(Collector.Characteristics.IDENTITY_FINISH)) {
+            // A == D, so the Partition (used where Map<Boolean, D> is expected)
+            // is the result as-is.
+            return new CollectorImpl<>(supplier, accumulator, merger, CH_ID);
+        }
+        else {
+            // Finish both sides, producing a new Partition of finished values.
+            Function<Partition<A>, Map<Boolean, D>> finisher = par ->
+                    new Partition<>(downstream.finisher().apply(par.forTrue),
+                                    downstream.finisher().apply(par.forFalse));
+            return new CollectorImpl<>(supplier, accumulator, merger, finisher, CH_NOID);
+        }
+    }
+
+ /**
+ * Returns a {@code Collector} that accumulates elements into a
+ * {@code Map} whose keys and values are the result of applying the provided
+ * mapping functions to the input elements.
+ *
+ * <p>If the mapped keys contains duplicates (according to
+ * {@link Object#equals(Object)}), an {@code IllegalStateException} is
+ * thrown when the collection operation is performed. If the mapped keys
+ * may have duplicates, use {@link #toMap(Function, Function, BinaryOperator)}
+ * instead.
+ *
+ * @apiNote
+ * It is common for either the key or the value to be the input elements.
+ * In this case, the utility method
+ * {@link java.util.function.Function#identity()} may be helpful.
+ * For example, the following produces a {@code Map} mapping
+ * students to their grade point average:
+ * <pre>{@code
+     * Map<Student, Double> studentToGPA
+     *     = students.stream().collect(toMap(Function.identity(),
+     *                                       student -> computeGPA(student)));
+ * }</pre>
+ * And the following produces a {@code Map} mapping a unique identifier to
+ * students:
+ * <pre>{@code
+     * Map<String, Student> studentIdToStudent
+     *     = students.stream().collect(toMap(Student::getId,
+     *                                       Function.identity()));
+ * }</pre>
+ *
+ * @implNote
+ * The returned {@code Collector} is not concurrent. For parallel stream
+ * pipelines, the {@code combiner} function operates by merging the keys
+ * from one map into another, which can be an expensive operation. If it is
+ * not required that results are inserted into the {@code Map} in encounter
+ * order, using {@link #toConcurrentMap(Function, Function)}
+ * may offer better parallel performance.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the output type of the key mapping function
+ * @param <U> the output type of the value mapping function
+ * @param keyMapper a mapping function to produce keys
+ * @param valueMapper a mapping function to produce values
+ * @return a {@code Collector} which collects elements into a {@code Map}
+ * whose keys and values are the result of applying mapping functions to
+ * the input elements
+ *
+ * @see #toMap(Function, Function, BinaryOperator)
+ * @see #toMap(Function, Function, BinaryOperator, Supplier)
+ * @see #toConcurrentMap(Function, Function)
+ */
+    public static <T, K, U>
+    Collector<T, ?, Map<K,U>> toMap(Function<? super T, ? extends K> keyMapper,
+                                    Function<? super T, ? extends U> valueMapper) {
+        // throwingMerger raises IllegalStateException on duplicate keys (see javadoc).
+        return toMap(keyMapper, valueMapper, throwingMerger(), HashMap::new);
+    }
+
+ /**
+ * Returns a {@code Collector} that accumulates elements into a
+ * {@code Map} whose keys and values are the result of applying the provided
+ * mapping functions to the input elements.
+ *
+ * <p>If the mapped
+ * keys contains duplicates (according to {@link Object#equals(Object)}),
+ * the value mapping function is applied to each equal element, and the
+ * results are merged using the provided merging function.
+ *
+ * @apiNote
+ * There are multiple ways to deal with collisions between multiple elements
+ * mapping to the same key. The other forms of {@code toMap} simply use
+ * a merge function that throws unconditionally, but you can easily write
+ * more flexible merge policies. For example, if you have a stream
+ * of {@code Person}, and you want to produce a "phone book" mapping name to
+ * address, but it is possible that two persons have the same name, you can
+ * do as follows to gracefully deals with these collisions, and produce a
+ * {@code Map} mapping names to a concatenated list of addresses:
+ * <pre>{@code
+     * Map<String, String> phoneBook
+     *     = people.stream().collect(toMap(Person::getName,
+     *                                     Person::getAddress,
+     *                                     (s, a) -> s + ", " + a));
+ * }</pre>
+ *
+ * @implNote
+ * The returned {@code Collector} is not concurrent. For parallel stream
+ * pipelines, the {@code combiner} function operates by merging the keys
+ * from one map into another, which can be an expensive operation. If it is
+ * not required that results are merged into the {@code Map} in encounter
+ * order, using {@link #toConcurrentMap(Function, Function, BinaryOperator)}
+ * may offer better parallel performance.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the output type of the key mapping function
+ * @param <U> the output type of the value mapping function
+ * @param keyMapper a mapping function to produce keys
+ * @param valueMapper a mapping function to produce values
+ * @param mergeFunction a merge function, used to resolve collisions between
+ * values associated with the same key, as supplied
+ * to {@link Map#merge(Object, Object, BiFunction)}
+ * @return a {@code Collector} which collects elements into a {@code Map}
+ * whose keys are the result of applying a key mapping function to the input
+ * elements, and whose values are the result of applying a value mapping
+ * function to all input elements equal to the key and combining them
+ * using the merge function
+ *
+ * @see #toMap(Function, Function)
+ * @see #toMap(Function, Function, BinaryOperator, Supplier)
+ * @see #toConcurrentMap(Function, Function, BinaryOperator)
+ */
+    public static <T, K, U>
+    Collector<T, ?, Map<K,U>> toMap(Function<? super T, ? extends K> keyMapper,
+                                    Function<? super T, ? extends U> valueMapper,
+                                    BinaryOperator<U> mergeFunction) {
+        // Delegate to the general form, using HashMap as the default map type.
+        return toMap(keyMapper, valueMapper, mergeFunction, HashMap::new);
+    }
+
+ /**
+ * Returns a {@code Collector} that accumulates elements into a
+ * {@code Map} whose keys and values are the result of applying the provided
+ * mapping functions to the input elements.
+ *
+ * <p>If the mapped
+ * keys contains duplicates (according to {@link Object#equals(Object)}),
+ * the value mapping function is applied to each equal element, and the
+ * results are merged using the provided merging function. The {@code Map}
+ * is created by a provided supplier function.
+ *
+ * @implNote
+ * The returned {@code Collector} is not concurrent. For parallel stream
+ * pipelines, the {@code combiner} function operates by merging the keys
+ * from one map into another, which can be an expensive operation. If it is
+ * not required that results are merged into the {@code Map} in encounter
+ * order, using {@link #toConcurrentMap(Function, Function, BinaryOperator, Supplier)}
+ * may offer better parallel performance.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the output type of the key mapping function
+ * @param <U> the output type of the value mapping function
+ * @param <M> the type of the resulting {@code Map}
+ * @param keyMapper a mapping function to produce keys
+ * @param valueMapper a mapping function to produce values
+ * @param mergeFunction a merge function, used to resolve collisions between
+ * values associated with the same key, as supplied
+ * to {@link Map#merge(Object, Object, BiFunction)}
+ * @param mapSupplier a function which returns a new, empty {@code Map} into
+ * which the results will be inserted
+ * @return a {@code Collector} which collects elements into a {@code Map}
+ * whose keys are the result of applying a key mapping function to the input
+ * elements, and whose values are the result of applying a value mapping
+ * function to all input elements equal to the key and combining them
+ * using the merge function
+ *
+ * @see #toMap(Function, Function)
+ * @see #toMap(Function, Function, BinaryOperator)
+ * @see #toConcurrentMap(Function, Function, BinaryOperator, Supplier)
+ */
+    public static <T, K, U, M extends Map<K, U>>
+    Collector<T, ?, M> toMap(Function<? super T, ? extends K> keyMapper,
+                             Function<? super T, ? extends U> valueMapper,
+                             BinaryOperator<U> mergeFunction,
+                             Supplier<M> mapSupplier) {
+        // Map.merge applies mergeFunction only when the key is already present.
+        BiConsumer<M, T> accumulator
+                = (map, element) -> map.merge(keyMapper.apply(element),
+                                              valueMapper.apply(element), mergeFunction);
+        return new CollectorImpl<>(mapSupplier, accumulator, mapMerger(mergeFunction), CH_ID);
+    }
+
+ /**
+ * Returns a concurrent {@code Collector} that accumulates elements into a
+ * {@code ConcurrentMap} whose keys and values are the result of applying
+ * the provided mapping functions to the input elements.
+ *
+ * <p>If the mapped keys contains duplicates (according to
+ * {@link Object#equals(Object)}), an {@code IllegalStateException} is
+ * thrown when the collection operation is performed. If the mapped keys
+ * may have duplicates, use
+ * {@link #toConcurrentMap(Function, Function, BinaryOperator)} instead.
+ *
+ * @apiNote
+ * It is common for either the key or the value to be the input elements.
+ * In this case, the utility method
+ * {@link java.util.function.Function#identity()} may be helpful.
+ * For example, the following produces a {@code Map} mapping
+ * students to their grade point average:
+ * <pre>{@code
+     * Map<Student, Double> studentToGPA
+     *     = students.stream().collect(toConcurrentMap(Function.identity(),
+     *                                                 student -> computeGPA(student)));
+ * }</pre>
+ * And the following produces a {@code Map} mapping a unique identifier to
+ * students:
+ * <pre>{@code
+     * Map<String, Student> studentIdToStudent
+     *     = students.stream().collect(toConcurrentMap(Student::getId,
+     *                                                 Function.identity()));
+ * }</pre>
+ *
+ * <p>This is a {@link Collector.Characteristics#CONCURRENT concurrent} and
+ * {@link Collector.Characteristics#UNORDERED unordered} Collector.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the output type of the key mapping function
+ * @param <U> the output type of the value mapping function
+ * @param keyMapper the mapping function to produce keys
+ * @param valueMapper the mapping function to produce values
+ * @return a concurrent, unordered {@code Collector} which collects elements into a
+ * {@code ConcurrentMap} whose keys are the result of applying a key mapping
+ * function to the input elements, and whose values are the result of
+ * applying a value mapping function to the input elements
+ *
+ * @see #toMap(Function, Function)
+ * @see #toConcurrentMap(Function, Function, BinaryOperator)
+ * @see #toConcurrentMap(Function, Function, BinaryOperator, Supplier)
+ */
+    public static <T, K, U>
+    Collector<T, ?, ConcurrentMap<K,U>> toConcurrentMap(Function<? super T, ? extends K> keyMapper,
+                                                        Function<? super T, ? extends U> valueMapper) {
+        // throwingMerger raises IllegalStateException on duplicate keys (see javadoc).
+        return toConcurrentMap(keyMapper, valueMapper, throwingMerger(), ConcurrentHashMap::new);
+    }
+
+ /**
+ * Returns a concurrent {@code Collector} that accumulates elements into a
+ * {@code ConcurrentMap} whose keys and values are the result of applying
+ * the provided mapping functions to the input elements.
+ *
+ * <p>If the mapped keys contains duplicates (according to {@link Object#equals(Object)}),
+ * the value mapping function is applied to each equal element, and the
+ * results are merged using the provided merging function.
+ *
+ * @apiNote
+ * There are multiple ways to deal with collisions between multiple elements
+ * mapping to the same key. The other forms of {@code toConcurrentMap} simply use
+ * a merge function that throws unconditionally, but you can easily write
+ * more flexible merge policies. For example, if you have a stream
+ * of {@code Person}, and you want to produce a "phone book" mapping name to
+ * address, but it is possible that two persons have the same name, you can
+ * do as follows to gracefully deals with these collisions, and produce a
+ * {@code Map} mapping names to a concatenated list of addresses:
+ * <pre>{@code
+     * Map<String, String> phoneBook
+     *     = people.stream().collect(toConcurrentMap(Person::getName,
+     *                                               Person::getAddress,
+     *                                               (s, a) -> s + ", " + a));
+ * }</pre>
+ *
+ * <p>This is a {@link Collector.Characteristics#CONCURRENT concurrent} and
+ * {@link Collector.Characteristics#UNORDERED unordered} Collector.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the output type of the key mapping function
+ * @param <U> the output type of the value mapping function
+ * @param keyMapper a mapping function to produce keys
+ * @param valueMapper a mapping function to produce values
+ * @param mergeFunction a merge function, used to resolve collisions between
+ * values associated with the same key, as supplied
+ * to {@link Map#merge(Object, Object, BiFunction)}
+ * @return a concurrent, unordered {@code Collector} which collects elements into a
+ * {@code ConcurrentMap} whose keys are the result of applying a key mapping
+ * function to the input elements, and whose values are the result of
+ * applying a value mapping function to all input elements equal to the key
+ * and combining them using the merge function
+ *
+ * @see #toConcurrentMap(Function, Function)
+ * @see #toConcurrentMap(Function, Function, BinaryOperator, Supplier)
+ * @see #toMap(Function, Function, BinaryOperator)
+ */
+ public static <T, K, U>
+ Collector<T, ?, ConcurrentMap<K,U>>
+ toConcurrentMap(Function<? super T, ? extends K> keyMapper,
+ Function<? super T, ? extends U> valueMapper,
+ BinaryOperator<U> mergeFunction) {
+ return toConcurrentMap(keyMapper, valueMapper, mergeFunction, ConcurrentHashMap::new);
+ }
+
+ /**
+ * Returns a concurrent {@code Collector} that accumulates elements into a
+ * {@code ConcurrentMap} whose keys and values are the result of applying
+ * the provided mapping functions to the input elements.
+ *
+ * <p>If the mapped keys contain duplicates (according to {@link Object#equals(Object)}),
+ * the value mapping function is applied to each equal element, and the
+ * results are merged using the provided merging function. The
+ * {@code ConcurrentMap} is created by a provided supplier function.
+ *
+ * <p>This is a {@link Collector.Characteristics#CONCURRENT concurrent} and
+ * {@link Collector.Characteristics#UNORDERED unordered} Collector.
+ *
+ * @param <T> the type of the input elements
+ * @param <K> the output type of the key mapping function
+ * @param <U> the output type of the value mapping function
+ * @param <M> the type of the resulting {@code ConcurrentMap}
+ * @param keyMapper a mapping function to produce keys
+ * @param valueMapper a mapping function to produce values
+ * @param mergeFunction a merge function, used to resolve collisions between
+ * values associated with the same key, as supplied
+ * to {@link Map#merge(Object, Object, BiFunction)}
+ * @param mapSupplier a function which returns a new, empty {@code ConcurrentMap} into
+ * which the results will be inserted
+ * @return a concurrent, unordered {@code Collector} which collects elements into a
+ * {@code ConcurrentMap} whose keys are the result of applying a key mapping
+ * function to the input elements, and whose values are the result of
+ * applying a value mapping function to all input elements equal to the key
+ * and combining them using the merge function
+ *
+ * @see #toConcurrentMap(Function, Function)
+ * @see #toConcurrentMap(Function, Function, BinaryOperator)
+ * @see #toMap(Function, Function, BinaryOperator, Supplier)
+ */
+ public static <T, K, U, M extends ConcurrentMap<K, U>>
+ Collector<T, ?, M> toConcurrentMap(Function<? super T, ? extends K> keyMapper,
+ Function<? super T, ? extends U> valueMapper,
+ BinaryOperator<U> mergeFunction,
+ Supplier<M> mapSupplier) {
+ BiConsumer<M, T> accumulator
+ = (map, element) -> map.merge(keyMapper.apply(element),
+ valueMapper.apply(element), mergeFunction);
+ return new CollectorImpl<>(mapSupplier, accumulator, mapMerger(mergeFunction), CH_CONCURRENT_ID);
+ }
+
+ /**
+ * Returns a {@code Collector} which applies an {@code int}-producing
+ * mapping function to each input element, and returns summary statistics
+ * for the resulting values.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper a mapping function to apply to each element
+ * @return a {@code Collector} implementing the summary-statistics reduction
+ *
+ * @see #summarizingDouble(ToDoubleFunction)
+ * @see #summarizingLong(ToLongFunction)
+ */
+ public static <T>
+ Collector<T, ?, IntSummaryStatistics> summarizingInt(ToIntFunction<? super T> mapper) {
+ return new CollectorImpl<T, IntSummaryStatistics, IntSummaryStatistics>(
+ IntSummaryStatistics::new,
+ (r, t) -> r.accept(mapper.applyAsInt(t)),
+ (l, r) -> { l.combine(r); return l; }, CH_ID);
+ }
+
+ /**
+ * Returns a {@code Collector} which applies a {@code long}-producing
+ * mapping function to each input element, and returns summary statistics
+ * for the resulting values.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper the mapping function to apply to each element
+ * @return a {@code Collector} implementing the summary-statistics reduction
+ *
+ * @see #summarizingDouble(ToDoubleFunction)
+ * @see #summarizingInt(ToIntFunction)
+ */
+ public static <T>
+ Collector<T, ?, LongSummaryStatistics> summarizingLong(ToLongFunction<? super T> mapper) {
+ return new CollectorImpl<T, LongSummaryStatistics, LongSummaryStatistics>(
+ LongSummaryStatistics::new,
+ (r, t) -> r.accept(mapper.applyAsLong(t)),
+ (l, r) -> { l.combine(r); return l; }, CH_ID);
+ }
+
+ /**
+ * Returns a {@code Collector} which applies a {@code double}-producing
+ * mapping function to each input element, and returns summary statistics
+ * for the resulting values.
+ *
+ * @param <T> the type of the input elements
+ * @param mapper a mapping function to apply to each element
+ * @return a {@code Collector} implementing the summary-statistics reduction
+ *
+ * @see #summarizingLong(ToLongFunction)
+ * @see #summarizingInt(ToIntFunction)
+ */
+ public static <T>
+ Collector<T, ?, DoubleSummaryStatistics> summarizingDouble(ToDoubleFunction<? super T> mapper) {
+ return new CollectorImpl<T, DoubleSummaryStatistics, DoubleSummaryStatistics>(
+ DoubleSummaryStatistics::new,
+ (r, t) -> r.accept(mapper.applyAsDouble(t)),
+ (l, r) -> { l.combine(r); return l; }, CH_ID);
+ }
+
+ /**
+ * Implementation class used by partitioningBy.
+ */
+ private static final class Partition<T>
+ extends AbstractMap<Boolean, T>
+ implements Map<Boolean, T> {
+ final T forTrue;
+ final T forFalse;
+
+ Partition(T forTrue, T forFalse) {
+ this.forTrue = forTrue;
+ this.forFalse = forFalse;
+ }
+
+ @Override
+ public Set<Map.Entry<Boolean, T>> entrySet() {
+ return new AbstractSet<Map.Entry<Boolean, T>>() {
+ @Override
+ public Iterator<Map.Entry<Boolean, T>> iterator() {
+ Map.Entry<Boolean, T> falseEntry = new SimpleImmutableEntry<>(false, forFalse);
+ Map.Entry<Boolean, T> trueEntry = new SimpleImmutableEntry<>(true, forTrue);
+ return Arrays.asList(falseEntry, trueEntry).iterator();
+ }
+
+ @Override
+ public int size() {
+ return 2;
+ }
+ };
+ }
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/DistinctOps.java b/ojluni/src/main/java/java/util/stream/DistinctOps.java
new file mode 100644
index 0000000..66d7cb7
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/DistinctOps.java
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.Objects;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.IntFunction;
+
+/**
+ * Factory methods for transforming streams into duplicate-free streams, using
+ * {@link Object#equals(Object)} to determine equality.
+ *
+ * @since 1.8
+ */
+final class DistinctOps {
+
+ private DistinctOps() { }
+
+ /**
+ * Appends a "distinct" operation to the provided stream, and returns the
+ * new stream.
+ *
+ * @param <T> the type of both input and output elements
+ * @param upstream a reference stream with element type T
+ * @return the new stream
+ */
+ static <T> ReferencePipeline<T, T> makeRef(AbstractPipeline<?, T, ?> upstream) {
+ return new ReferencePipeline.StatefulOp<T, T>(upstream, StreamShape.REFERENCE,
+ StreamOpFlag.IS_DISTINCT | StreamOpFlag.NOT_SIZED) {
+
+ <P_IN> Node<T> reduce(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
+ // If the stream is SORTED then it should also be ORDERED so the following will also
+ // preserve the sort order
+ TerminalOp<T, LinkedHashSet<T>> reduceOp
+ = ReduceOps.<T, LinkedHashSet<T>>makeRef(LinkedHashSet::new, LinkedHashSet::add,
+ LinkedHashSet::addAll);
+ return Nodes.node(reduceOp.evaluateParallel(helper, spliterator));
+ }
+
+ @Override
+ <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<T[]> generator) {
+ if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
+ // No-op
+ return helper.evaluate(spliterator, false, generator);
+ }
+ else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ return reduce(helper, spliterator);
+ }
+ else {
+ // Holder of null state since ConcurrentHashMap does not support null values
+ AtomicBoolean seenNull = new AtomicBoolean(false);
+ ConcurrentHashMap<T, Boolean> map = new ConcurrentHashMap<>();
+ TerminalOp<T, Void> forEachOp = ForEachOps.makeRef(t -> {
+ if (t == null)
+ seenNull.set(true);
+ else
+ map.putIfAbsent(t, Boolean.TRUE);
+ }, false);
+ forEachOp.evaluateParallel(helper, spliterator);
+
+ // If null has been seen then copy the key set into a HashSet that supports null values
+ // and add null
+ Set<T> keys = map.keySet();
+ if (seenNull.get()) {
+ // TODO Implement a more efficient set-union view, rather than copying
+ keys = new HashSet<>(keys);
+ keys.add(null);
+ }
+ return Nodes.node(keys);
+ }
+ }
+
+ @Override
+ <P_IN> Spliterator<T> opEvaluateParallelLazy(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
+ if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
+ // No-op
+ return helper.wrapSpliterator(spliterator);
+ }
+ else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ // Not lazy, barrier required to preserve order
+ return reduce(helper, spliterator).spliterator();
+ }
+ else {
+ // Lazy
+ return new StreamSpliterators.DistinctSpliterator<>(helper.wrapSpliterator(spliterator));
+ }
+ }
+
+ @Override
+ Sink<T> opWrapSink(int flags, Sink<T> sink) {
+ Objects.requireNonNull(sink);
+
+ if (StreamOpFlag.DISTINCT.isKnown(flags)) {
+ return sink;
+ } else if (StreamOpFlag.SORTED.isKnown(flags)) {
+ return new Sink.ChainedReference<T, T>(sink) {
+ boolean seenNull;
+ T lastSeen;
+
+ @Override
+ public void begin(long size) {
+ seenNull = false;
+ lastSeen = null;
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void end() {
+ seenNull = false;
+ lastSeen = null;
+ downstream.end();
+ }
+
+ @Override
+ public void accept(T t) {
+ if (t == null) {
+ if (!seenNull) {
+ seenNull = true;
+ downstream.accept(lastSeen = null);
+ }
+ } else if (lastSeen == null || !t.equals(lastSeen)) {
+ downstream.accept(lastSeen = t);
+ }
+ }
+ };
+ } else {
+ return new Sink.ChainedReference<T, T>(sink) {
+ Set<T> seen;
+
+ @Override
+ public void begin(long size) {
+ seen = new HashSet<>();
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void end() {
+ seen = null;
+ downstream.end();
+ }
+
+ @Override
+ public void accept(T t) {
+ if (!seen.contains(t)) {
+ seen.add(t);
+ downstream.accept(t);
+ }
+ }
+ };
+ }
+ }
+ };
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/DoublePipeline.java b/ojluni/src/main/java/java/util/stream/DoublePipeline.java
new file mode 100644
index 0000000..3b6335b
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/DoublePipeline.java
@@ -0,0 +1,639 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.DoubleSummaryStatistics;
+import java.util.Objects;
+import java.util.OptionalDouble;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiConsumer;
+import java.util.function.BinaryOperator;
+import java.util.function.DoubleBinaryOperator;
+import java.util.function.DoubleConsumer;
+import java.util.function.DoubleFunction;
+import java.util.function.DoublePredicate;
+import java.util.function.DoubleToIntFunction;
+import java.util.function.DoubleToLongFunction;
+import java.util.function.DoubleUnaryOperator;
+import java.util.function.IntFunction;
+import java.util.function.ObjDoubleConsumer;
+import java.util.function.Supplier;
+
+/**
+ * Abstract base class for an intermediate pipeline stage or pipeline source
+ * stage whose elements are of type {@code double}.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ *
+ * @since 1.8
+ */
+abstract class DoublePipeline<E_IN>
+ extends AbstractPipeline<E_IN, Double, DoubleStream>
+ implements DoubleStream {
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Supplier<Spliterator>} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ */
+ DoublePipeline(Supplier<? extends Spliterator<Double>> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Spliterator} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ */
+ DoublePipeline(Spliterator<Double> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for appending an intermediate operation onto an existing
+ * pipeline.
+ *
+ * @param upstream the upstream element source.
+ * @param opFlags the operation flags
+ */
+ DoublePipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
+ super(upstream, opFlags);
+ }
+
+ /**
+ * Adapt a {@code Sink<Double>} to a {@code DoubleConsumer}, ideally simply
+ * by casting.
+ */
+ private static DoubleConsumer adapt(Sink<Double> sink) {
+ if (sink instanceof DoubleConsumer) {
+ return (DoubleConsumer) sink;
+ } else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(AbstractPipeline.class,
+ "using DoubleStream.adapt(Sink<Double> s)");
+ return sink::accept;
+ }
+ }
+
+ /**
+ * Adapt a {@code Spliterator<Double>} to a {@code Spliterator.OfDouble}.
+ *
+ * @implNote
+ * The implementation attempts to cast to a Spliterator.OfDouble, and throws
+ * an exception if this cast is not possible.
+ */
+ private static Spliterator.OfDouble adapt(Spliterator<Double> s) {
+ if (s instanceof Spliterator.OfDouble) {
+ return (Spliterator.OfDouble) s;
+ } else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(AbstractPipeline.class,
+ "using DoubleStream.adapt(Spliterator<Double> s)");
+ throw new UnsupportedOperationException("DoubleStream.adapt(Spliterator<Double> s)");
+ }
+ }
+
+
+ // Shape-specific methods
+
+ @Override
+ final StreamShape getOutputShape() {
+ return StreamShape.DOUBLE_VALUE;
+ }
+
+ @Override
+ final <P_IN> Node<Double> evaluateToNode(PipelineHelper<Double> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree,
+ IntFunction<Double[]> generator) {
+ return Nodes.collectDouble(helper, spliterator, flattenTree);
+ }
+
+ @Override
+ final <P_IN> Spliterator<Double> wrap(PipelineHelper<Double> ph,
+ Supplier<Spliterator<P_IN>> supplier,
+ boolean isParallel) {
+ return new StreamSpliterators.DoubleWrappingSpliterator<>(ph, supplier, isParallel);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ final Spliterator.OfDouble lazySpliterator(Supplier<? extends Spliterator<Double>> supplier) {
+ return new StreamSpliterators.DelegatingSpliterator.OfDouble((Supplier<Spliterator.OfDouble>) supplier);
+ }
+
+ @Override
+ final void forEachWithCancel(Spliterator<Double> spliterator, Sink<Double> sink) {
+ Spliterator.OfDouble spl = adapt(spliterator);
+ DoubleConsumer adaptedSink = adapt(sink);
+ do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
+ }
+
+ @Override
+ final Node.Builder<Double> makeNodeBuilder(long exactSizeIfKnown, IntFunction<Double[]> generator) {
+ return Nodes.doubleBuilder(exactSizeIfKnown);
+ }
+
+
+ // DoubleStream
+
+ @Override
+ public final PrimitiveIterator.OfDouble iterator() {
+ return Spliterators.iterator(spliterator());
+ }
+
+ @Override
+ public final Spliterator.OfDouble spliterator() {
+ return adapt(super.spliterator());
+ }
+
+ // Stateless intermediate ops from DoubleStream
+
+ @Override
+ public final Stream<Double> boxed() {
+ return mapToObj(Double::valueOf);
+ }
+
+ @Override
+ public final DoubleStream map(DoubleUnaryOperator mapper) {
+ Objects.requireNonNull(mapper);
+ return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedDouble<Double>(sink) {
+ @Override
+ public void accept(double t) {
+ downstream.accept(mapper.applyAsDouble(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final <U> Stream<U> mapToObj(DoubleFunction<? extends U> mapper) {
+ Objects.requireNonNull(mapper);
+ return new ReferencePipeline.StatelessOp<Double, U>(this, StreamShape.DOUBLE_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<U> sink) {
+ return new Sink.ChainedDouble<U>(sink) {
+ @Override
+ public void accept(double t) {
+ downstream.accept(mapper.apply(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final IntStream mapToInt(DoubleToIntFunction mapper) {
+ Objects.requireNonNull(mapper);
+ return new IntPipeline.StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedDouble<Integer>(sink) {
+ @Override
+ public void accept(double t) {
+ downstream.accept(mapper.applyAsInt(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final LongStream mapToLong(DoubleToLongFunction mapper) {
+ Objects.requireNonNull(mapper);
+ return new LongPipeline.StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedDouble<Long>(sink) {
+ @Override
+ public void accept(double t) {
+ downstream.accept(mapper.applyAsLong(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final DoubleStream flatMap(DoubleFunction<? extends DoubleStream> mapper) {
+ return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedDouble<Double>(sink) {
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(double t) {
+ try (DoubleStream result = mapper.apply(t)) {
+ // We can do better than this too; optimize for depth=0 case and just grab spliterator and forEach it
+ if (result != null)
+ result.sequential().forEach(i -> downstream.accept(i));
+ }
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public DoubleStream unordered() {
+ if (!isOrdered())
+ return this;
+ return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE, StreamOpFlag.NOT_ORDERED) {
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
+ return sink;
+ }
+ };
+ }
+
+ @Override
+ public final DoubleStream filter(DoublePredicate predicate) {
+ Objects.requireNonNull(predicate);
+ return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
+ StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedDouble<Double>(sink) {
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(double t) {
+ if (predicate.test(t))
+ downstream.accept(t);
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final DoubleStream peek(DoubleConsumer action) {
+ Objects.requireNonNull(action);
+ return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
+ 0) {
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedDouble<Double>(sink) {
+ @Override
+ public void accept(double t) {
+ action.accept(t);
+ downstream.accept(t);
+ }
+ };
+ }
+ };
+ }
+
+ // Stateful intermediate ops from DoubleStream
+
+ @Override
+ public final DoubleStream limit(long maxSize) {
+ if (maxSize < 0)
+ throw new IllegalArgumentException(Long.toString(maxSize));
+ return SliceOps.makeDouble(this, (long) 0, maxSize);
+ }
+
+ @Override
+ public final DoubleStream skip(long n) {
+ if (n < 0)
+ throw new IllegalArgumentException(Long.toString(n));
+ if (n == 0)
+ return this;
+ else {
+ long limit = -1;
+ return SliceOps.makeDouble(this, n, limit);
+ }
+ }
+
+ @Override
+ public final DoubleStream sorted() {
+ return SortedOps.makeDouble(this);
+ }
+
+ @Override
+ public final DoubleStream distinct() {
+ // While functional and quick to implement, this approach is not very efficient.
+ // An efficient version requires a double-specific map/set implementation.
+ return boxed().distinct().mapToDouble(i -> (double) i);
+ }
+
+ // Terminal ops from DoubleStream
+
+ @Override
+ public void forEach(DoubleConsumer consumer) {
+ evaluate(ForEachOps.makeDouble(consumer, false));
+ }
+
+ @Override
+ public void forEachOrdered(DoubleConsumer consumer) {
+ evaluate(ForEachOps.makeDouble(consumer, true));
+ }
+
+ @Override
+ public final double sum() {
+ /*
+ * In the arrays allocated for the collect operation, index 0
+ * holds the high-order bits of the running sum, index 1 holds
+ * the low-order bits of the sum computed via compensated
+ * summation, and index 2 holds the simple sum used to compute
+ * the proper result if the stream contains infinite values of
+ * the same sign.
+ */
+ double[] summation = collect(() -> new double[3],
+ (ll, d) -> {
+ Collectors.sumWithCompensation(ll, d);
+ ll[2] += d;
+ },
+ (ll, rr) -> {
+ Collectors.sumWithCompensation(ll, rr[0]);
+ Collectors.sumWithCompensation(ll, rr[1]);
+ ll[2] += rr[2];
+ });
+
+ return Collectors.computeFinalSum(summation);
+ }
+
+ @Override
+ public final OptionalDouble min() {
+ return reduce(Math::min);
+ }
+
+ @Override
+ public final OptionalDouble max() {
+ return reduce(Math::max);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implNote The {@code double} format can represent all
+ * consecutive integers in the range -2<sup>53</sup> to
+ * 2<sup>53</sup>. If the pipeline has more than 2<sup>53</sup>
+ * values, the divisor in the average computation will saturate at
+ * 2<sup>53</sup>, leading to additional numerical errors.
+ */
+ @Override
+ public final OptionalDouble average() {
+ /*
+ * In the arrays allocated for the collect operation, index 0
+ * holds the high-order bits of the running sum, index 1 holds
+ * the low-order bits of the sum computed via compensated
+ * summation, index 2 holds the number of values seen, index 3
+ * holds the simple sum.
+ */
+ double[] avg = collect(() -> new double[4],
+ (ll, d) -> {
+ ll[2]++;
+ Collectors.sumWithCompensation(ll, d);
+ ll[3] += d;
+ },
+ (ll, rr) -> {
+ Collectors.sumWithCompensation(ll, rr[0]);
+ Collectors.sumWithCompensation(ll, rr[1]);
+ ll[2] += rr[2];
+ ll[3] += rr[3];
+ });
+ return avg[2] > 0
+ ? OptionalDouble.of(Collectors.computeFinalSum(avg) / avg[2])
+ : OptionalDouble.empty();
+ }
+
+ @Override
+ public final long count() {
+ return mapToLong(e -> 1L).sum();
+ }
+
+ @Override
+ public final DoubleSummaryStatistics summaryStatistics() {
+ return collect(DoubleSummaryStatistics::new, DoubleSummaryStatistics::accept,
+ DoubleSummaryStatistics::combine);
+ }
+
+ @Override
+ public final double reduce(double identity, DoubleBinaryOperator op) {
+ return evaluate(ReduceOps.makeDouble(identity, op));
+ }
+
+ @Override
+ public final OptionalDouble reduce(DoubleBinaryOperator op) {
+ return evaluate(ReduceOps.makeDouble(op));
+ }
+
+ @Override
+ public final <R> R collect(Supplier<R> supplier,
+ ObjDoubleConsumer<R> accumulator,
+ BiConsumer<R, R> combiner) {
+ BinaryOperator<R> operator = (left, right) -> {
+ combiner.accept(left, right);
+ return left;
+ };
+ return evaluate(ReduceOps.makeDouble(supplier, accumulator, operator));
+ }
+
+ @Override
+ public final boolean anyMatch(DoublePredicate predicate) {
+ return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.ANY));
+ }
+
+ @Override
+ public final boolean allMatch(DoublePredicate predicate) {
+ return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.ALL));
+ }
+
+ @Override
+ public final boolean noneMatch(DoublePredicate predicate) {
+ return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.NONE));
+ }
+
+ @Override
+ public final OptionalDouble findFirst() {
+ return evaluate(FindOps.makeDouble(true));
+ }
+
+ @Override
+ public final OptionalDouble findAny() {
+ return evaluate(FindOps.makeDouble(false));
+ }
+
+ @Override
+ public final double[] toArray() {
+ return Nodes.flattenDouble((Node.OfDouble) evaluateToArrayNode(Double[]::new))
+ .asPrimitiveArray();
+ }
+
+ //
+
+ /**
+ * Source stage of a DoubleStream
+ *
+ * @param <E_IN> type of elements in the upstream source
+ */
+ static class Head<E_IN> extends DoublePipeline<E_IN> {
+ /**
+ * Constructor for the source stage of a DoubleStream.
+ *
+ * @param source {@code Supplier<Spliterator>} describing the stream
+ * source
+ * @param sourceFlags the source flags for the stream source, described
+ * in {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ Head(Supplier<? extends Spliterator<Double>> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for the source stage of a DoubleStream.
+ *
+ * @param source {@code Spliterator} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described
+ * in {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ Head(Spliterator<Double> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ @Override
+ final boolean opIsStateful() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ final Sink<E_IN> opWrapSink(int flags, Sink<Double> sink) {
+ throw new UnsupportedOperationException();
+ }
+
+ // Optimized sequential terminal operations for the head of the pipeline
+
+ @Override
+ public void forEach(DoubleConsumer consumer) {
+ if (!isParallel()) {
+ adapt(sourceStageSpliterator()).forEachRemaining(consumer);
+ }
+ else {
+ super.forEach(consumer);
+ }
+ }
+
+ @Override
+ public void forEachOrdered(DoubleConsumer consumer) {
+ if (!isParallel()) {
+ adapt(sourceStageSpliterator()).forEachRemaining(consumer);
+ }
+ else {
+ super.forEachOrdered(consumer);
+ }
+ }
+
+ }
+
+ /**
+ * Base class for a stateless intermediate stage of a DoubleStream.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+ abstract static class StatelessOp<E_IN> extends DoublePipeline<E_IN> {
+ /**
+ * Construct a new DoubleStream by appending a stateless intermediate
+ * operation to an existing stream.
+ *
+ * @param upstream the upstream pipeline stage
+ * @param inputShape the stream shape for the upstream pipeline stage
+ * @param opFlags operation flags for the new stage
+ */
+ StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
+ StreamShape inputShape,
+ int opFlags) {
+ super(upstream, opFlags);
+ assert upstream.getOutputShape() == inputShape;
+ }
+
+ @Override
+ final boolean opIsStateful() {
+ return false;
+ }
+ }
+
+ /**
+ * Base class for a stateful intermediate stage of a DoubleStream.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+ abstract static class StatefulOp<E_IN> extends DoublePipeline<E_IN> {
+ /**
+ * Construct a new DoubleStream by appending a stateful intermediate
+ * operation to an existing stream.
+ *
+ * @param upstream the upstream pipeline stage
+ * @param inputShape the stream shape for the upstream pipeline stage
+ * @param opFlags operation flags for the new stage
+ */
+ StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
+ StreamShape inputShape,
+ int opFlags) {
+ super(upstream, opFlags);
+ assert upstream.getOutputShape() == inputShape;
+ }
+
+ @Override
+ final boolean opIsStateful() {
+ return true;
+ }
+
+ @Override
+ abstract <P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<Double[]> generator);
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/DoubleStream.java b/ojluni/src/main/java/java/util/stream/DoubleStream.java
new file mode 100644
index 0000000..347587e
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/DoubleStream.java
@@ -0,0 +1,892 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.DoubleSummaryStatistics;
+import java.util.Objects;
+import java.util.OptionalDouble;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiConsumer;
+import java.util.function.DoubleBinaryOperator;
+import java.util.function.DoubleConsumer;
+import java.util.function.DoubleFunction;
+import java.util.function.DoublePredicate;
+import java.util.function.DoubleSupplier;
+import java.util.function.DoubleToIntFunction;
+import java.util.function.DoubleToLongFunction;
+import java.util.function.DoubleUnaryOperator;
+import java.util.function.Function;
+import java.util.function.ObjDoubleConsumer;
+import java.util.function.Supplier;
+
+/**
+ * A sequence of primitive double-valued elements supporting sequential and parallel
+ * aggregate operations. This is the {@code double} primitive specialization of
+ * {@link Stream}.
+ *
+ * <p>The following example illustrates an aggregate operation using
+ * {@link Stream} and {@link DoubleStream}, computing the sum of the weights of the
+ * red widgets:
+ *
+ * <pre>{@code
+ * double sum = widgets.stream()
+ * .filter(w -> w.getColor() == RED)
+ * .mapToDouble(w -> w.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * See the class documentation for {@link Stream} and the package documentation
+ * for <a href="package-summary.html">java.util.stream</a> for additional
+ * specification of streams, stream operations, stream pipelines, and
+ * parallelism.
+ *
+ * @since 1.8
+ * @see Stream
+ * @see <a href="package-summary.html">java.util.stream</a>
+ */
+public interface DoubleStream extends BaseStream<Double, DoubleStream> {
+
+ /**
+ * Returns a stream consisting of the elements of this stream that match
+ * the given predicate.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to each element to determine if it
+ * should be included
+ * @return the new stream
+ */
+ DoubleStream filter(DoublePredicate predicate);
+
+ /**
+ * Returns a stream consisting of the results of applying the given
+ * function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ DoubleStream map(DoubleUnaryOperator mapper);
+
+ /**
+ * Returns an object-valued {@code Stream} consisting of the results of
+ * applying the given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">
+ * intermediate operation</a>.
+ *
+ * @param <U> the element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ <U> Stream<U> mapToObj(DoubleFunction<? extends U> mapper);
+
+ /**
+ * Returns an {@code IntStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ IntStream mapToInt(DoubleToIntFunction mapper);
+
+ /**
+ * Returns a {@code LongStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ LongStream mapToLong(DoubleToLongFunction mapper);
+
+ /**
+ * Returns a stream consisting of the results of replacing each element of
+ * this stream with the contents of a mapped stream produced by applying
+ * the provided mapping function to each element. Each mapped stream is
+ * {@link java.util.stream.BaseStream#close() closed} after its contents
+ * have been placed into this stream. (If a mapped stream is {@code null}
+ * an empty stream is used, instead.)
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element which produces a
+ * {@code DoubleStream} of new values
+ * @return the new stream
+ * @see Stream#flatMap(Function)
+ */
+ DoubleStream flatMap(DoubleFunction<? extends DoubleStream> mapper);
+
+ /**
+ * Returns a stream consisting of the distinct elements of this stream. The
+ * elements are compared for equality according to
+ * {@link java.lang.Double#compare(double, double)}.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the result stream
+ */
+ DoubleStream distinct();
+
+ /**
+ * Returns a stream consisting of the elements of this stream in sorted
+ * order. The elements are compared for equality according to
+ * {@link java.lang.Double#compare(double, double)}.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the result stream
+ */
+ DoubleStream sorted();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, additionally
+ * performing the provided action on each element as elements are consumed
+ * from the resulting stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, the action may be called at
+ * whatever time and in whatever thread the element is made available by the
+ * upstream operation. If the action modifies shared state,
+ * it is responsible for providing the required synchronization.
+ *
+ * @apiNote This method exists mainly to support debugging, where you want
+ * to see the elements as they flow past a certain point in a pipeline:
+ * <pre>{@code
+ * DoubleStream.of(1, 2, 3, 4)
+ * .filter(e -> e > 2)
+ * .peek(e -> System.out.println("Filtered value: " + e))
+ * .map(e -> e * e)
+ * .peek(e -> System.out.println("Mapped value: " + e))
+ * .sum();
+ * }</pre>
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements as
+ * they are consumed from the stream
+ * @return the new stream
+ */
+ DoubleStream peek(DoubleConsumer action);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, truncated
+ * to be no longer than {@code maxSize} in length.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @apiNote
+ * While {@code limit()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code maxSize}, since {@code limit(n)}
+ * is constrained to return not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(DoubleSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code limit()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code limit()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param maxSize the number of elements the stream should be limited to
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code maxSize} is negative
+ */
+ DoubleStream limit(long maxSize);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after discarding the first {@code n} elements of the stream.
+ * If this stream contains fewer than {@code n} elements then an
+ * empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @apiNote
+ * While {@code skip()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code n}, since {@code skip(n)}
+ * is constrained to skip not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(DoubleSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code skip()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code skip()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param n the number of leading elements to skip
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code n} is negative
+ */
+ DoubleStream skip(long n);
+
+ /**
+ * Performs an action for each element of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, this operation does <em>not</em>
+ * guarantee to respect the encounter order of the stream, as doing so
+ * would sacrifice the benefit of parallelism. For any given element, the
+ * action may be performed at whatever time and in whatever thread the
+ * library chooses. If the action accesses shared state, it is
+ * responsible for providing the required synchronization.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ */
+ void forEach(DoubleConsumer action);
+
+ /**
+ * Performs an action for each element of this stream, guaranteeing that
+ * each element is processed in encounter order for streams that have a
+ * defined encounter order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ * @see #forEach(DoubleConsumer)
+ */
+ void forEachOrdered(DoubleConsumer action);
+
+ /**
+ * Returns an array containing the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an array containing the elements of this stream
+ */
+ double[] toArray();
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity value and an
+ * <a href="package-summary.html#Associativity">associative</a>
+ * accumulation function, and returns the reduced value. This is equivalent
+ * to:
+ * <pre>{@code
+ * double result = identity;
+ * for (double element : this stream)
+ * result = accumulator.applyAsDouble(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the accumulator
+ * function. This means that for all {@code x},
+ * {@code accumulator.apply(identity, x)} is equal to {@code x}.
+ * The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Sum, min, max, and average are all special cases of reduction.
+ * Summing a stream of numbers can be expressed as:
+     *
+ * <pre>{@code
+ * double sum = numbers.reduce(0, (a, b) -> a+b);
+ * }</pre>
+ *
+ * or more compactly:
+ *
+ * <pre>{@code
+ * double sum = numbers.reduce(0, Double::sum);
+ * }</pre>
+ *
+ * <p>While this may seem a more roundabout way to perform an aggregation
+ * compared to simply mutating a running total in a loop, reduction
+ * operations parallelize more gracefully, without needing additional
+ * synchronization and with greatly reduced risk of data races.
+ *
+ * @param identity the identity value for the accumulating function
+ * @param op an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values
+ * @return the result of the reduction
+ * @see #sum()
+ * @see #min()
+ * @see #max()
+ * @see #average()
+ */
+ double reduce(double identity, DoubleBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using an
+ * <a href="package-summary.html#Associativity">associative</a> accumulation
+ * function, and returns an {@code OptionalDouble} describing the reduced
+ * value, if any. This is equivalent to:
+ * <pre>{@code
+ * boolean foundAny = false;
+ * double result = null;
+ * for (double element : this stream) {
+ * if (!foundAny) {
+ * foundAny = true;
+ * result = element;
+ * }
+ * else
+ * result = accumulator.applyAsDouble(result, element);
+ * }
+ * return foundAny ? OptionalDouble.of(result) : OptionalDouble.empty();
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param op an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values
+ * @return the result of the reduction
+ * @see #reduce(double, DoubleBinaryOperator)
+ */
+ OptionalDouble reduce(DoubleBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream. A mutable
+ * reduction is one in which the reduced value is a mutable result container,
+ * such as an {@code ArrayList}, and elements are incorporated by updating
+ * the state of the result rather than by replacing the result. This
+ * produces a result equivalent to:
+ * <pre>{@code
+ * R result = supplier.get();
+ * for (double element : this stream)
+ * accumulator.accept(result, element);
+ * return result;
+ * }</pre>
+ *
+ * <p>Like {@link #reduce(double, DoubleBinaryOperator)}, {@code collect}
+ * operations can be parallelized without requiring additional
+ * synchronization.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param <R> type of the result
+ * @param supplier a function that creates a new result container. For a
+ * parallel execution, this function may be called
+ * multiple times and must return a fresh value each time.
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for incorporating an additional element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values, which must be
+ * compatible with the accumulator function
+ * @return the result of the reduction
+ * @see Stream#collect(Supplier, BiConsumer, BiConsumer)
+ */
+ <R> R collect(Supplier<R> supplier,
+ ObjDoubleConsumer<R> accumulator,
+ BiConsumer<R, R> combiner);
+
+ /**
+ * Returns the sum of elements in this stream.
+ *
+ * Summation is a special case of a <a
+ * href="package-summary.html#Reduction">reduction</a>. If
+ * floating-point summation were exact, this method would be
+ * equivalent to:
+ *
+ * <pre>{@code
+ * return reduce(0, Double::sum);
+ * }</pre>
+ *
+ * However, since floating-point summation is not exact, the above
+ * code is not necessarily equivalent to the summation computation
+ * done by this method.
+ *
+ * <p>If any stream element is a NaN or the sum is at any point a NaN
+ * then the sum will be NaN.
+ *
+ * The value of a floating-point sum is a function both
+ * of the input values as well as the order of addition
+ * operations. The order of addition operations of this method is
+ * intentionally not defined to allow for implementation
+ * flexibility to improve the speed and accuracy of the computed
+ * result.
+ *
+ * In particular, this method may be implemented using compensated
+ * summation or other technique to reduce the error bound in the
+ * numerical sum compared to a simple summation of {@code double}
+ * values.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Elements sorted by increasing absolute magnitude tend
+ * to yield more accurate results.
+ *
+ * @return the sum of elements in this stream
+ */
+ double sum();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the minimum element of this
+ * stream, or an empty OptionalDouble if this stream is empty. The minimum
+ * element will be {@code Double.NaN} if any stream element was NaN. Unlike
+ * the numerical comparison operators, this method considers negative zero
+ * to be strictly smaller than positive zero. This is a special case of a
+ * <a href="package-summary.html#Reduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return reduce(Double::min);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code OptionalDouble} containing the minimum element of this
+ * stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble min();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the maximum element of this
+ * stream, or an empty OptionalDouble if this stream is empty. The maximum
+ * element will be {@code Double.NaN} if any stream element was NaN. Unlike
+ * the numerical comparison operators, this method considers negative zero
+ * to be strictly smaller than positive zero. This is a
+ * special case of a
+ * <a href="package-summary.html#Reduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return reduce(Double::max);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code OptionalDouble} containing the maximum element of this
+ * stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble max();
+
+ /**
+ * Returns the count of elements in this stream. This is a special case of
+ * a <a href="package-summary.html#Reduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return mapToLong(e -> 1L).sum();
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return the count of elements in this stream
+ */
+ long count();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the arithmetic
+ * mean of elements of this stream, or an empty optional if this
+ * stream is empty.
+ *
+ * If any recorded value is a NaN or the sum is at any point a NaN
+ * then the average will be NaN.
+ *
+ * <p>The average returned can vary depending upon the order in
+ * which values are recorded.
+ *
+ * This method may be implemented using compensated summation or
+ * other technique to reduce the error bound in the {@link #sum
+ * numerical sum} used to compute the average.
+ *
+ * <p>The average is a special case of a <a
+ * href="package-summary.html#Reduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Elements sorted by increasing absolute magnitude tend
+ * to yield more accurate results.
+ *
+ * @return an {@code OptionalDouble} containing the average element of this
+ * stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble average();
+
+ /**
+ * Returns a {@code DoubleSummaryStatistics} describing various summary data
+ * about the elements of this stream. This is a special
+ * case of a <a href="package-summary.html#Reduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return a {@code DoubleSummaryStatistics} describing various summary data
+ * about the elements of this stream
+ */
+ DoubleSummaryStatistics summaryStatistics();
+
+ /**
+ * Returns whether any elements of this stream match the provided
+ * predicate. May not evaluate the predicate on all elements if not
+ * necessary for determining the result. If the stream is empty then
+ * {@code false} is returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>existential quantification</em> of the
+ * predicate over the elements of the stream (for some x P(x)).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if any elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean anyMatch(DoublePredicate predicate);
+
+ /**
+ * Returns whether all elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result. If the stream is empty then {@code true} is
+ * returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>universal quantification</em> of the
+ * predicate over the elements of the stream (for all x P(x)). If the
+ * stream is empty, the quantification is said to be <em>vacuously
+ * satisfied</em> and is always {@code true} (regardless of P(x)).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if either all elements of the stream match the
+ * provided predicate or the stream is empty, otherwise {@code false}
+ */
+ boolean allMatch(DoublePredicate predicate);
+
+ /**
+ * Returns whether no elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result. If the stream is empty then {@code true} is
+ * returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>universal quantification</em> of the
+ * negated predicate over the elements of the stream (for all x ~P(x)). If
+ * the stream is empty, the quantification is said to be vacuously satisfied
+ * and is always {@code true}, regardless of P(x).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if either no elements of the stream match the
+ * provided predicate or the stream is empty, otherwise {@code false}
+ */
+ boolean noneMatch(DoublePredicate predicate);
+
+ /**
+ * Returns an {@link OptionalDouble} describing the first element of this
+ * stream, or an empty {@code OptionalDouble} if the stream is empty. If
+ * the stream has no encounter order, then any element may be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @return an {@code OptionalDouble} describing the first element of this
+ * stream, or an empty {@code OptionalDouble} if the stream is empty
+ */
+ OptionalDouble findFirst();
+
+ /**
+ * Returns an {@link OptionalDouble} describing some element of the stream,
+ * or an empty {@code OptionalDouble} if the stream is empty.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic; it is
+ * free to select any element in the stream. This is to allow for maximal
+ * performance in parallel operations; the cost is that multiple invocations
+ * on the same source may not return the same result. (If a stable result
+ * is desired, use {@link #findFirst()} instead.)
+ *
+ * @return an {@code OptionalDouble} describing some element of this stream,
+ * or an empty {@code OptionalDouble} if the stream is empty
+ * @see #findFirst()
+ */
+ OptionalDouble findAny();
+
+ /**
+ * Returns a {@code Stream} consisting of the elements of this stream,
+ * boxed to {@code Double}.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+     * @return a {@code Stream} consisting of the elements of this stream,
+ * each boxed to a {@code Double}
+ */
+ Stream<Double> boxed();
+
+ @Override
+ DoubleStream sequential();
+
+ @Override
+ DoubleStream parallel();
+
+ @Override
+ PrimitiveIterator.OfDouble iterator();
+
+ @Override
+ Spliterator.OfDouble spliterator();
+
+
+ // Static factories
+
+ /**
+ * Returns a builder for a {@code DoubleStream}.
+ *
+ * @return a stream builder
+ */
+ public static Builder builder() {
+ return new Streams.DoubleStreamBuilderImpl();
+ }
+
+ /**
+ * Returns an empty sequential {@code DoubleStream}.
+ *
+ * @return an empty sequential stream
+ */
+ public static DoubleStream empty() {
+ return StreamSupport.doubleStream(Spliterators.emptyDoubleSpliterator(), false);
+ }
+
+ /**
+ * Returns a sequential {@code DoubleStream} containing a single element.
+ *
+ * @param t the single element
+ * @return a singleton sequential stream
+ */
+ public static DoubleStream of(double t) {
+ return StreamSupport.doubleStream(new Streams.DoubleStreamBuilderImpl(t), false);
+ }
+
+ /**
+ * Returns a sequential ordered stream whose elements are the specified values.
+ *
+ * @param values the elements of the new stream
+ * @return the new stream
+ */
+ public static DoubleStream of(double... values) {
+ return Arrays.stream(values);
+ }
+
+ /**
+ * Returns an infinite sequential ordered {@code DoubleStream} produced by iterative
+ * application of a function {@code f} to an initial element {@code seed},
+ * producing a {@code Stream} consisting of {@code seed}, {@code f(seed)},
+ * {@code f(f(seed))}, etc.
+ *
+ * <p>The first element (position {@code 0}) in the {@code DoubleStream}
+ * will be the provided {@code seed}. For {@code n > 0}, the element at
+ * position {@code n}, will be the result of applying the function {@code f}
+ * to the element at position {@code n - 1}.
+ *
+ * @param seed the initial element
+     * @param f a function to be applied to the previous element to produce
+ * a new element
+ * @return a new sequential {@code DoubleStream}
+ */
+ public static DoubleStream iterate(final double seed, final DoubleUnaryOperator f) {
+ Objects.requireNonNull(f);
+ final PrimitiveIterator.OfDouble iterator = new PrimitiveIterator.OfDouble() {
+ double t = seed;
+
+ @Override
+ public boolean hasNext() {
+ return true;
+ }
+
+ @Override
+ public double nextDouble() {
+ double v = t;
+ t = f.applyAsDouble(t);
+ return v;
+ }
+ };
+ return StreamSupport.doubleStream(Spliterators.spliteratorUnknownSize(
+ iterator,
+ Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL), false);
+ }
+
+ /**
+ * Returns an infinite sequential unordered stream where each element is
+ * generated by the provided {@code DoubleSupplier}. This is suitable for
+ * generating constant streams, streams of random elements, etc.
+ *
+ * @param s the {@code DoubleSupplier} for generated elements
+ * @return a new infinite sequential unordered {@code DoubleStream}
+ */
+ public static DoubleStream generate(DoubleSupplier s) {
+ Objects.requireNonNull(s);
+ return StreamSupport.doubleStream(
+ new StreamSpliterators.InfiniteSupplyingSpliterator.OfDouble(Long.MAX_VALUE, s), false);
+ }
+
+ /**
+ * Creates a lazily concatenated stream whose elements are all the
+ * elements of the first stream followed by all the elements of the
+ * second stream. The resulting stream is ordered if both
+ * of the input streams are ordered, and parallel if either of the input
+ * streams is parallel. When the resulting stream is closed, the close
+ * handlers for both input streams are invoked.
+ *
+ * @implNote
+ * Use caution when constructing streams from repeated concatenation.
+ * Accessing an element of a deeply concatenated stream can result in deep
+     * call chains, or even {@code StackOverflowError}.
+ *
+ * @param a the first stream
+ * @param b the second stream
+ * @return the concatenation of the two input streams
+ */
+ public static DoubleStream concat(DoubleStream a, DoubleStream b) {
+ Objects.requireNonNull(a);
+ Objects.requireNonNull(b);
+
+ Spliterator.OfDouble split = new Streams.ConcatSpliterator.OfDouble(
+ a.spliterator(), b.spliterator());
+ DoubleStream stream = StreamSupport.doubleStream(split, a.isParallel() || b.isParallel());
+ return stream.onClose(Streams.composedClose(a, b));
+ }
+
+ /**
+ * A mutable builder for a {@code DoubleStream}.
+ *
+ * <p>A stream builder has a lifecycle, which starts in a building
+ * phase, during which elements can be added, and then transitions to a built
+ * phase, after which elements may not be added. The built phase
+ * begins when the {@link #build()} method is called, which creates an
+ * ordered stream whose elements are the elements that were added to the
+ * stream builder, in the order they were added.
+ *
+ * @see DoubleStream#builder()
+ * @since 1.8
+ */
+ public interface Builder extends DoubleConsumer {
+
+ /**
+ * Adds an element to the stream being built.
+ *
+ * @throws IllegalStateException if the builder has already transitioned
+ * to the built state
+ */
+ @Override
+ void accept(double t);
+
+ /**
+ * Adds an element to the stream being built.
+ *
+ * @implSpec
+ * The default implementation behaves as if:
+ * <pre>{@code
+ * accept(t)
+ * return this;
+ * }</pre>
+ *
+ * @param t the element to add
+ * @return {@code this} builder
+ * @throws IllegalStateException if the builder has already transitioned
+ * to the built state
+ */
+ default Builder add(double t) {
+ accept(t);
+ return this;
+ }
+
+ /**
+ * Builds the stream, transitioning this builder to the built state.
+ * An {@code IllegalStateException} is thrown if there are further
+ * attempts to operate on the builder after it has entered the built
+ * state.
+ *
+ * @return the built stream
+ * @throws IllegalStateException if the builder has already transitioned
+ * to the built state
+ */
+ DoubleStream build();
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/FindOps.java b/ojluni/src/main/java/java/util/stream/FindOps.java
new file mode 100644
index 0000000..197d99c
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/FindOps.java
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Optional;
+import java.util.OptionalDouble;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
+import java.util.Spliterator;
+import java.util.concurrent.CountedCompleter;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+/**
+ * Factory for instances of a short-circuiting {@code TerminalOp} that searches
+ * for an element in a stream pipeline, and terminates when it finds one.
+ * Supported variants include find-first (find the first element in the
+ * encounter order) and find-any (find any element, may not be the first in
+ * encounter order.)
+ *
+ * @since 1.8
+ */
+final class FindOps {
+
+ private FindOps() { }
+
+ /**
+ * Constructs a {@code TerminalOp} for streams of objects.
+ *
+ * @param <T> the type of elements of the stream
+ * @param mustFindFirst whether the {@code TerminalOp} must produce the
+ * first element in the encounter order
+ * @return a {@code TerminalOp} implementing the find operation
+ */
+ public static <T> TerminalOp<T, Optional<T>> makeRef(boolean mustFindFirst) {
+ return new FindOp<>(mustFindFirst, StreamShape.REFERENCE, Optional.empty(),
+ Optional::isPresent, FindSink.OfRef::new);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} for streams of ints.
+ *
+ * @param mustFindFirst whether the {@code TerminalOp} must produce the
+ * first element in the encounter order
+ * @return a {@code TerminalOp} implementing the find operation
+ */
+ public static TerminalOp<Integer, OptionalInt> makeInt(boolean mustFindFirst) {
+ return new FindOp<>(mustFindFirst, StreamShape.INT_VALUE, OptionalInt.empty(),
+ OptionalInt::isPresent, FindSink.OfInt::new);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} for streams of longs.
+ *
+ * @param mustFindFirst whether the {@code TerminalOp} must produce the
+ * first element in the encounter order
+ * @return a {@code TerminalOp} implementing the find operation
+ */
+ public static TerminalOp<Long, OptionalLong> makeLong(boolean mustFindFirst) {
+ return new FindOp<>(mustFindFirst, StreamShape.LONG_VALUE, OptionalLong.empty(),
+ OptionalLong::isPresent, FindSink.OfLong::new);
+ }
+
+ /**
+ * Constructs a {@code FindOp} for streams of doubles.
+ *
+ * @param mustFindFirst whether the {@code TerminalOp} must produce the
+ * first element in the encounter order
+ * @return a {@code TerminalOp} implementing the find operation
+ */
+ public static TerminalOp<Double, OptionalDouble> makeDouble(boolean mustFindFirst) {
+ return new FindOp<>(mustFindFirst, StreamShape.DOUBLE_VALUE, OptionalDouble.empty(),
+ OptionalDouble::isPresent, FindSink.OfDouble::new);
+ }
+
+ /**
+ * A short-circuiting {@code TerminalOp} that searches for an element in a
+ * stream pipeline, and terminates when it finds one. Implements both
+ * find-first (find the first element in the encounter order) and find-any
+ * (find any element, may not be the first in encounter order.)
+ *
+ * @param <T> the output type of the stream pipeline
+ * @param <O> the result type of the find operation, typically an optional
+ * type
+ */
+ private static final class FindOp<T, O> implements TerminalOp<T, O> {
+ private final StreamShape shape;
+ final boolean mustFindFirst;
+ final O emptyValue;
+ final Predicate<O> presentPredicate;
+ final Supplier<TerminalSink<T, O>> sinkSupplier;
+
+ /**
+ * Constructs a {@code FindOp}.
+ *
+ * @param mustFindFirst if true, must find the first element in
+ * encounter order, otherwise can find any element
+ * @param shape stream shape of elements to search
+ * @param emptyValue result value corresponding to "found nothing"
+ * @param presentPredicate {@code Predicate} on result value
+ * corresponding to "found something"
+ * @param sinkSupplier supplier for a {@code TerminalSink} implementing
+ * the matching functionality
+ */
+ FindOp(boolean mustFindFirst,
+ StreamShape shape,
+ O emptyValue,
+ Predicate<O> presentPredicate,
+ Supplier<TerminalSink<T, O>> sinkSupplier) {
+ this.mustFindFirst = mustFindFirst;
+ this.shape = shape;
+ this.emptyValue = emptyValue;
+ this.presentPredicate = presentPredicate;
+ this.sinkSupplier = sinkSupplier;
+ }
+
+ @Override
+ public int getOpFlags() {
+ return StreamOpFlag.IS_SHORT_CIRCUIT | (mustFindFirst ? 0 : StreamOpFlag.NOT_ORDERED);
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return shape;
+ }
+
+ @Override
+ public <S> O evaluateSequential(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ O result = helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).get();
+ return result != null ? result : emptyValue;
+ }
+
+ @Override
+ public <P_IN> O evaluateParallel(PipelineHelper<T> helper,
+ Spliterator<P_IN> spliterator) {
+ return new FindTask<>(this, helper, spliterator).invoke();
+ }
+ }
+
+ /**
+ * Implementation of {@code TerminalSink} that implements the find
+ * functionality, requesting cancellation when something has been found
+ *
+ * @param <T> The type of input element
+ * @param <O> The result type, typically an optional type
+ */
+ private static abstract class FindSink<T, O> implements TerminalSink<T, O> {
+ boolean hasValue;
+ T value;
+
+ FindSink() {} // Avoid creation of special accessor
+
+ @Override
+ public void accept(T value) {
+ if (!hasValue) {
+ hasValue = true;
+ this.value = value;
+ }
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return hasValue;
+ }
+
+ /** Specialization of {@code FindSink} for reference streams */
+ static final class OfRef<T> extends FindSink<T, Optional<T>> {
+ @Override
+ public Optional<T> get() {
+ return hasValue ? Optional.of(value) : null;
+ }
+ }
+
+ /** Specialization of {@code FindSink} for int streams */
+ static final class OfInt extends FindSink<Integer, OptionalInt>
+ implements Sink.OfInt {
+ @Override
+ public void accept(int value) {
+ // Boxing is OK here, since few values will actually flow into the sink
+ accept((Integer) value);
+ }
+
+ @Override
+ public OptionalInt get() {
+ return hasValue ? OptionalInt.of(value) : null;
+ }
+ }
+
+ /** Specialization of {@code FindSink} for long streams */
+ static final class OfLong extends FindSink<Long, OptionalLong>
+ implements Sink.OfLong {
+ @Override
+ public void accept(long value) {
+ // Boxing is OK here, since few values will actually flow into the sink
+ accept((Long) value);
+ }
+
+ @Override
+ public OptionalLong get() {
+ return hasValue ? OptionalLong.of(value) : null;
+ }
+ }
+
+ /** Specialization of {@code FindSink} for double streams */
+ static final class OfDouble extends FindSink<Double, OptionalDouble>
+ implements Sink.OfDouble {
+ @Override
+ public void accept(double value) {
+ // Boxing is OK here, since few values will actually flow into the sink
+ accept((Double) value);
+ }
+
+ @Override
+ public OptionalDouble get() {
+ return hasValue ? OptionalDouble.of(value) : null;
+ }
+ }
+ }
+
+ /**
+ * {@code ForkJoinTask} implementing parallel short-circuiting search
+ * @param <P_IN> Input element type to the stream pipeline
+ * @param <P_OUT> Output element type from the stream pipeline
+ * @param <O> Result type from the find operation
+ */
+ @SuppressWarnings("serial")
+ private static final class FindTask<P_IN, P_OUT, O>
+ extends AbstractShortCircuitTask<P_IN, P_OUT, O, FindTask<P_IN, P_OUT, O>> {
+ private final FindOp<P_OUT, O> op;
+
+ FindTask(FindOp<P_OUT, O> op,
+ PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ super(helper, spliterator);
+ this.op = op;
+ }
+
+ FindTask(FindTask<P_IN, P_OUT, O> parent, Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ this.op = parent.op;
+ }
+
+ @Override
+ protected FindTask<P_IN, P_OUT, O> makeChild(Spliterator<P_IN> spliterator) {
+ return new FindTask<>(this, spliterator);
+ }
+
+ @Override
+ protected O getEmptyResult() {
+ return op.emptyValue;
+ }
+
+ private void foundResult(O answer) {
+ if (isLeftmostNode())
+ shortCircuit(answer);
+ else
+ cancelLaterNodes();
+ }
+
+ @Override
+ protected O doLeaf() {
+ O result = helper.wrapAndCopyInto(op.sinkSupplier.get(), spliterator).get();
+ if (!op.mustFindFirst) {
+ if (result != null)
+ shortCircuit(result);
+ return null;
+ }
+ else {
+ if (result != null) {
+ foundResult(result);
+ return result;
+ }
+ else
+ return null;
+ }
+ }
+
+ @Override
+ public void onCompletion(CountedCompleter<?> caller) {
+ if (op.mustFindFirst) {
+ for (FindTask<P_IN, P_OUT, O> child = leftChild, p = null; child != p;
+ p = child, child = rightChild) {
+ O result = child.getLocalResult();
+ if (result != null && op.presentPredicate.test(result)) {
+ setLocalResult(result);
+ foundResult(result);
+ break;
+ }
+ }
+ }
+ super.onCompletion(caller);
+ }
+ }
+}
+
diff --git a/ojluni/src/main/java/java/util/stream/ForEachOps.java b/ojluni/src/main/java/java/util/stream/ForEachOps.java
new file mode 100644
index 0000000..b527f05
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/ForEachOps.java
@@ -0,0 +1,508 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountedCompleter;
+import java.util.concurrent.ForkJoinTask;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.LongConsumer;
+
+/**
+ * Factory for creating instances of {@code TerminalOp} that perform an
+ * action for every element of a stream. Supported variants include unordered
+ * traversal (elements are provided to the {@code Consumer} as soon as they are
+ * available), and ordered traversal (elements are provided to the
+ * {@code Consumer} in encounter order.)
+ *
+ * <p>Elements are provided to the {@code Consumer} on whatever thread and
+ * whatever order they become available. For ordered traversals, it is
+ * guaranteed that processing an element <em>happens-before</em> processing
+ * subsequent elements in the encounter order.
+ *
+ * <p>Exceptions occurring as a result of sending an element to the
+ * {@code Consumer} will be relayed to the caller and traversal will be
+ * prematurely terminated.
+ *
+ * @since 1.8
+ */
+final class ForEachOps {
+
+ private ForEachOps() { }
+
+ /**
+ * Constructs a {@code TerminalOp} that performs an action for every element
+ * of a stream.
+ *
+ * @param action the {@code Consumer} that receives all elements of a
+ * stream
+ * @param ordered whether an ordered traversal is requested
+ * @param <T> the type of the stream elements
+ * @return the {@code TerminalOp} instance
+ */
+ public static <T> TerminalOp<T, Void> makeRef(Consumer<? super T> action,
+ boolean ordered) {
+ Objects.requireNonNull(action);
+ return new ForEachOp.OfRef<>(action, ordered);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that performs an action for every element
+ * of an {@code IntStream}.
+ *
+ * @param action the {@code IntConsumer} that receives all elements of a
+ * stream
+ * @param ordered whether an ordered traversal is requested
+ * @return the {@code TerminalOp} instance
+ */
+ public static TerminalOp<Integer, Void> makeInt(IntConsumer action,
+ boolean ordered) {
+ Objects.requireNonNull(action);
+ return new ForEachOp.OfInt(action, ordered);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that performs an action for every element
+ * of a {@code LongStream}.
+ *
+ * @param action the {@code LongConsumer} that receives all elements of a
+ * stream
+ * @param ordered whether an ordered traversal is requested
+ * @return the {@code TerminalOp} instance
+ */
+ public static TerminalOp<Long, Void> makeLong(LongConsumer action,
+ boolean ordered) {
+ Objects.requireNonNull(action);
+ return new ForEachOp.OfLong(action, ordered);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that performs an action for every element
+ * of a {@code DoubleStream}.
+ *
+ * @param action the {@code DoubleConsumer} that receives all elements of
+ * a stream
+ * @param ordered whether an ordered traversal is requested
+ * @return the {@code TerminalOp} instance
+ */
+ public static TerminalOp<Double, Void> makeDouble(DoubleConsumer action,
+ boolean ordered) {
+ Objects.requireNonNull(action);
+ return new ForEachOp.OfDouble(action, ordered);
+ }
+
+ /**
+ * A {@code TerminalOp} that evaluates a stream pipeline and sends the
+ * output to itself as a {@code TerminalSink}. Elements will be sent in
+ * whatever thread they become available. If the traversal is unordered,
+ * they will be sent independent of the stream's encounter order.
+ *
+ * <p>This terminal operation is stateless. For parallel evaluation, each
+ * leaf instance of a {@code ForEachTask} will send elements to the same
+ * {@code TerminalSink} reference that is an instance of this class.
+ *
+ * @param <T> the output type of the stream pipeline
+ */
+ static abstract class ForEachOp<T>
+ implements TerminalOp<T, Void>, TerminalSink<T, Void> {
+ private final boolean ordered;
+
+ protected ForEachOp(boolean ordered) {
+ this.ordered = ordered;
+ }
+
+ // TerminalOp
+
+ @Override
+ public int getOpFlags() {
+ return ordered ? 0 : StreamOpFlag.NOT_ORDERED;
+ }
+
+ @Override
+ public <S> Void evaluateSequential(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ return helper.wrapAndCopyInto(this, spliterator).get();
+ }
+
+ @Override
+ public <S> Void evaluateParallel(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ if (ordered)
+ new ForEachOrderedTask<>(helper, spliterator, this).invoke();
+ else
+ new ForEachTask<>(helper, spliterator, helper.wrapSink(this)).invoke();
+ return null;
+ }
+
+ // TerminalSink
+
+ @Override
+ public Void get() {
+ return null;
+ }
+
+ // Implementations
+
+ /** Implementation class for reference streams */
+ static final class OfRef<T> extends ForEachOp<T> {
+ final Consumer<? super T> consumer;
+
+ OfRef(Consumer<? super T> consumer, boolean ordered) {
+ super(ordered);
+ this.consumer = consumer;
+ }
+
+ @Override
+ public void accept(T t) {
+ consumer.accept(t);
+ }
+ }
+
+ /** Implementation class for {@code IntStream} */
+ static final class OfInt extends ForEachOp<Integer>
+ implements Sink.OfInt {
+ final IntConsumer consumer;
+
+ OfInt(IntConsumer consumer, boolean ordered) {
+ super(ordered);
+ this.consumer = consumer;
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return StreamShape.INT_VALUE;
+ }
+
+ @Override
+ public void accept(int t) {
+ consumer.accept(t);
+ }
+ }
+
+ /** Implementation class for {@code LongStream} */
+ static final class OfLong extends ForEachOp<Long>
+ implements Sink.OfLong {
+ final LongConsumer consumer;
+
+ OfLong(LongConsumer consumer, boolean ordered) {
+ super(ordered);
+ this.consumer = consumer;
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return StreamShape.LONG_VALUE;
+ }
+
+ @Override
+ public void accept(long t) {
+ consumer.accept(t);
+ }
+ }
+
+ /** Implementation class for {@code DoubleStream} */
+ static final class OfDouble extends ForEachOp<Double>
+ implements Sink.OfDouble {
+ final DoubleConsumer consumer;
+
+ OfDouble(DoubleConsumer consumer, boolean ordered) {
+ super(ordered);
+ this.consumer = consumer;
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return StreamShape.DOUBLE_VALUE;
+ }
+
+ @Override
+ public void accept(double t) {
+ consumer.accept(t);
+ }
+ }
+ }
+
+ /** A {@code ForkJoinTask} for performing a parallel for-each operation */
+ @SuppressWarnings("serial")
+ static final class ForEachTask<S, T> extends CountedCompleter<Void> {
+ private Spliterator<S> spliterator;
+ private final Sink<S> sink;
+ private final PipelineHelper<T> helper;
+ private long targetSize;
+
+ ForEachTask(PipelineHelper<T> helper,
+ Spliterator<S> spliterator,
+ Sink<S> sink) {
+ super(null);
+ this.sink = sink;
+ this.helper = helper;
+ this.spliterator = spliterator;
+ this.targetSize = 0L;
+ }
+
+ ForEachTask(ForEachTask<S, T> parent, Spliterator<S> spliterator) {
+ super(parent);
+ this.spliterator = spliterator;
+ this.sink = parent.sink;
+ this.targetSize = parent.targetSize;
+ this.helper = parent.helper;
+ }
+
+ // Similar to AbstractTask but doesn't need to track child tasks
+ public void compute() {
+ Spliterator<S> rightSplit = spliterator, leftSplit;
+ long sizeEstimate = rightSplit.estimateSize(), sizeThreshold;
+ if ((sizeThreshold = targetSize) == 0L)
+ targetSize = sizeThreshold = AbstractTask.suggestTargetSize(sizeEstimate);
+ boolean isShortCircuit = StreamOpFlag.SHORT_CIRCUIT.isKnown(helper.getStreamAndOpFlags());
+ boolean forkRight = false;
+ Sink<S> taskSink = sink;
+ ForEachTask<S, T> task = this;
+ while (!isShortCircuit || !taskSink.cancellationRequested()) {
+ if (sizeEstimate <= sizeThreshold ||
+ (leftSplit = rightSplit.trySplit()) == null) {
+ task.helper.copyInto(taskSink, rightSplit);
+ break;
+ }
+ ForEachTask<S, T> leftTask = new ForEachTask<>(task, leftSplit);
+ task.addToPendingCount(1);
+ ForEachTask<S, T> taskToFork;
+ if (forkRight) {
+ forkRight = false;
+ rightSplit = leftSplit;
+ taskToFork = task;
+ task = leftTask;
+ }
+ else {
+ forkRight = true;
+ taskToFork = leftTask;
+ }
+ taskToFork.fork();
+ sizeEstimate = rightSplit.estimateSize();
+ }
+ task.spliterator = null;
+ task.propagateCompletion();
+ }
+ }
+
+ /**
+ * A {@code ForkJoinTask} for performing a parallel for-each operation
+ * which visits the elements in encounter order
+ */
+ @SuppressWarnings("serial")
+ static final class ForEachOrderedTask<S, T> extends CountedCompleter<Void> {
+ /*
+ * Our goal is to ensure that the elements associated with a task are
+ * processed according to an in-order traversal of the computation tree.
+ * We use completion counts for representing these dependencies, so that
+ * a task does not complete until all the tasks preceding it in this
+ * order complete. We use the "completion map" to associate the next
+ * task in this order for any left child. We increase the pending count
+ * of any node on the right side of such a mapping by one to indicate
+ * its dependency, and when a node on the left side of such a mapping
+ * completes, it decrements the pending count of its corresponding right
+ * side. As the computation tree is expanded by splitting, we must
+ * atomically update the mappings to maintain the invariant that the
+ * completion map maps left children to the next node in the in-order
+ * traversal.
+ *
+ * Take, for example, the following computation tree of tasks:
+ *
+ * a
+ * / \
+ * b c
+ * / \ / \
+ * d e f g
+ *
+ * The completion map will contain (not necessarily all at the same time)
+ * the following associations:
+ *
+ * d -> e
+ * b -> f
+ * f -> g
+ *
+ * Tasks e, f, g will have their pending counts increased by 1.
+ *
+ * The following relationships hold:
+ *
+ * - completion of d "happens-before" e;
+ * - completion of d and e "happens-before" b;
+ * - completion of b "happens-before" f; and
+ * - completion of f "happens-before" g
+ *
+ * Thus overall the "happens-before" relationship holds for the
+ * reporting of elements, covered by tasks d, e, f and g, as specified
+ * by the forEachOrdered operation.
+ */
+
+ private final PipelineHelper<T> helper;
+ private Spliterator<S> spliterator;
+ private final long targetSize;
+ private final ConcurrentHashMap<ForEachOrderedTask<S, T>, ForEachOrderedTask<S, T>> completionMap;
+ private final Sink<T> action;
+ private final ForEachOrderedTask<S, T> leftPredecessor;
+ private Node<T> node;
+
+ protected ForEachOrderedTask(PipelineHelper<T> helper,
+ Spliterator<S> spliterator,
+ Sink<T> action) {
+ super(null);
+ this.helper = helper;
+ this.spliterator = spliterator;
+ this.targetSize = AbstractTask.suggestTargetSize(spliterator.estimateSize());
+ // Size map to avoid concurrent re-sizes
+ this.completionMap = new ConcurrentHashMap<>(Math.max(16, AbstractTask.LEAF_TARGET << 1));
+ this.action = action;
+ this.leftPredecessor = null;
+ }
+
+ ForEachOrderedTask(ForEachOrderedTask<S, T> parent,
+ Spliterator<S> spliterator,
+ ForEachOrderedTask<S, T> leftPredecessor) {
+ super(parent);
+ this.helper = parent.helper;
+ this.spliterator = spliterator;
+ this.targetSize = parent.targetSize;
+ this.completionMap = parent.completionMap;
+ this.action = parent.action;
+ this.leftPredecessor = leftPredecessor;
+ }
+
+ @Override
+ public final void compute() {
+ doCompute(this);
+ }
+
+ private static <S, T> void doCompute(ForEachOrderedTask<S, T> task) {
+ Spliterator<S> rightSplit = task.spliterator, leftSplit;
+ long sizeThreshold = task.targetSize;
+ boolean forkRight = false;
+ while (rightSplit.estimateSize() > sizeThreshold &&
+ (leftSplit = rightSplit.trySplit()) != null) {
+ ForEachOrderedTask<S, T> leftChild =
+ new ForEachOrderedTask<>(task, leftSplit, task.leftPredecessor);
+ ForEachOrderedTask<S, T> rightChild =
+ new ForEachOrderedTask<>(task, rightSplit, leftChild);
+
+ // Fork the parent task
+ // Completion of the left and right children "happens-before"
+ // completion of the parent
+ task.addToPendingCount(1);
+ // Completion of the left child "happens-before" completion of
+ // the right child
+ rightChild.addToPendingCount(1);
+ task.completionMap.put(leftChild, rightChild);
+
+ // If task is not on the left spine
+ if (task.leftPredecessor != null) {
+ /*
+ * Completion of left-predecessor, or left subtree,
+ * "happens-before" completion of left-most leaf node of
+ * right subtree.
+ * The left child's pending count needs to be updated before
+ * it is associated in the completion map, otherwise the
+ * left child can complete prematurely and violate the
+ * "happens-before" constraint.
+ */
+ leftChild.addToPendingCount(1);
+ // Update association of left-predecessor to left-most
+ // leaf node of right subtree
+ if (task.completionMap.replace(task.leftPredecessor, task, leftChild)) {
+ // If replaced, adjust the pending count of the parent
+ // to complete when its children complete
+ task.addToPendingCount(-1);
+ } else {
+ // Left-predecessor has already completed, parent's
+ // pending count is adjusted by left-predecessor;
+ // left child is ready to complete
+ leftChild.addToPendingCount(-1);
+ }
+ }
+
+ ForEachOrderedTask<S, T> taskToFork;
+ if (forkRight) {
+ forkRight = false;
+ rightSplit = leftSplit;
+ task = leftChild;
+ taskToFork = rightChild;
+ }
+ else {
+ forkRight = true;
+ task = rightChild;
+ taskToFork = leftChild;
+ }
+ taskToFork.fork();
+ }
+
+ /*
+ * Task's pending count is either 0 or 1. If 1 then the completion
+ * map will contain a value that is task, and two calls to
+ * tryComplete are required for completion, one below and one
+ * triggered by the completion of task's left-predecessor in
+ * onCompletion. Therefore there is no data race within the if
+ * block.
+ */
+ if (task.getPendingCount() > 0) {
+ // Cannot complete just yet so buffer elements into a Node
+ // for use when completion occurs
+ @SuppressWarnings("unchecked")
+ IntFunction<T[]> generator = size -> (T[]) new Object[size];
+ Node.Builder<T> nb = task.helper.makeNodeBuilder(
+ task.helper.exactOutputSizeIfKnown(rightSplit),
+ generator);
+ task.node = task.helper.wrapAndCopyInto(nb, rightSplit).build();
+ task.spliterator = null;
+ }
+ task.tryComplete();
+ }
+
+ @Override
+ public void onCompletion(CountedCompleter<?> caller) {
+ if (node != null) {
+ // Dump buffered elements from this leaf into the sink
+ node.forEach(action);
+ node = null;
+ }
+ else if (spliterator != null) {
+ // Dump elements output from this leaf's pipeline into the sink
+ helper.wrapAndCopyInto(action, spliterator);
+ spliterator = null;
+ }
+
+ // The completion of this task *and* the dumping of elements
+ // "happens-before" completion of the associated left-most leaf task
+ // of right subtree (if any, which can be this task's right sibling)
+ //
+ ForEachOrderedTask<S, T> leftDescendant = completionMap.remove(this);
+ if (leftDescendant != null)
+ leftDescendant.tryComplete();
+ }
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/IntPipeline.java b/ojluni/src/main/java/java/util/stream/IntPipeline.java
new file mode 100644
index 0000000..5c3a8f1
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/IntPipeline.java
@@ -0,0 +1,633 @@
+/*
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.IntSummaryStatistics;
+import java.util.Objects;
+import java.util.OptionalDouble;
+import java.util.OptionalInt;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiConsumer;
+import java.util.function.BinaryOperator;
+import java.util.function.IntBinaryOperator;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.IntPredicate;
+import java.util.function.IntToDoubleFunction;
+import java.util.function.IntToLongFunction;
+import java.util.function.IntUnaryOperator;
+import java.util.function.ObjIntConsumer;
+import java.util.function.Supplier;
+
+/**
+ * Abstract base class for an intermediate pipeline stage or pipeline source
+ * stage whose elements are of type {@code int}.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+abstract class IntPipeline<E_IN>
+ extends AbstractPipeline<E_IN, Integer, IntStream>
+ implements IntStream {
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Supplier<Spliterator>} describing the stream source
+ * @param sourceFlags The source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ IntPipeline(Supplier<? extends Spliterator<Integer>> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Spliterator} describing the stream source
+ * @param sourceFlags The source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ IntPipeline(Spliterator<Integer> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for appending an intermediate operation onto an existing
+ * pipeline.
+ *
+ * @param upstream the upstream element source
+ * @param opFlags the operation flags for the new operation
+ */
+ IntPipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
+ super(upstream, opFlags);
+ }
+
+ /**
+ * Adapt a {@code Sink<Integer>} to an {@code IntConsumer}, ideally simply
+ * by casting.
+ *
+ * <p>If the sink is not itself an {@code IntConsumer}, a boxing adapter
+ * (method reference to {@code Sink.accept(Integer)}) is returned and, when
+ * enabled, a {@link Tripwire} diagnostic is emitted to flag the
+ * performance hazard.
+ */
+ private static IntConsumer adapt(Sink<Integer> sink) {
+ if (sink instanceof IntConsumer) {
+ return (IntConsumer) sink;
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(AbstractPipeline.class,
+ "using IntStream.adapt(Sink<Integer> s)");
+ return sink::accept;
+ }
+ }
+
+ /**
+ * Adapt a {@code Spliterator<Integer>} to a {@code Spliterator.OfInt}.
+ *
+ * @implNote
+ * The implementation attempts to cast to a Spliterator.OfInt, and throws an
+ * exception if this cast is not possible.
+ * Unlike {@link #adapt(Sink)} there is no boxing fallback here; a
+ * non-primitive spliterator reaching this point indicates a framework
+ * error rather than a recoverable condition.
+ */
+ private static Spliterator.OfInt adapt(Spliterator<Integer> s) {
+ if (s instanceof Spliterator.OfInt) {
+ return (Spliterator.OfInt) s;
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(AbstractPipeline.class,
+ "using IntStream.adapt(Spliterator<Integer> s)");
+ throw new UnsupportedOperationException("IntStream.adapt(Spliterator<Integer> s)");
+ }
+ }
+
+
+ // Shape-specific methods
+
+ @Override
+ final StreamShape getOutputShape() {
+ return StreamShape.INT_VALUE;
+ }
+
+ // Collects the pipeline output into a primitive int Node. The boxed
+ // {@code generator} parameter is deliberately ignored: int-shaped
+ // pipelines always build primitive-array-backed nodes.
+ @Override
+ final <P_IN> Node<Integer> evaluateToNode(PipelineHelper<Integer> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree,
+ IntFunction<Integer[]> generator) {
+ return Nodes.collectInt(helper, spliterator, flattenTree);
+ }
+
+ @Override
+ final <P_IN> Spliterator<Integer> wrap(PipelineHelper<Integer> ph,
+ Supplier<Spliterator<P_IN>> supplier,
+ boolean isParallel) {
+ return new StreamSpliterators.IntWrappingSpliterator<>(ph, supplier, isParallel);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ final Spliterator.OfInt lazySpliterator(Supplier<? extends Spliterator<Integer>> supplier) {
+ return new StreamSpliterators.DelegatingSpliterator.OfInt((Supplier<Spliterator.OfInt>) supplier);
+ }
+
+ // Traverses the spliterator one element at a time so that short-circuit
+ // operations (findFirst, anyMatch, limit, ...) can stop as soon as the
+ // sink signals cancellation.
+ @Override
+ final void forEachWithCancel(Spliterator<Integer> spliterator, Sink<Integer> sink) {
+ Spliterator.OfInt spl = adapt(spliterator);
+ IntConsumer adaptedSink = adapt(sink);
+ do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
+ }
+
+ @Override
+ final Node.Builder<Integer> makeNodeBuilder(long exactSizeIfKnown,
+ IntFunction<Integer[]> generator) {
+ return Nodes.intBuilder(exactSizeIfKnown);
+ }
+
+
+ // IntStream
+
+ @Override
+ public final PrimitiveIterator.OfInt iterator() {
+ return Spliterators.iterator(spliterator());
+ }
+
+ @Override
+ public final Spliterator.OfInt spliterator() {
+ return adapt(super.spliterator());
+ }
+
+ // Stateless intermediate ops from IntStream
+
+ @Override
+ public final LongStream asLongStream() {
+ return new LongPipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedInt<Long>(sink) {
+ @Override
+ public void accept(int t) {
+ downstream.accept((long) t);
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final DoubleStream asDoubleStream() {
+ return new DoublePipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedInt<Double>(sink) {
+ @Override
+ public void accept(int t) {
+ downstream.accept((double) t);
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final Stream<Integer> boxed() {
+ return mapToObj(Integer::valueOf);
+ }
+
+ @Override
+ public final IntStream map(IntUnaryOperator mapper) {
+ Objects.requireNonNull(mapper);
+ return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedInt<Integer>(sink) {
+ @Override
+ public void accept(int t) {
+ downstream.accept(mapper.applyAsInt(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final <U> Stream<U> mapToObj(IntFunction<? extends U> mapper) {
+ Objects.requireNonNull(mapper);
+ return new ReferencePipeline.StatelessOp<Integer, U>(this, StreamShape.INT_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<U> sink) {
+ return new Sink.ChainedInt<U>(sink) {
+ @Override
+ public void accept(int t) {
+ downstream.accept(mapper.apply(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final LongStream mapToLong(IntToLongFunction mapper) {
+ Objects.requireNonNull(mapper);
+ return new LongPipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedInt<Long>(sink) {
+ @Override
+ public void accept(int t) {
+ downstream.accept(mapper.applyAsLong(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final DoubleStream mapToDouble(IntToDoubleFunction mapper) {
+ Objects.requireNonNull(mapper);
+ return new DoublePipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedInt<Double>(sink) {
+ @Override
+ public void accept(int t) {
+ downstream.accept(mapper.applyAsDouble(t));
+ }
+ };
+ }
+ };
+ }
+
+ /**
+ * Maps each element to an {@code IntStream} and flattens the results.
+ * Each mapped stream is closed after its contents have been copied
+ * downstream; a {@code null} mapped stream contributes no elements.
+ */
+ @Override
+ public final IntStream flatMap(IntFunction<? extends IntStream> mapper) {
+ // Fail fast on a null mapper, consistent with map()/mapToObj()/filter()/peek().
+ Objects.requireNonNull(mapper);
+ return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedInt<Integer>(sink) {
+ @Override
+ public void begin(long size) {
+ // Output size is unknown once elements are expanded.
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(int t) {
+ try (IntStream result = mapper.apply(t)) {
+ // We can do better than this too; optimize for depth=0 case and just grab spliterator and forEach it
+ if (result != null)
+ result.sequential().forEach(i -> downstream.accept(i));
+ }
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public IntStream unordered() {
+ if (!isOrdered())
+ return this;
+ return new StatelessOp<Integer>(this, StreamShape.INT_VALUE, StreamOpFlag.NOT_ORDERED) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
+ return sink;
+ }
+ };
+ }
+
+ @Override
+ public final IntStream filter(IntPredicate predicate) {
+ Objects.requireNonNull(predicate);
+ return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
+ StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedInt<Integer>(sink) {
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(int t) {
+ if (predicate.test(t))
+ downstream.accept(t);
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final IntStream peek(IntConsumer action) {
+ Objects.requireNonNull(action);
+ return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
+ 0) {
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedInt<Integer>(sink) {
+ @Override
+ public void accept(int t) {
+ action.accept(t);
+ downstream.accept(t);
+ }
+ };
+ }
+ };
+ }
+
+ // Stateful intermediate ops from IntStream
+
+ @Override
+ public final IntStream limit(long maxSize) {
+ if (maxSize < 0)
+ throw new IllegalArgumentException(Long.toString(maxSize));
+ return SliceOps.makeInt(this, 0, maxSize);
+ }
+
+ @Override
+ public final IntStream skip(long n) {
+ if (n < 0)
+ throw new IllegalArgumentException(Long.toString(n));
+ if (n == 0)
+ return this;
+ else
+ return SliceOps.makeInt(this, n, -1);
+ }
+
+ @Override
+ public final IntStream sorted() {
+ return SortedOps.makeInt(this);
+ }
+
+ @Override
+ public final IntStream distinct() {
+ // While functional and quick to implement, this approach is not very efficient.
+ // An efficient version requires an int-specific map/set implementation.
+ return boxed().distinct().mapToInt(i -> i);
+ }
+
+ // Terminal ops from IntStream
+
+ @Override
+ public void forEach(IntConsumer action) {
+ evaluate(ForEachOps.makeInt(action, false));
+ }
+
+ @Override
+ public void forEachOrdered(IntConsumer action) {
+ evaluate(ForEachOps.makeInt(action, true));
+ }
+
+ @Override
+ public final int sum() {
+ return reduce(0, Integer::sum);
+ }
+
+ @Override
+ public final OptionalInt min() {
+ return reduce(Math::min);
+ }
+
+ @Override
+ public final OptionalInt max() {
+ return reduce(Math::max);
+ }
+
+ @Override
+ public final long count() {
+ return mapToLong(e -> 1L).sum();
+ }
+
+ // Computes the arithmetic mean using a two-slot long accumulator:
+ // avg[0] holds the element count, avg[1] holds the running sum.
+ // Returns an empty OptionalDouble for an empty stream.
+ @Override
+ public final OptionalDouble average() {
+ long[] avg = collect(() -> new long[2],
+ (ll, i) -> {
+ ll[0]++;
+ ll[1] += i;
+ },
+ (ll, rr) -> {
+ ll[0] += rr[0];
+ ll[1] += rr[1];
+ });
+ return avg[0] > 0
+ ? OptionalDouble.of((double) avg[1] / avg[0])
+ : OptionalDouble.empty();
+ }
+
+ @Override
+ public final IntSummaryStatistics summaryStatistics() {
+ return collect(IntSummaryStatistics::new, IntSummaryStatistics::accept,
+ IntSummaryStatistics::combine);
+ }
+
+ @Override
+ public final int reduce(int identity, IntBinaryOperator op) {
+ return evaluate(ReduceOps.makeInt(identity, op));
+ }
+
+ @Override
+ public final OptionalInt reduce(IntBinaryOperator op) {
+ return evaluate(ReduceOps.makeInt(op));
+ }
+
+ // Mutable reduction. The BiConsumer-style combiner from the public API is
+ // adapted to the BinaryOperator shape required by ReduceOps: it merges
+ // 'right' into 'left' and returns 'left' as the combined container.
+ @Override
+ public final <R> R collect(Supplier<R> supplier,
+ ObjIntConsumer<R> accumulator,
+ BiConsumer<R, R> combiner) {
+ BinaryOperator<R> operator = (left, right) -> {
+ combiner.accept(left, right);
+ return left;
+ };
+ return evaluate(ReduceOps.makeInt(supplier, accumulator, operator));
+ }
+
+ @Override
+ public final boolean anyMatch(IntPredicate predicate) {
+ return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.ANY));
+ }
+
+ @Override
+ public final boolean allMatch(IntPredicate predicate) {
+ return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.ALL));
+ }
+
+ @Override
+ public final boolean noneMatch(IntPredicate predicate) {
+ return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.NONE));
+ }
+
+ @Override
+ public final OptionalInt findFirst() {
+ return evaluate(FindOps.makeInt(true));
+ }
+
+ @Override
+ public final OptionalInt findAny() {
+ return evaluate(FindOps.makeInt(false));
+ }
+
+ // Evaluates the pipeline into a node tree, flattens it to a single
+ // primitive-backed node, and extracts the int[] without boxing. The cast
+ // to Node.OfInt is safe because evaluateToNode always builds int nodes
+ // for this shape (see evaluateToNode above).
+ @Override
+ public final int[] toArray() {
+ return Nodes.flattenInt((Node.OfInt) evaluateToArrayNode(Integer[]::new))
+ .asPrimitiveArray();
+ }
+
+ //
+
+ /**
+ * Source stage of an IntStream.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+ static class Head<E_IN> extends IntPipeline<E_IN> {
+ /**
+ * Constructor for the source stage of an IntStream.
+ *
+ * @param source {@code Supplier<Spliterator>} describing the stream
+ * source
+ * @param sourceFlags the source flags for the stream source, described
+ * in {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ Head(Supplier<? extends Spliterator<Integer>> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for the source stage of an IntStream.
+ *
+ * @param source {@code Spliterator} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described
+ * in {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ Head(Spliterator<Integer> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ // A Head is the source stage, never an intermediate operation, so the
+ // op-related hooks below must never be invoked by the framework.
+ @Override
+ final boolean opIsStateful() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ final Sink<E_IN> opWrapSink(int flags, Sink<Integer> sink) {
+ throw new UnsupportedOperationException();
+ }
+
+ // Optimized sequential terminal operations for the head of the pipeline
+
+ // With no intermediate stages and sequential execution, iterate the
+ // source spliterator directly instead of building the sink chain.
+ @Override
+ public void forEach(IntConsumer action) {
+ if (!isParallel()) {
+ adapt(sourceStageSpliterator()).forEachRemaining(action);
+ }
+ else {
+ super.forEach(action);
+ }
+ }
+
+ // Sequential traversal of the source is already in encounter order,
+ // so the same fast path applies for forEachOrdered.
+ @Override
+ public void forEachOrdered(IntConsumer action) {
+ if (!isParallel()) {
+ adapt(sourceStageSpliterator()).forEachRemaining(action);
+ }
+ else {
+ super.forEachOrdered(action);
+ }
+ }
+ }
+
+ /**
+ * Base class for a stateless intermediate stage of an IntStream
+ *
+ * <p>Stateless operations (map, filter, peek, ...) can process each
+ * element independently, so {@link #opIsStateful()} is fixed to
+ * {@code false} and no parallel evaluation hook is needed.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+ abstract static class StatelessOp<E_IN> extends IntPipeline<E_IN> {
+ /**
+ * Construct a new IntStream by appending a stateless intermediate
+ * operation to an existing stream.
+ * @param upstream The upstream pipeline stage
+ * @param inputShape The stream shape for the upstream pipeline stage
+ * @param opFlags Operation flags for the new stage
+ */
+ StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
+ StreamShape inputShape,
+ int opFlags) {
+ super(upstream, opFlags);
+ // Stages may only be chained onto an upstream of the expected shape.
+ assert upstream.getOutputShape() == inputShape;
+ }
+
+ @Override
+ final boolean opIsStateful() {
+ return false;
+ }
+ }
+
+ /**
+ * Base class for a stateful intermediate stage of an IntStream.
+ *
+ * <p>Stateful operations (sorted, limit, skip, distinct, ...) may need to
+ * see all or part of the input before producing output; concrete
+ * subclasses must therefore supply their own parallel evaluation via
+ * {@link #opEvaluateParallel}.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+ abstract static class StatefulOp<E_IN> extends IntPipeline<E_IN> {
+ /**
+ * Construct a new IntStream by appending a stateful intermediate
+ * operation to an existing stream.
+ * @param upstream The upstream pipeline stage
+ * @param inputShape The stream shape for the upstream pipeline stage
+ * @param opFlags Operation flags for the new stage
+ */
+ StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
+ StreamShape inputShape,
+ int opFlags) {
+ super(upstream, opFlags);
+ // Stages may only be chained onto an upstream of the expected shape.
+ assert upstream.getOutputShape() == inputShape;
+ }
+
+ @Override
+ final boolean opIsStateful() {
+ return true;
+ }
+
+ @Override
+ abstract <P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<Integer[]> generator);
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/IntStream.java b/ojluni/src/main/java/java/util/stream/IntStream.java
new file mode 100644
index 0000000..94c2924
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/IntStream.java
@@ -0,0 +1,912 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Arrays;
+import java.util.IntSummaryStatistics;
+import java.util.Objects;
+import java.util.OptionalDouble;
+import java.util.OptionalInt;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.function.IntBinaryOperator;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.IntPredicate;
+import java.util.function.IntSupplier;
+import java.util.function.IntToDoubleFunction;
+import java.util.function.IntToLongFunction;
+import java.util.function.IntUnaryOperator;
+import java.util.function.ObjIntConsumer;
+import java.util.function.Supplier;
+
+/**
+ * A sequence of primitive int-valued elements supporting sequential and parallel
+ * aggregate operations. This is the {@code int} primitive specialization of
+ * {@link Stream}.
+ *
+ * <p>The following example illustrates an aggregate operation using
+ * {@link Stream} and {@link IntStream}, computing the sum of the weights of the
+ * red widgets:
+ *
+ * <pre>{@code
+ * int sum = widgets.stream()
+ * .filter(w -> w.getColor() == RED)
+ * .mapToInt(w -> w.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * See the class documentation for {@link Stream} and the package documentation
+ * for <a href="package-summary.html">java.util.stream</a> for additional
+ * specification of streams, stream operations, stream pipelines, and
+ * parallelism.
+ *
+ * @since 1.8
+ * @see Stream
+ * @see <a href="package-summary.html">java.util.stream</a>
+ */
+public interface IntStream extends BaseStream<Integer, IntStream> {
+
+ /**
+ * Returns a stream consisting of the elements of this stream that match
+ * the given predicate.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to each element to determine if it
+ * should be included
+ * @return the new stream
+ */
+ IntStream filter(IntPredicate predicate);
+
+ /**
+ * Returns a stream consisting of the results of applying the given
+ * function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ IntStream map(IntUnaryOperator mapper);
+
+ /**
+ * Returns an object-valued {@code Stream} consisting of the results of
+ * applying the given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">
+ * intermediate operation</a>.
+ *
+ * @param <U> the element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ <U> Stream<U> mapToObj(IntFunction<? extends U> mapper);
+
+ /**
+ * Returns a {@code LongStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ LongStream mapToLong(IntToLongFunction mapper);
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ DoubleStream mapToDouble(IntToDoubleFunction mapper);
+
+ /**
+ * Returns a stream consisting of the results of replacing each element of
+ * this stream with the contents of a mapped stream produced by applying
+ * the provided mapping function to each element. Each mapped stream is
+ * {@link java.util.stream.BaseStream#close() closed} after its contents
+ * have been placed into this stream. (If a mapped stream is {@code null}
+ * an empty stream is used, instead.)
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element which produces an
+ * {@code IntStream} of new values
+ * @return the new stream
+ * @see Stream#flatMap(Function)
+ */
+ IntStream flatMap(IntFunction<? extends IntStream> mapper);
+
+ /**
+ * Returns a stream consisting of the distinct elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ IntStream distinct();
+
+ /**
+ * Returns a stream consisting of the elements of this stream in sorted
+ * order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ IntStream sorted();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, additionally
+ * performing the provided action on each element as elements are consumed
+ * from the resulting stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, the action may be called at
+ * whatever time and in whatever thread the element is made available by the
+ * upstream operation. If the action modifies shared state,
+ * it is responsible for providing the required synchronization.
+ *
+ * @apiNote This method exists mainly to support debugging, where you want
+ * to see the elements as they flow past a certain point in a pipeline:
+ * <pre>{@code
+ * IntStream.of(1, 2, 3, 4)
+ * .filter(e -> e > 2)
+ * .peek(e -> System.out.println("Filtered value: " + e))
+ * .map(e -> e * e)
+ * .peek(e -> System.out.println("Mapped value: " + e))
+ * .sum();
+ * }</pre>
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements as
+ * they are consumed from the stream
+ * @return the new stream
+ */
+ IntStream peek(IntConsumer action);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, truncated
+ * to be no longer than {@code maxSize} in length.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @apiNote
+ * While {@code limit()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code maxSize}, since {@code limit(n)}
+ * is constrained to return not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(IntSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code limit()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code limit()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param maxSize the number of elements the stream should be limited to
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code maxSize} is negative
+ */
+ IntStream limit(long maxSize);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after discarding the first {@code n} elements of the stream.
+ * If this stream contains fewer than {@code n} elements then an
+ * empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @apiNote
+ * While {@code skip()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code n}, since {@code skip(n)}
+ * is constrained to skip not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(IntSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code skip()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code skip()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param n the number of leading elements to skip
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code n} is negative
+ */
+ IntStream skip(long n);
+
+ /**
+ * Performs an action for each element of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, this operation does <em>not</em>
+ * guarantee to respect the encounter order of the stream, as doing so
+ * would sacrifice the benefit of parallelism. For any given element, the
+ * action may be performed at whatever time and in whatever thread the
+ * library chooses. If the action accesses shared state, it is
+ * responsible for providing the required synchronization.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ */
+ void forEach(IntConsumer action);
+
+ /**
+ * Performs an action for each element of this stream, guaranteeing that
+ * each element is processed in encounter order for streams that have a
+ * defined encounter order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ * @see #forEach(IntConsumer)
+ */
+ void forEachOrdered(IntConsumer action);
+
+ /**
+ * Returns an array containing the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an array containing the elements of this stream
+ */
+ int[] toArray();
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity value and an
+ * <a href="package-summary.html#Associativity">associative</a>
+ * accumulation function, and returns the reduced value. This is equivalent
+ * to:
+ * <pre>{@code
+ * int result = identity;
+ * for (int element : this stream)
+ * result = accumulator.applyAsInt(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the accumulator
+ * function. This means that for all {@code x},
+ * {@code accumulator.apply(identity, x)} is equal to {@code x}.
+ * The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Sum, min, max, and average are all special cases of reduction.
+ * Summing a stream of numbers can be expressed as:
+ *
+ * <pre>{@code
+ * int sum = integers.reduce(0, (a, b) -> a+b);
+ * }</pre>
+ *
+ * or more compactly:
+ *
+ * <pre>{@code
+ * int sum = integers.reduce(0, Integer::sum);
+ * }</pre>
+ *
+ * <p>While this may seem a more roundabout way to perform an aggregation
+ * compared to simply mutating a running total in a loop, reduction
+ * operations parallelize more gracefully, without needing additional
+ * synchronization and with greatly reduced risk of data races.
+ *
+ * @param identity the identity value for the accumulating function
+ * @param op an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values
+ * @return the result of the reduction
+ * @see #sum()
+ * @see #min()
+ * @see #max()
+ * @see #average()
+ */
+ int reduce(int identity, IntBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using an
+ * <a href="package-summary.html#Associativity">associative</a> accumulation
+ * function, and returns an {@code OptionalInt} describing the reduced value,
+ * if any. This is equivalent to:
+ * <pre>{@code
+ * boolean foundAny = false;
+ * int result = null;
+ * for (int element : this stream) {
+ * if (!foundAny) {
+ * foundAny = true;
+ * result = element;
+ * }
+ * else
+ * result = accumulator.applyAsInt(result, element);
+ * }
+ * return foundAny ? OptionalInt.of(result) : OptionalInt.empty();
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param op an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values
+ * @return the result of the reduction
+ * @see #reduce(int, IntBinaryOperator)
+ */
+ OptionalInt reduce(IntBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream. A mutable
+ * reduction is one in which the reduced value is a mutable result container,
+ * such as an {@code ArrayList}, and elements are incorporated by updating
+ * the state of the result rather than by replacing the result. This
+ * produces a result equivalent to:
+ * <pre>{@code
+ * R result = supplier.get();
+ * for (int element : this stream)
+ * accumulator.accept(result, element);
+ * return result;
+ * }</pre>
+ *
+ * <p>Like {@link #reduce(int, IntBinaryOperator)}, {@code collect} operations
+ * can be parallelized without requiring additional synchronization.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param <R> type of the result
+ * @param supplier a function that creates a new result container. For a
+ * parallel execution, this function may be called
+ * multiple times and must return a fresh value each time.
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for incorporating an additional element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values, which must be
+ * compatible with the accumulator function
+ * @return the result of the reduction
+ * @see Stream#collect(Supplier, BiConsumer, BiConsumer)
+ */
+ <R> R collect(Supplier<R> supplier,
+ ObjIntConsumer<R> accumulator,
+ BiConsumer<R, R> combiner);
+
+ /**
+ * Returns the sum of elements in this stream. This is a special case
+ * of a <a href="package-summary.html#Reduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(0, Integer::sum);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return the sum of elements in this stream
+ */
+ int sum();
+
+ /**
+ * Returns an {@code OptionalInt} describing the minimum element of this
+ * stream, or an empty optional if this stream is empty. This is a special
+ * case of a <a href="package-summary.html#Reduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(Integer::min);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return an {@code OptionalInt} containing the minimum element of this
+ * stream, or an empty {@code OptionalInt} if the stream is empty
+ */
+ OptionalInt min();
+
+ /**
+ * Returns an {@code OptionalInt} describing the maximum element of this
+ * stream, or an empty optional if this stream is empty. This is a special
+ * case of a <a href="package-summary.html#Reduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(Integer::max);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code OptionalInt} containing the maximum element of this
+ * stream, or an empty {@code OptionalInt} if the stream is empty
+ */
+ OptionalInt max();
+
+ /**
+ * Returns the count of elements in this stream. This is a special case of
+ * a <a href="package-summary.html#Reduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return mapToLong(e -> 1L).sum();
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return the count of elements in this stream
+ */
+ long count();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the arithmetic mean of elements of
+ * this stream, or an empty optional if this stream is empty. This is a
+ * special case of a
+ * <a href="package-summary.html#Reduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code OptionalDouble} containing the average element of this
+ * stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble average();
+
+ /**
+ * Returns an {@code IntSummaryStatistics} describing various
+ * summary data about the elements of this stream. This is a special
+ * case of a <a href="package-summary.html#Reduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code IntSummaryStatistics} describing various summary data
+ * about the elements of this stream
+ */
+ IntSummaryStatistics summaryStatistics();
+
+ /**
+ * Returns whether any elements of this stream match the provided
+ * predicate. May not evaluate the predicate on all elements if not
+ * necessary for determining the result. If the stream is empty then
+ * {@code false} is returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>existential quantification</em> of the
+ * predicate over the elements of the stream (for some x P(x)).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if any elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean anyMatch(IntPredicate predicate);
+
+ /**
+ * Returns whether all elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result. If the stream is empty then {@code true} is
+ * returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>universal quantification</em> of the
+ * predicate over the elements of the stream (for all x P(x)). If the
+ * stream is empty, the quantification is said to be <em>vacuously
+ * satisfied</em> and is always {@code true} (regardless of P(x)).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if either all elements of the stream match the
+ * provided predicate or the stream is empty, otherwise {@code false}
+ */
+ boolean allMatch(IntPredicate predicate);
+
+ /**
+ * Returns whether no elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result. If the stream is empty then {@code true} is
+ * returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>universal quantification</em> of the
+ * negated predicate over the elements of the stream (for all x ~P(x)). If
+ * the stream is empty, the quantification is said to be vacuously satisfied
+ * and is always {@code true}, regardless of P(x).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if either no elements of the stream match the
+ * provided predicate or the stream is empty, otherwise {@code false}
+ */
+ boolean noneMatch(IntPredicate predicate);
+
+ /**
+ * Returns an {@link OptionalInt} describing the first element of this
+ * stream, or an empty {@code OptionalInt} if the stream is empty. If the
+ * stream has no encounter order, then any element may be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @return an {@code OptionalInt} describing the first element of this stream,
+ * or an empty {@code OptionalInt} if the stream is empty
+ */
+ OptionalInt findFirst();
+
+ /**
+ * Returns an {@link OptionalInt} describing some element of the stream, or
+ * an empty {@code OptionalInt} if the stream is empty.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic; it is
+ * free to select any element in the stream. This is to allow for maximal
+ * performance in parallel operations; the cost is that multiple invocations
+ * on the same source may not return the same result. (If a stable result
+ * is desired, use {@link #findFirst()} instead.)
+ *
+ * @return an {@code OptionalInt} describing some element of this stream, or
+ * an empty {@code OptionalInt} if the stream is empty
+ * @see #findFirst()
+ */
+ OptionalInt findAny();
+
+ /**
+ * Returns a {@code LongStream} consisting of the elements of this stream,
+ * converted to {@code long}.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @return a {@code LongStream} consisting of the elements of this stream,
+ * converted to {@code long}
+ */
+ LongStream asLongStream();
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the elements of this stream,
+ * converted to {@code double}.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @return a {@code DoubleStream} consisting of the elements of this stream,
+ * converted to {@code double}
+ */
+ DoubleStream asDoubleStream();
+
+ /**
+ * Returns a {@code Stream} consisting of the elements of this stream,
+ * each boxed to an {@code Integer}.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @return a {@code Stream} consisting of the elements of this stream,
+ * each boxed to an {@code Integer}
+ */
+ Stream<Integer> boxed();
+
+ @Override
+ IntStream sequential();
+
+ @Override
+ IntStream parallel();
+
+ @Override
+ PrimitiveIterator.OfInt iterator();
+
+ @Override
+ Spliterator.OfInt spliterator();
+
+ // Static factories
+
+ /**
+ * Returns a builder for an {@code IntStream}.
+ *
+ * @return a stream builder
+ */
+ public static Builder builder() {
+ return new Streams.IntStreamBuilderImpl();
+ }
+
+ /**
+ * Returns an empty sequential {@code IntStream}.
+ *
+ * @return an empty sequential stream
+ */
+ public static IntStream empty() {
+ return StreamSupport.intStream(Spliterators.emptyIntSpliterator(), false);
+ }
+
+ /**
+ * Returns a sequential {@code IntStream} containing a single element.
+ *
+ * @param t the single element
+ * @return a singleton sequential stream
+ */
+ public static IntStream of(int t) {
+ return StreamSupport.intStream(new Streams.IntStreamBuilderImpl(t), false);
+ }
+
+ /**
+ * Returns a sequential ordered stream whose elements are the specified values.
+ *
+ * @param values the elements of the new stream
+ * @return the new stream
+ */
+ public static IntStream of(int... values) {
+ return Arrays.stream(values);
+ }
+
+ /**
+ * Returns an infinite sequential ordered {@code IntStream} produced by iterative
+ * application of a function {@code f} to an initial element {@code seed},
+ * producing a {@code Stream} consisting of {@code seed}, {@code f(seed)},
+ * {@code f(f(seed))}, etc.
+ *
+ * <p>The first element (position {@code 0}) in the {@code IntStream} will be
+ * the provided {@code seed}. For {@code n > 0}, the element at position
+ * {@code n}, will be the result of applying the function {@code f} to the
+ * element at position {@code n - 1}.
+ *
+ * @param seed the initial element
+ * @param f a function to be applied to the previous element to produce
+ * a new element
+ * @return A new sequential {@code IntStream}
+ */
+ public static IntStream iterate(final int seed, final IntUnaryOperator f) {
+ Objects.requireNonNull(f);
+ final PrimitiveIterator.OfInt iterator = new PrimitiveIterator.OfInt() {
+ int t = seed;
+
+ @Override
+ public boolean hasNext() {
+ return true;
+ }
+
+ @Override
+ public int nextInt() {
+ int v = t;
+ t = f.applyAsInt(t);
+ return v;
+ }
+ };
+ return StreamSupport.intStream(Spliterators.spliteratorUnknownSize(
+ iterator,
+ Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL), false);
+ }
+
+ /**
+ * Returns an infinite sequential unordered stream where each element is
+ * generated by the provided {@code IntSupplier}. This is suitable for
+ * generating constant streams, streams of random elements, etc.
+ *
+ * @param s the {@code IntSupplier} for generated elements
+ * @return a new infinite sequential unordered {@code IntStream}
+ */
+ public static IntStream generate(IntSupplier s) {
+ Objects.requireNonNull(s);
+ return StreamSupport.intStream(
+ new StreamSpliterators.InfiniteSupplyingSpliterator.OfInt(Long.MAX_VALUE, s), false);
+ }
+
+ /**
+ * Returns a sequential ordered {@code IntStream} from {@code startInclusive}
+ * (inclusive) to {@code endExclusive} (exclusive) by an incremental step of
+ * {@code 1}.
+ *
+ * @apiNote
+ * <p>An equivalent sequence of increasing values can be produced
+ * sequentially using a {@code for} loop as follows:
+ * <pre>{@code
+ * for (int i = startInclusive; i < endExclusive ; i++) { ... }
+ * }</pre>
+ *
+ * @param startInclusive the (inclusive) initial value
+ * @param endExclusive the exclusive upper bound
+ * @return a sequential {@code IntStream} for the range of {@code int}
+ * elements
+ */
+ public static IntStream range(int startInclusive, int endExclusive) {
+ if (startInclusive >= endExclusive) {
+ return empty();
+ } else {
+ return StreamSupport.intStream(
+ new Streams.RangeIntSpliterator(startInclusive, endExclusive, false), false);
+ }
+ }
+
+ /**
+ * Returns a sequential ordered {@code IntStream} from {@code startInclusive}
+ * (inclusive) to {@code endInclusive} (inclusive) by an incremental step of
+ * {@code 1}.
+ *
+ * @apiNote
+ * <p>An equivalent sequence of increasing values can be produced
+ * sequentially using a {@code for} loop as follows:
+ * <pre>{@code
+ * for (int i = startInclusive; i <= endInclusive ; i++) { ... }
+ * }</pre>
+ *
+ * @param startInclusive the (inclusive) initial value
+ * @param endInclusive the inclusive upper bound
+ * @return a sequential {@code IntStream} for the range of {@code int}
+ * elements
+ */
+ public static IntStream rangeClosed(int startInclusive, int endInclusive) {
+ if (startInclusive > endInclusive) {
+ return empty();
+ } else {
+ return StreamSupport.intStream(
+ new Streams.RangeIntSpliterator(startInclusive, endInclusive, true), false);
+ }
+ }
+
+ /**
+ * Creates a lazily concatenated stream whose elements are all the
+ * elements of the first stream followed by all the elements of the
+ * second stream. The resulting stream is ordered if both
+ * of the input streams are ordered, and parallel if either of the input
+ * streams is parallel. When the resulting stream is closed, the close
+ * handlers for both input streams are invoked.
+ *
+ * @implNote
+ * Use caution when constructing streams from repeated concatenation.
+ * Accessing an element of a deeply concatenated stream can result in deep
+ * call chains, or even {@code StackOverflowError}.
+ *
+ * @param a the first stream
+ * @param b the second stream
+ * @return the concatenation of the two input streams
+ */
+ public static IntStream concat(IntStream a, IntStream b) {
+ Objects.requireNonNull(a);
+ Objects.requireNonNull(b);
+
+ Spliterator.OfInt split = new Streams.ConcatSpliterator.OfInt(
+ a.spliterator(), b.spliterator());
+ IntStream stream = StreamSupport.intStream(split, a.isParallel() || b.isParallel());
+ return stream.onClose(Streams.composedClose(a, b));
+ }
+
+ /**
+ * A mutable builder for an {@code IntStream}.
+ *
+ * <p>A stream builder has a lifecycle, which starts in a building
+ * phase, during which elements can be added, and then transitions to a built
+ * phase, after which elements may not be added. The built phase
+ * begins when the {@link #build()} method is called, which creates an
+ * ordered stream whose elements are the elements that were added to the
+ * stream builder, in the order they were added.
+ *
+ * @see IntStream#builder()
+ * @since 1.8
+ */
+ public interface Builder extends IntConsumer {
+
+ /**
+ * Adds an element to the stream being built.
+ *
+ * @throws IllegalStateException if the builder has already transitioned
+ * to the built state
+ */
+ @Override
+ void accept(int t);
+
+ /**
+ * Adds an element to the stream being built.
+ *
+ * @implSpec
+ * The default implementation behaves as if:
+ * <pre>{@code
+ * accept(t)
+ * return this;
+ * }</pre>
+ *
+ * @param t the element to add
+ * @return {@code this} builder
+ * @throws IllegalStateException if the builder has already transitioned
+ * to the built state
+ */
+ default Builder add(int t) {
+ accept(t);
+ return this;
+ }
+
+ /**
+ * Builds the stream, transitioning this builder to the built state.
+ * An {@code IllegalStateException} is thrown if there are further
+ * attempts to operate on the builder after it has entered the built
+ * state.
+ *
+ * @return the built stream
+ * @throws IllegalStateException if the builder has already transitioned to
+ * the built state
+ */
+ IntStream build();
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/LongPipeline.java b/ojluni/src/main/java/java/util/stream/LongPipeline.java
new file mode 100644
index 0000000..88e919e
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/LongPipeline.java
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.LongSummaryStatistics;
+import java.util.Objects;
+import java.util.OptionalDouble;
+import java.util.OptionalLong;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiConsumer;
+import java.util.function.BinaryOperator;
+import java.util.function.IntFunction;
+import java.util.function.LongBinaryOperator;
+import java.util.function.LongConsumer;
+import java.util.function.LongFunction;
+import java.util.function.LongPredicate;
+import java.util.function.LongToDoubleFunction;
+import java.util.function.LongToIntFunction;
+import java.util.function.LongUnaryOperator;
+import java.util.function.ObjLongConsumer;
+import java.util.function.Supplier;
+
+/**
+ * Abstract base class for an intermediate pipeline stage or pipeline source
+ * stage for streams whose elements are of type {@code long}.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+abstract class LongPipeline<E_IN>
+ extends AbstractPipeline<E_IN, Long, LongStream>
+ implements LongStream {
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Supplier<Spliterator>} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ LongPipeline(Supplier<? extends Spliterator<Long>> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Spliterator} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ LongPipeline(Spliterator<Long> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for appending an intermediate operation onto an existing pipeline.
+ *
+ * @param upstream the upstream element source.
+ * @param opFlags the operation flags
+ */
+ LongPipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
+ super(upstream, opFlags);
+ }
+
+ /**
+ * Adapt a {@code Sink<Long>} to a {@code LongConsumer}, ideally simply
+ * by casting.
+ */
+ private static LongConsumer adapt(Sink<Long> sink) {
+ if (sink instanceof LongConsumer) {
+ return (LongConsumer) sink;
+ } else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(AbstractPipeline.class,
+ "using LongStream.adapt(Sink<Long> s)");
+ return sink::accept;
+ }
+ }
+
+ /**
+ * Adapt a {@code Spliterator<Long>} to a {@code Spliterator.OfLong}.
+ *
+ * @implNote
+ * The implementation attempts to cast to a Spliterator.OfLong, and throws
+ * an exception if this cast is not possible.
+ */
+ private static Spliterator.OfLong adapt(Spliterator<Long> s) {
+ if (s instanceof Spliterator.OfLong) {
+ return (Spliterator.OfLong) s;
+ } else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(AbstractPipeline.class,
+ "using LongStream.adapt(Spliterator<Long> s)");
+ throw new UnsupportedOperationException("LongStream.adapt(Spliterator<Long> s)");
+ }
+ }
+
+
+ // Shape-specific methods
+
+ @Override
+ final StreamShape getOutputShape() {
+ return StreamShape.LONG_VALUE;
+ }
+
+ @Override
+ final <P_IN> Node<Long> evaluateToNode(PipelineHelper<Long> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree,
+ IntFunction<Long[]> generator) {
+ return Nodes.collectLong(helper, spliterator, flattenTree);
+ }
+
+ @Override
+ final <P_IN> Spliterator<Long> wrap(PipelineHelper<Long> ph,
+ Supplier<Spliterator<P_IN>> supplier,
+ boolean isParallel) {
+ return new StreamSpliterators.LongWrappingSpliterator<>(ph, supplier, isParallel);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ final Spliterator.OfLong lazySpliterator(Supplier<? extends Spliterator<Long>> supplier) {
+ return new StreamSpliterators.DelegatingSpliterator.OfLong((Supplier<Spliterator.OfLong>) supplier);
+ }
+
+ @Override
+ final void forEachWithCancel(Spliterator<Long> spliterator, Sink<Long> sink) {
+ Spliterator.OfLong spl = adapt(spliterator);
+ LongConsumer adaptedSink = adapt(sink);
+ do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
+ }
+
+ @Override
+ final Node.Builder<Long> makeNodeBuilder(long exactSizeIfKnown, IntFunction<Long[]> generator) {
+ return Nodes.longBuilder(exactSizeIfKnown);
+ }
+
+
+ // LongStream
+
+ @Override
+ public final PrimitiveIterator.OfLong iterator() {
+ return Spliterators.iterator(spliterator());
+ }
+
+ @Override
+ public final Spliterator.OfLong spliterator() {
+ return adapt(super.spliterator());
+ }
+
+ // Stateless intermediate ops from LongStream
+
+ @Override
+ public final DoubleStream asDoubleStream() {
+ return new DoublePipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedLong<Double>(sink) {
+ @Override
+ public void accept(long t) {
+ downstream.accept((double) t);
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final Stream<Long> boxed() {
+ return mapToObj(Long::valueOf);
+ }
+
+ @Override
+ public final LongStream map(LongUnaryOperator mapper) {
+ Objects.requireNonNull(mapper);
+ return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedLong<Long>(sink) {
+ @Override
+ public void accept(long t) {
+ downstream.accept(mapper.applyAsLong(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final <U> Stream<U> mapToObj(LongFunction<? extends U> mapper) {
+ Objects.requireNonNull(mapper);
+ return new ReferencePipeline.StatelessOp<Long, U>(this, StreamShape.LONG_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<U> sink) {
+ return new Sink.ChainedLong<U>(sink) {
+ @Override
+ public void accept(long t) {
+ downstream.accept(mapper.apply(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final IntStream mapToInt(LongToIntFunction mapper) {
+ Objects.requireNonNull(mapper);
+ return new IntPipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedLong<Integer>(sink) {
+ @Override
+ public void accept(long t) {
+ downstream.accept(mapper.applyAsInt(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final DoubleStream mapToDouble(LongToDoubleFunction mapper) {
+ Objects.requireNonNull(mapper);
+ return new DoublePipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedLong<Double>(sink) {
+ @Override
+ public void accept(long t) {
+ downstream.accept(mapper.applyAsDouble(t));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final LongStream flatMap(LongFunction<? extends LongStream> mapper) {
+ return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedLong<Long>(sink) {
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(long t) {
+ try (LongStream result = mapper.apply(t)) {
+ // We can do better than this too; optimize for depth=0 case and just grab spliterator and forEach it
+ if (result != null)
+ result.sequential().forEach(i -> downstream.accept(i));
+ }
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public LongStream unordered() {
+ if (!isOrdered())
+ return this;
+ return new StatelessOp<Long>(this, StreamShape.LONG_VALUE, StreamOpFlag.NOT_ORDERED) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
+ return sink;
+ }
+ };
+ }
+
+ @Override
+ public final LongStream filter(LongPredicate predicate) {
+ Objects.requireNonNull(predicate);
+ return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
+ StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedLong<Long>(sink) {
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(long t) {
+ if (predicate.test(t))
+ downstream.accept(t);
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final LongStream peek(LongConsumer action) {
+ Objects.requireNonNull(action);
+ return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
+ 0) {
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedLong<Long>(sink) {
+ @Override
+ public void accept(long t) {
+ action.accept(t);
+ downstream.accept(t);
+ }
+ };
+ }
+ };
+ }
+
+ // Stateful intermediate ops from LongStream
+
+ @Override
+ public final LongStream limit(long maxSize) {
+ if (maxSize < 0)
+ throw new IllegalArgumentException(Long.toString(maxSize));
+ return SliceOps.makeLong(this, 0, maxSize);
+ }
+
+ @Override
+ public final LongStream skip(long n) {
+ if (n < 0)
+ throw new IllegalArgumentException(Long.toString(n));
+ if (n == 0)
+ return this;
+ else
+ return SliceOps.makeLong(this, n, -1);
+ }
+
+ @Override
+ public final LongStream sorted() {
+ return SortedOps.makeLong(this);
+ }
+
+ @Override
+ public final LongStream distinct() {
+ // While functional and quick to implement, this approach is not very efficient.
+ // An efficient version requires a long-specific map/set implementation.
+ return boxed().distinct().mapToLong(i -> (long) i);
+ }
+
+ // Terminal ops from LongStream
+
+ @Override
+ public void forEach(LongConsumer action) {
+ evaluate(ForEachOps.makeLong(action, false));
+ }
+
+ @Override
+ public void forEachOrdered(LongConsumer action) {
+ evaluate(ForEachOps.makeLong(action, true));
+ }
+
+ @Override
+ public final long sum() {
+ // use better algorithm to compensate for intermediate overflow?
+ return reduce(0, Long::sum);
+ }
+
+ @Override
+ public final OptionalLong min() {
+ return reduce(Math::min);
+ }
+
+ @Override
+ public final OptionalLong max() {
+ return reduce(Math::max);
+ }
+
+ @Override
+ public final OptionalDouble average() {
+ long[] avg = collect(() -> new long[2],
+ (ll, i) -> {
+ ll[0]++;
+ ll[1] += i;
+ },
+ (ll, rr) -> {
+ ll[0] += rr[0];
+ ll[1] += rr[1];
+ });
+ return avg[0] > 0
+ ? OptionalDouble.of((double) avg[1] / avg[0])
+ : OptionalDouble.empty();
+ }
+
+ @Override
+ public final long count() {
+ return map(e -> 1L).sum();
+ }
+
+ @Override
+ public final LongSummaryStatistics summaryStatistics() {
+ return collect(LongSummaryStatistics::new, LongSummaryStatistics::accept,
+ LongSummaryStatistics::combine);
+ }
+
+ @Override
+ public final long reduce(long identity, LongBinaryOperator op) {
+ return evaluate(ReduceOps.makeLong(identity, op));
+ }
+
+ @Override
+ public final OptionalLong reduce(LongBinaryOperator op) {
+ return evaluate(ReduceOps.makeLong(op));
+ }
+
+ @Override
+ public final <R> R collect(Supplier<R> supplier,
+ ObjLongConsumer<R> accumulator,
+ BiConsumer<R, R> combiner) {
+ BinaryOperator<R> operator = (left, right) -> {
+ combiner.accept(left, right);
+ return left;
+ };
+ return evaluate(ReduceOps.makeLong(supplier, accumulator, operator));
+ }
+
+ @Override
+ public final boolean anyMatch(LongPredicate predicate) {
+ // Short-circuits (returns true) on the first matching element.
+ return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.ANY));
+ }
+
+ @Override
+ public final boolean allMatch(LongPredicate predicate) {
+ // Short-circuits (returns false) on the first non-matching element.
+ return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.ALL));
+ }
+
+ @Override
+ public final boolean noneMatch(LongPredicate predicate) {
+ // Short-circuits (returns false) on the first matching element.
+ return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.NONE));
+ }
+
+ @Override
+ public final OptionalLong findFirst() {
+ // 'true' = must return the first element in encounter order.
+ return evaluate(FindOps.makeLong(true));
+ }
+
+ @Override
+ public final OptionalLong findAny() {
+ // 'false' = any element may be returned (nondeterministic in parallel).
+ return evaluate(FindOps.makeLong(false));
+ }
+
+ @Override
+ public final long[] toArray() {
+ // Evaluate to a (possibly tree-shaped) Node, then flatten it into a
+ // single contiguous primitive long[].
+ return Nodes.flattenLong((Node.OfLong) evaluateToArrayNode(Long[]::new))
+ .asPrimitiveArray();
+ }
+
+
+ //
+
+ /**
+ * Source stage of a LongPipeline.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+ static class Head<E_IN> extends LongPipeline<E_IN> {
+ /**
+ * Constructor for the source stage of a LongStream.
+ *
+ * @param source {@code Supplier<Spliterator>} describing the stream
+ * source
+ * @param sourceFlags the source flags for the stream source, described
+ * in {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ Head(Supplier<? extends Spliterator<Long>> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for the source stage of a LongStream.
+ *
+ * @param source {@code Spliterator} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described
+ * in {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ Head(Spliterator<Long> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ // A source stage represents no intermediate operation, so the op-related
+ // hooks below must never be invoked on a Head.
+ @Override
+ final boolean opIsStateful() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ final Sink<E_IN> opWrapSink(int flags, Sink<Long> sink) {
+ throw new UnsupportedOperationException();
+ }
+
+ // Optimized sequential terminal operations for the head of the pipeline
+
+ @Override
+ public void forEach(LongConsumer action) {
+ if (!isParallel()) {
+ // Sequential fast path: traverse the source spliterator directly,
+ // bypassing the general pipeline evaluation machinery.
+ adapt(sourceStageSpliterator()).forEachRemaining(action);
+ } else {
+ super.forEach(action);
+ }
+ }
+
+ @Override
+ public void forEachOrdered(LongConsumer action) {
+ if (!isParallel()) {
+ // Sequential traversal is inherently in encounter order.
+ adapt(sourceStageSpliterator()).forEachRemaining(action);
+ } else {
+ super.forEachOrdered(action);
+ }
+ }
+ }
+
+ /** Base class for a stateless intermediate stage of a LongStream.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+ abstract static class StatelessOp<E_IN> extends LongPipeline<E_IN> {
+ /**
+ * Construct a new LongStream by appending a stateless intermediate
+ * operation to an existing stream.
+ * @param upstream The upstream pipeline stage
+ * @param inputShape The stream shape for the upstream pipeline stage
+ * @param opFlags Operation flags for the new stage
+ */
+ StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
+ StreamShape inputShape,
+ int opFlags) {
+ super(upstream, opFlags);
+ // Sanity check: the upstream stage must produce the shape this op consumes.
+ assert upstream.getOutputShape() == inputShape;
+ }
+
+ @Override
+ final boolean opIsStateful() {
+ return false;
+ }
+ }
+
+ /**
+ * Base class for a stateful intermediate stage of a LongStream.
+ *
+ * @param <E_IN> type of elements in the upstream source
+ * @since 1.8
+ */
+ abstract static class StatefulOp<E_IN> extends LongPipeline<E_IN> {
+ /**
+ * Construct a new LongStream by appending a stateful intermediate
+ * operation to an existing stream.
+ * @param upstream The upstream pipeline stage
+ * @param inputShape The stream shape for the upstream pipeline stage
+ * @param opFlags Operation flags for the new stage
+ */
+ StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
+ StreamShape inputShape,
+ int opFlags) {
+ super(upstream, opFlags);
+ // Sanity check: the upstream stage must produce the shape this op consumes.
+ assert upstream.getOutputShape() == inputShape;
+ }
+
+ @Override
+ final boolean opIsStateful() {
+ return true;
+ }
+
+ // Unlike stateless ops, stateful ops must supply their own parallel
+ // evaluation strategy; there is no generic default.
+ @Override
+ abstract <P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<Long[]> generator);
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/LongStream.java b/ojluni/src/main/java/java/util/stream/LongStream.java
new file mode 100644
index 0000000..33d74ec
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/LongStream.java
@@ -0,0 +1,920 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.LongSummaryStatistics;
+import java.util.Objects;
+import java.util.OptionalDouble;
+import java.util.OptionalLong;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.function.LongBinaryOperator;
+import java.util.function.LongConsumer;
+import java.util.function.LongFunction;
+import java.util.function.LongPredicate;
+import java.util.function.LongSupplier;
+import java.util.function.LongToDoubleFunction;
+import java.util.function.LongToIntFunction;
+import java.util.function.LongUnaryOperator;
+import java.util.function.ObjLongConsumer;
+import java.util.function.Supplier;
+
+/**
+ * A sequence of primitive long-valued elements supporting sequential and parallel
+ * aggregate operations. This is the {@code long} primitive specialization of
+ * {@link Stream}.
+ *
+ * <p>The following example illustrates an aggregate operation using
+ * {@link Stream} and {@link LongStream}, computing the sum of the weights of the
+ * red widgets:
+ *
+ * <pre>{@code
+ * long sum = widgets.stream()
+ * .filter(w -> w.getColor() == RED)
+ * .mapToLong(w -> w.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * See the class documentation for {@link Stream} and the package documentation
+ * for <a href="package-summary.html">java.util.stream</a> for additional
+ * specification of streams, stream operations, stream pipelines, and
+ * parallelism.
+ *
+ * @since 1.8
+ * @see Stream
+ * @see <a href="package-summary.html">java.util.stream</a>
+ */
+public interface LongStream extends BaseStream<Long, LongStream> {
+
+ /**
+ * Returns a stream consisting of the elements of this stream that match
+ * the given predicate.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to each element to determine if it
+ * should be included
+ * @return the new stream
+ */
+ LongStream filter(LongPredicate predicate);
+
+ /**
+ * Returns a stream consisting of the results of applying the given
+ * function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ LongStream map(LongUnaryOperator mapper);
+
+ /**
+ * Returns an object-valued {@code Stream} consisting of the results of
+ * applying the given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">
+ * intermediate operation</a>.
+ *
+ * @param <U> the element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ <U> Stream<U> mapToObj(LongFunction<? extends U> mapper);
+
+ /**
+ * Returns an {@code IntStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ IntStream mapToInt(LongToIntFunction mapper);
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ DoubleStream mapToDouble(LongToDoubleFunction mapper);
+
+ /**
+ * Returns a stream consisting of the results of replacing each element of
+ * this stream with the contents of a mapped stream produced by applying
+ * the provided mapping function to each element. Each mapped stream is
+ * {@link java.util.stream.BaseStream#close() closed} after its contents
+ * have been placed into this stream. (If a mapped stream is {@code null}
+ * an empty stream is used, instead.)
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element which produces a
+ * {@code LongStream} of new values
+ * @return the new stream
+ * @see Stream#flatMap(Function)
+ */
+ LongStream flatMap(LongFunction<? extends LongStream> mapper);
+
+ /**
+ * Returns a stream consisting of the distinct elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ LongStream distinct();
+
+ /**
+ * Returns a stream consisting of the elements of this stream in sorted
+ * order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ LongStream sorted();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, additionally
+ * performing the provided action on each element as elements are consumed
+ * from the resulting stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, the action may be called at
+ * whatever time and in whatever thread the element is made available by the
+ * upstream operation. If the action modifies shared state,
+ * it is responsible for providing the required synchronization.
+ *
+ * @apiNote This method exists mainly to support debugging, where you want
+ * to see the elements as they flow past a certain point in a pipeline:
+ * <pre>{@code
+ * LongStream.of(1, 2, 3, 4)
+ * .filter(e -> e > 2)
+ * .peek(e -> System.out.println("Filtered value: " + e))
+ * .map(e -> e * e)
+ * .peek(e -> System.out.println("Mapped value: " + e))
+ * .sum();
+ * }</pre>
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements as
+ * they are consumed from the stream
+ * @return the new stream
+ */
+ LongStream peek(LongConsumer action);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, truncated
+ * to be no longer than {@code maxSize} in length.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @apiNote
+ * While {@code limit()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code maxSize}, since {@code limit(n)}
+ * is constrained to return not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(LongSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code limit()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code limit()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param maxSize the number of elements the stream should be limited to
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code maxSize} is negative
+ */
+ LongStream limit(long maxSize);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after discarding the first {@code n} elements of the stream.
+ * If this stream contains fewer than {@code n} elements then an
+ * empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @apiNote
+ * While {@code skip()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code n}, since {@code skip(n)}
+ * is constrained to skip not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(LongSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code skip()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code skip()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param n the number of leading elements to skip
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code n} is negative
+ */
+ LongStream skip(long n);
+
+ /**
+ * Performs an action for each element of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, this operation does <em>not</em>
+ * guarantee to respect the encounter order of the stream, as doing so
+ * would sacrifice the benefit of parallelism. For any given element, the
+ * action may be performed at whatever time and in whatever thread the
+ * library chooses. If the action accesses shared state, it is
+ * responsible for providing the required synchronization.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ */
+ void forEach(LongConsumer action);
+
+ /**
+ * Performs an action for each element of this stream, guaranteeing that
+ * each element is processed in encounter order for streams that have a
+ * defined encounter order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ * @see #forEach(LongConsumer)
+ */
+ void forEachOrdered(LongConsumer action);
+
+ /**
+ * Returns an array containing the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an array containing the elements of this stream
+ */
+ long[] toArray();
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity value and an
+ * <a href="package-summary.html#Associativity">associative</a>
+ * accumulation function, and returns the reduced value. This is equivalent
+ * to:
+ * <pre>{@code
+ * long result = identity;
+ * for (long element : this stream)
+ * result = accumulator.applyAsLong(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the accumulator
+ * function. This means that for all {@code x},
+ * {@code accumulator.apply(identity, x)} is equal to {@code x}.
+ * The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Sum, min, max, and average are all special cases of reduction.
+ * Summing a stream of numbers can be expressed as:
+ *
+ * <pre>{@code
+ * long sum = integers.reduce(0, (a, b) -> a+b);
+ * }</pre>
+ *
+ * or more compactly:
+ *
+ * <pre>{@code
+ * long sum = integers.reduce(0, Long::sum);
+ * }</pre>
+ *
+ * <p>While this may seem a more roundabout way to perform an aggregation
+ * compared to simply mutating a running total in a loop, reduction
+ * operations parallelize more gracefully, without needing additional
+ * synchronization and with greatly reduced risk of data races.
+ *
+ * @param identity the identity value for the accumulating function
+ * @param op an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values
+ * @return the result of the reduction
+ * @see #sum()
+ * @see #min()
+ * @see #max()
+ * @see #average()
+ */
+ long reduce(long identity, LongBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using an
+ * <a href="package-summary.html#Associativity">associative</a> accumulation
+ * function, and returns an {@code OptionalLong} describing the reduced value,
+ * if any. This is equivalent to:
+ * <pre>{@code
+ * boolean foundAny = false;
+ * long result = null;
+ * for (long element : this stream) {
+ * if (!foundAny) {
+ * foundAny = true;
+ * result = element;
+ * }
+ * else
+ * result = accumulator.applyAsLong(result, element);
+ * }
+ * return foundAny ? OptionalLong.of(result) : OptionalLong.empty();
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param op an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values
+ * @return the result of the reduction
+ * @see #reduce(long, LongBinaryOperator)
+ */
+ OptionalLong reduce(LongBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream. A mutable
+ * reduction is one in which the reduced value is a mutable result container,
+ * such as an {@code ArrayList}, and elements are incorporated by updating
+ * the state of the result rather than by replacing the result. This
+ * produces a result equivalent to:
+ * <pre>{@code
+ * R result = supplier.get();
+ * for (long element : this stream)
+ * accumulator.accept(result, element);
+ * return result;
+ * }</pre>
+ *
+ * <p>Like {@link #reduce(long, LongBinaryOperator)}, {@code collect} operations
+ * can be parallelized without requiring additional synchronization.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param <R> type of the result
+ * @param supplier a function that creates a new result container. For a
+ * parallel execution, this function may be called
+ * multiple times and must return a fresh value each time.
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for incorporating an additional element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values, which must be
+ * compatible with the accumulator function
+ * @return the result of the reduction
+ * @see Stream#collect(Supplier, BiConsumer, BiConsumer)
+ */
+ <R> R collect(Supplier<R> supplier,
+ ObjLongConsumer<R> accumulator,
+ BiConsumer<R, R> combiner);
+
+ /**
+ * Returns the sum of elements in this stream. This is a special case
+ * of a <a href="package-summary.html#Reduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(0, Long::sum);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return the sum of elements in this stream
+ */
+ long sum();
+
+ /**
+ * Returns an {@code OptionalLong} describing the minimum element of this
+ * stream, or an empty optional if this stream is empty. This is a special
+ * case of a <a href="package-summary.html#Reduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(Long::min);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return an {@code OptionalLong} containing the minimum element of this
+ * stream, or an empty {@code OptionalLong} if the stream is empty
+ */
+ OptionalLong min();
+
+ /**
+ * Returns an {@code OptionalLong} describing the maximum element of this
+ * stream, or an empty optional if this stream is empty. This is a special
+ * case of a <a href="package-summary.html#Reduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(Long::max);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code OptionalLong} containing the maximum element of this
+ * stream, or an empty {@code OptionalLong} if the stream is empty
+ */
+ OptionalLong max();
+
+ /**
+ * Returns the count of elements in this stream. This is a special case of
+ * a <a href="package-summary.html#Reduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return map(e -> 1L).sum();
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return the count of elements in this stream
+ */
+ long count();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the arithmetic mean of elements of
+ * this stream, or an empty optional if this stream is empty. This is a
+ * special case of a
+ * <a href="package-summary.html#Reduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code OptionalDouble} containing the average element of this
+ * stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble average();
+
+ /**
+ * Returns a {@code LongSummaryStatistics} describing various summary data
+ * about the elements of this stream. This is a special case of a
+ * <a href="package-summary.html#Reduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return a {@code LongSummaryStatistics} describing various summary data
+ * about the elements of this stream
+ */
+ LongSummaryStatistics summaryStatistics();
+
+ /**
+ * Returns whether any elements of this stream match the provided
+ * predicate. May not evaluate the predicate on all elements if not
+ * necessary for determining the result. If the stream is empty then
+ * {@code false} is returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>existential quantification</em> of the
+ * predicate over the elements of the stream (for some x P(x)).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if any elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean anyMatch(LongPredicate predicate);
+
+ /**
+ * Returns whether all elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result. If the stream is empty then {@code true} is
+ * returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>universal quantification</em> of the
+ * predicate over the elements of the stream (for all x P(x)). If the
+ * stream is empty, the quantification is said to be <em>vacuously
+ * satisfied</em> and is always {@code true} (regardless of P(x)).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if either all elements of the stream match the
+ * provided predicate or the stream is empty, otherwise {@code false}
+ */
+ boolean allMatch(LongPredicate predicate);
+
+ /**
+ * Returns whether no elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result. If the stream is empty then {@code true} is
+ * returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>universal quantification</em> of the
+ * negated predicate over the elements of the stream (for all x ~P(x)). If
+ * the stream is empty, the quantification is said to be vacuously satisfied
+ * and is always {@code true}, regardless of P(x).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if either no elements of the stream match the
+ * provided predicate or the stream is empty, otherwise {@code false}
+ */
+ boolean noneMatch(LongPredicate predicate);
+
+ /**
+ * Returns an {@link OptionalLong} describing the first element of this
+ * stream, or an empty {@code OptionalLong} if the stream is empty. If the
+ * stream has no encounter order, then any element may be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @return an {@code OptionalLong} describing the first element of this
+ * stream, or an empty {@code OptionalLong} if the stream is empty
+ */
+ OptionalLong findFirst();
+
+ /**
+ * Returns an {@link OptionalLong} describing some element of the stream, or
+ * an empty {@code OptionalLong} if the stream is empty.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic; it is
+ * free to select any element in the stream. This is to allow for maximal
+ * performance in parallel operations; the cost is that multiple invocations
+ * on the same source may not return the same result. (If a stable result
+ * is desired, use {@link #findFirst()} instead.)
+ *
+ * @return an {@code OptionalLong} describing some element of this stream,
+ * or an empty {@code OptionalLong} if the stream is empty
+ * @see #findFirst()
+ */
+ OptionalLong findAny();
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the elements of this stream,
+ * converted to {@code double}.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @return a {@code DoubleStream} consisting of the elements of this stream,
+ * converted to {@code double}
+ */
+ DoubleStream asDoubleStream();
+
+ /**
+ * Returns a {@code Stream} consisting of the elements of this stream,
+ * each boxed to a {@code Long}.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @return a {@code Stream} consisting of the elements of this stream,
+ * each boxed to {@code Long}
+ */
+ Stream<Long> boxed();
+
+ @Override
+ LongStream sequential();
+
+ @Override
+ LongStream parallel();
+
+ @Override
+ PrimitiveIterator.OfLong iterator();
+
+ @Override
+ Spliterator.OfLong spliterator();
+
+ // Static factories
+
+ /**
+ * Returns a builder for a {@code LongStream}.
+ *
+ * @return a stream builder
+ */
+ public static Builder builder() {
+ // A fresh mutable builder per call; see Streams.LongStreamBuilderImpl.
+ return new Streams.LongStreamBuilderImpl();
+ }
+
+ /**
+ * Returns an empty sequential {@code LongStream}.
+ *
+ * @return an empty sequential stream
+ */
+ public static LongStream empty() {
+ // Backed by the shared empty spliterator; 'false' = sequential.
+ return StreamSupport.longStream(Spliterators.emptyLongSpliterator(), false);
+ }
+
+ /**
+ * Returns a sequential {@code LongStream} containing a single element.
+ *
+ * @param t the single element
+ * @return a singleton sequential stream
+ */
+ public static LongStream of(long t) {
+ // A builder pre-loaded with the single element doubles as the spliterator source.
+ return StreamSupport.longStream(new Streams.LongStreamBuilderImpl(t), false);
+ }
+
+ /**
+ * Returns a sequential ordered stream whose elements are the specified values.
+ *
+ * @param values the elements of the new stream
+ * @return the new stream
+ */
+ public static LongStream of(long... values) {
+ // Delegates to Arrays.stream(long[]); the varargs array backs the stream.
+ return Arrays.stream(values);
+ }
+
+ /**
+ * Returns an infinite sequential ordered {@code LongStream} produced by iterative
+ * application of a function {@code f} to an initial element {@code seed},
+ * producing a {@code Stream} consisting of {@code seed}, {@code f(seed)},
+ * {@code f(f(seed))}, etc.
+ *
+ * <p>The first element (position {@code 0}) in the {@code LongStream} will
+ * be the provided {@code seed}. For {@code n > 0}, the element at position
+ * {@code n}, will be the result of applying the function {@code f} to the
+ * element at position {@code n - 1}.
+ *
+ * @param seed the initial element
+ * @param f a function to be applied to the previous element to produce
+ * a new element
+ * @return a new sequential {@code LongStream}
+ */
+ public static LongStream iterate(final long seed, final LongUnaryOperator f) {
+ Objects.requireNonNull(f);
+ final PrimitiveIterator.OfLong iterator = new PrimitiveIterator.OfLong() {
+ // Next value to emit; advances by one application of f per element.
+ long t = seed;
+
+ @Override
+ public boolean hasNext() {
+ // Always true: the stream is infinite; callers truncate via limit().
+ return true;
+ }
+
+ @Override
+ public long nextLong() {
+ long v = t;
+ t = f.applyAsLong(t);
+ return v;
+ }
+ };
+ return StreamSupport.longStream(Spliterators.spliteratorUnknownSize(
+ iterator,
+ Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL), false);
+ }
+
+ /**
+ * Returns an infinite sequential unordered stream where each element is
+ * generated by the provided {@code LongSupplier}. This is suitable for
+ * generating constant streams, streams of random elements, etc.
+ *
+ * @param s the {@code LongSupplier} for generated elements
+ * @return a new infinite sequential unordered {@code LongStream}
+ */
+ public static LongStream generate(LongSupplier s) {
+ Objects.requireNonNull(s);
+ return StreamSupport.longStream(
+ new StreamSpliterators.InfiniteSupplyingSpliterator.OfLong(Long.MAX_VALUE, s), false);
+ }
+
+ /**
+ * Returns a sequential ordered {@code LongStream} from {@code startInclusive}
+ * (inclusive) to {@code endExclusive} (exclusive) by an incremental step of
+ * {@code 1}.
+ *
+ * @apiNote
+ * <p>An equivalent sequence of increasing values can be produced
+ * sequentially using a {@code for} loop as follows:
+ * <pre>{@code
+ * for (long i = startInclusive; i < endExclusive ; i++) { ... }
+ * }</pre>
+ *
+ * @param startInclusive the (inclusive) initial value
+ * @param endExclusive the exclusive upper bound
+ * @return a sequential {@code LongStream} for the range of {@code long}
+ * elements
+ */
+ public static LongStream range(long startInclusive, final long endExclusive) {
+ if (startInclusive >= endExclusive) {
+ return empty();
+ } else if (endExclusive - startInclusive < 0) {
+ // Size of range > Long.MAX_VALUE
+ // Split the range in two and concatenate
+ // Note: if the range is [Long.MIN_VALUE, Long.MAX_VALUE) then
+ // the lower range, [Long.MIN_VALUE, 0) will be further split in two
+ // Android-changed: no divideUnsigned support yet
+ long m = startInclusive + ((endExclusive - startInclusive) / 2) + 1;
+ return concat(range(startInclusive, m), range(m, endExclusive));
+ } else {
+ return StreamSupport.longStream(
+ new Streams.RangeLongSpliterator(startInclusive, endExclusive, false), false);
+ }
+ }
+
+ /**
+ * Returns a sequential ordered {@code LongStream} from {@code startInclusive}
+ * (inclusive) to {@code endInclusive} (inclusive) by an incremental step of
+ * {@code 1}.
+ *
+ * @apiNote
+ * <p>An equivalent sequence of increasing values can be produced
+ * sequentially using a {@code for} loop as follows:
+ * <pre>{@code
+ * for (long i = startInclusive; i <= endInclusive ; i++) { ... }
+ * }</pre>
+ *
+ * @param startInclusive the (inclusive) initial value
+ * @param endInclusive the inclusive upper bound
+ * @return a sequential {@code LongStream} for the range of {@code long}
+ * elements
+ */
+ public static LongStream rangeClosed(long startInclusive, final long endInclusive) {
+ if (startInclusive > endInclusive) {
+ return empty();
+ } else if (endInclusive - startInclusive + 1 <= 0) {
+ // Size of range > Long.MAX_VALUE
+ // Split the range in two and concatenate
+ // Note: if the range is [Long.MIN_VALUE, Long.MAX_VALUE] then
+ // the lower range, [Long.MIN_VALUE, 0), and upper range,
+ // [0, Long.MAX_VALUE], will both be further split in two
+ // Android-changed: no divideUnsigned support yet
+ long m = startInclusive + ((endInclusive - startInclusive) / 2) + 1;
+ return concat(range(startInclusive, m), rangeClosed(m, endInclusive));
+ } else {
+ return StreamSupport.longStream(
+ new Streams.RangeLongSpliterator(startInclusive, endInclusive, true), false);
+ }
+ }
+
+ /**
+ * Creates a lazily concatenated stream whose elements are all the
+ * elements of the first stream followed by all the elements of the
+ * second stream. The resulting stream is ordered if both
+ * of the input streams are ordered, and parallel if either of the input
+ * streams is parallel. When the resulting stream is closed, the close
+ * handlers for both input streams are invoked.
+ *
+ * @implNote
+ * Use caution when constructing streams from repeated concatenation.
+ * Accessing an element of a deeply concatenated stream can result in deep
+ * call chains, or even {@code StackOverflowError}.
+ *
+ * @param a the first stream
+ * @param b the second stream
+ * @return the concatenation of the two input streams
+ */
+ public static LongStream concat(LongStream a, LongStream b) {
+ Objects.requireNonNull(a);
+ Objects.requireNonNull(b);
+
+ Spliterator.OfLong split = new Streams.ConcatSpliterator.OfLong(
+ a.spliterator(), b.spliterator());
+ LongStream stream = StreamSupport.longStream(split, a.isParallel() || b.isParallel());
+ return stream.onClose(Streams.composedClose(a, b));
+ }
+
+ /**
+ * A mutable builder for a {@code LongStream}.
+ *
+ * <p>A stream builder has a lifecycle, which starts in a building
+ * phase, during which elements can be added, and then transitions to a built
+ * phase, after which elements may not be added. The built phase begins
+ * when the {@link #build()} method is called, which creates an
+ * ordered stream whose elements are the elements that were added to the
+ * stream builder, in the order they were added.
+ *
+ * @see LongStream#builder()
+ * @since 1.8
+ */
+ public interface Builder extends LongConsumer {
+
+ /**
+ * Adds an element to the stream being built.
+ *
+ * @throws IllegalStateException if the builder has already transitioned
+ * to the built state
+ */
+ @Override
+ void accept(long t);
+
+ /**
+ * Adds an element to the stream being built.
+ *
+ * @implSpec
+ * The default implementation behaves as if:
+ * <pre>{@code
+ * accept(t)
+ * return this;
+ * }</pre>
+ *
+ * @param t the element to add
+ * @return {@code this} builder
+ * @throws IllegalStateException if the builder has already transitioned
+ * to the built state
+ */
+ default Builder add(long t) {
+ accept(t);
+ return this;
+ }
+
+ /**
+ * Builds the stream, transitioning this builder to the built state.
+ * An {@code IllegalStateException} is thrown if there are further
+ * attempts to operate on the builder after it has entered the built
+ * state.
+ *
+ * @return the built stream
+ * @throws IllegalStateException if the builder has already transitioned
+ * to the built state
+ */
+ LongStream build();
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/MatchOps.java b/ojluni/src/main/java/java/util/stream/MatchOps.java
new file mode 100644
index 0000000..cc809e4
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/MatchOps.java
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.function.DoublePredicate;
+import java.util.function.IntPredicate;
+import java.util.function.LongPredicate;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+/**
+ * Factory for instances of a short-circuiting {@code TerminalOp} that implement
+ * quantified predicate matching on the elements of a stream. Supported variants
+ * include match-all, match-any, and match-none.
+ *
+ * @since 1.8
+ */
+final class MatchOps {
+
+ private MatchOps() { }
+
+ /**
+ * Enum describing quantified match options -- all match, any match, none
+ * match.
+ */
+ enum MatchKind {
+ /** Do any elements match the predicate? */
+ ANY(true, true),
+
+ /** Do all elements match the predicate? */
+ ALL(false, false),
+
+ /** Do no elements match the predicate? */
+ NONE(true, false);
+
+ private final boolean stopOnPredicateMatches;
+ private final boolean shortCircuitResult;
+
+ private MatchKind(boolean stopOnPredicateMatches,
+ boolean shortCircuitResult) {
+ this.stopOnPredicateMatches = stopOnPredicateMatches;
+ this.shortCircuitResult = shortCircuitResult;
+ }
+ }
+
+ /**
+ * Constructs a quantified predicate matcher for a Stream.
+ *
+ * @param <T> the type of stream elements
+ * @param predicate the {@code Predicate} to apply to stream elements
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @return a {@code TerminalOp} implementing the desired quantified match
+ * criteria
+ */
+ public static <T> TerminalOp<T, Boolean> makeRef(Predicate<? super T> predicate,
+ MatchKind matchKind) {
+ Objects.requireNonNull(predicate);
+ Objects.requireNonNull(matchKind);
+ class MatchSink extends BooleanTerminalSink<T> {
+ MatchSink() {
+ super(matchKind);
+ }
+
+ @Override
+ public void accept(T t) {
+ if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
+ stop = true;
+ value = matchKind.shortCircuitResult;
+ }
+ }
+ }
+
+ return new MatchOp<>(StreamShape.REFERENCE, matchKind, MatchSink::new);
+ }
+
+ /**
+ * Constructs a quantified predicate matcher for an {@code IntStream}.
+ *
+ * @param predicate the {@code Predicate} to apply to stream elements
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @return a {@code TerminalOp} implementing the desired quantified match
+ * criteria
+ */
+ public static TerminalOp<Integer, Boolean> makeInt(IntPredicate predicate,
+ MatchKind matchKind) {
+ Objects.requireNonNull(predicate);
+ Objects.requireNonNull(matchKind);
+ class MatchSink extends BooleanTerminalSink<Integer> implements Sink.OfInt {
+ MatchSink() {
+ super(matchKind);
+ }
+
+ @Override
+ public void accept(int t) {
+ if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
+ stop = true;
+ value = matchKind.shortCircuitResult;
+ }
+ }
+ }
+
+ return new MatchOp<>(StreamShape.INT_VALUE, matchKind, MatchSink::new);
+ }
+
+ /**
+ * Constructs a quantified predicate matcher for a {@code LongStream}.
+ *
+ * @param predicate the {@code Predicate} to apply to stream elements
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @return a {@code TerminalOp} implementing the desired quantified match
+ * criteria
+ */
+ public static TerminalOp<Long, Boolean> makeLong(LongPredicate predicate,
+ MatchKind matchKind) {
+ Objects.requireNonNull(predicate);
+ Objects.requireNonNull(matchKind);
+ class MatchSink extends BooleanTerminalSink<Long> implements Sink.OfLong {
+
+ MatchSink() {
+ super(matchKind);
+ }
+
+ @Override
+ public void accept(long t) {
+ if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
+ stop = true;
+ value = matchKind.shortCircuitResult;
+ }
+ }
+ }
+
+ return new MatchOp<>(StreamShape.LONG_VALUE, matchKind, MatchSink::new);
+ }
+
+ /**
+ * Constructs a quantified predicate matcher for a {@code DoubleStream}.
+ *
+ * @param predicate the {@code Predicate} to apply to stream elements
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @return a {@code TerminalOp} implementing the desired quantified match
+ * criteria
+ */
+ public static TerminalOp<Double, Boolean> makeDouble(DoublePredicate predicate,
+ MatchKind matchKind) {
+ Objects.requireNonNull(predicate);
+ Objects.requireNonNull(matchKind);
+ class MatchSink extends BooleanTerminalSink<Double> implements Sink.OfDouble {
+
+ MatchSink() {
+ super(matchKind);
+ }
+
+ @Override
+ public void accept(double t) {
+ if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
+ stop = true;
+ value = matchKind.shortCircuitResult;
+ }
+ }
+ }
+
+ return new MatchOp<>(StreamShape.DOUBLE_VALUE, matchKind, MatchSink::new);
+ }
+
+ /**
+ * A short-circuiting {@code TerminalOp} that evaluates a predicate on the
+ * elements of a stream and determines whether all, any or none of those
+ * elements match the predicate.
+ *
+ * @param <T> the output type of the stream pipeline
+ */
+ private static final class MatchOp<T> implements TerminalOp<T, Boolean> {
+ private final StreamShape inputShape;
+ final MatchKind matchKind;
+ final Supplier<BooleanTerminalSink<T>> sinkSupplier;
+
+ /**
+ * Constructs a {@code MatchOp}.
+ *
+ * @param shape the output shape of the stream pipeline
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @param sinkSupplier {@code Supplier} for a {@code Sink} of the
+ * appropriate shape which implements the matching operation
+ */
+ MatchOp(StreamShape shape,
+ MatchKind matchKind,
+ Supplier<BooleanTerminalSink<T>> sinkSupplier) {
+ this.inputShape = shape;
+ this.matchKind = matchKind;
+ this.sinkSupplier = sinkSupplier;
+ }
+
+ @Override
+ public int getOpFlags() {
+ return StreamOpFlag.IS_SHORT_CIRCUIT | StreamOpFlag.NOT_ORDERED;
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return inputShape;
+ }
+
+ @Override
+ public <S> Boolean evaluateSequential(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ return helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).getAndClearState();
+ }
+
+ @Override
+ public <S> Boolean evaluateParallel(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ // Approach for parallel implementation:
+ // - Decompose as per usual
+ // - run match on leaf chunks, call result "b"
+ // - if b == matchKind.shortCircuitResult, complete early and return b
+ // - else if we complete normally, return !shortCircuitResult
+
+ return new MatchTask<>(this, helper, spliterator).invoke();
+ }
+ }
+
+ /**
+ * Boolean specific terminal sink to avoid the boxing costs when returning
+ * results. Subclasses implement the shape-specific functionality.
+ *
+ * @param <T> The output type of the stream pipeline
+ */
+ private static abstract class BooleanTerminalSink<T> implements Sink<T> {
+ boolean stop;
+ boolean value;
+
+ BooleanTerminalSink(MatchKind matchKind) {
+ value = !matchKind.shortCircuitResult;
+ }
+
+ public boolean getAndClearState() {
+ return value;
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return stop;
+ }
+ }
+
+ /**
+ * ForkJoinTask implementation to implement a parallel short-circuiting
+ * quantified match
+ *
+ * @param <P_IN> the type of source elements for the pipeline
+ * @param <P_OUT> the type of output elements for the pipeline
+ */
+ @SuppressWarnings("serial")
+ private static final class MatchTask<P_IN, P_OUT>
+ extends AbstractShortCircuitTask<P_IN, P_OUT, Boolean, MatchTask<P_IN, P_OUT>> {
+ private final MatchOp<P_OUT> op;
+
+ /**
+ * Constructor for root node
+ */
+ MatchTask(MatchOp<P_OUT> op, PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ super(helper, spliterator);
+ this.op = op;
+ }
+
+ /**
+ * Constructor for non-root node
+ */
+ MatchTask(MatchTask<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ this.op = parent.op;
+ }
+
+ @Override
+ protected MatchTask<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator) {
+ return new MatchTask<>(this, spliterator);
+ }
+
+ @Override
+ protected Boolean doLeaf() {
+ boolean b = helper.wrapAndCopyInto(op.sinkSupplier.get(), spliterator).getAndClearState();
+ if (b == op.matchKind.shortCircuitResult)
+ shortCircuit(b);
+ return null;
+ }
+
+ @Override
+ protected Boolean getEmptyResult() {
+ return !op.matchKind.shortCircuitResult;
+ }
+ }
+}
+
diff --git a/ojluni/src/main/java/java/util/stream/Node.java b/ojluni/src/main/java/java/util/stream/Node.java
new file mode 100644
index 0000000..2b4360b
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/Node.java
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.LongConsumer;
+
+/**
+ * An immutable container for describing an ordered sequence of elements of some
+ * type {@code T}.
+ *
+ * <p>A {@code Node} contains a fixed number of elements, which can be accessed
+ * via the {@link #count}, {@link #spliterator}, {@link #forEach},
+ * {@link #asArray}, or {@link #copyInto} methods. A {@code Node} may have zero
+ * or more child {@code Node}s; if it has no children (accessed via
+ * {@link #getChildCount} and {@link #getChild(int)}), it is considered <em>flat
+ * </em> or a <em>leaf</em>; if it has children, it is considered an
+ * <em>internal</em> node. The size of an internal node is the sum of sizes of
+ * its children.
+ *
+ * @apiNote
+ * <p>A {@code Node} typically does not store the elements directly, but instead
+ * mediates access to one or more existing (effectively immutable) data
+ * structures such as a {@code Collection}, array, or a set of other
+ * {@code Node}s. Commonly {@code Node}s are formed into a tree whose shape
+ * corresponds to the computation tree that produced the elements that are
+ * contained in the leaf nodes. The use of {@code Node} within the stream
+ * framework is largely to avoid copying data unnecessarily during parallel
+ * operations.
+ *
+ * @param <T> the type of elements.
+ * @since 1.8
+ */
+interface Node<T> {
+
+ /**
+ * Returns a {@link Spliterator} describing the elements contained in this
+ * {@code Node}.
+ *
+ * @return a {@code Spliterator} describing the elements contained in this
+ * {@code Node}
+ */
+ Spliterator<T> spliterator();
+
+ /**
+ * Traverses the elements of this node, and invokes the provided
+ * {@code Consumer} with each element. Elements are provided in encounter
+ * order if the source for the {@code Node} has a defined encounter order.
+ *
+ * @param consumer a {@code Consumer} that is to be invoked with each
+ * element in this {@code Node}
+ */
+ void forEach(Consumer<? super T> consumer);
+
+ /**
+ * Returns the number of child nodes of this node.
+ *
+ * @implSpec The default implementation returns zero.
+ *
+ * @return the number of child nodes
+ */
+ default int getChildCount() {
+ return 0;
+ }
+
+ /**
+ * Retrieves the child {@code Node} at a given index.
+ *
+ * @implSpec The default implementation always throws
+ * {@code IndexOutOfBoundsException}.
+ *
+ * @param i the index to the child node
+ * @return the child node
+ * @throws IndexOutOfBoundsException if the index is less than 0 or greater
+ * than or equal to the number of child nodes
+ */
+ default Node<T> getChild(int i) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ /**
+ * Return a node describing a subsequence of the elements of this node,
+ * starting at the given inclusive start offset and ending at the given
+ * exclusive end offset.
+ *
+ * @param from The (inclusive) starting offset of elements to include, must
+ * be in range 0..count().
+ * @param to The (exclusive) end offset of elements to include, must be
+ * in range 0..count().
+ * @param generator A function to be used to create a new array, if needed,
+ * for reference nodes.
+ * @return the truncated node
+ */
+ default Node<T> truncate(long from, long to, IntFunction<T[]> generator) {
+ if (from == 0 && to == count())
+ return this;
+ Spliterator<T> spliterator = spliterator();
+ long size = to - from;
+ Node.Builder<T> nodeBuilder = Nodes.builder(size, generator);
+ nodeBuilder.begin(size);
+ for (int i = 0; i < from && spliterator.tryAdvance(e -> { }); i++) { }
+ for (int i = 0; (i < size) && spliterator.tryAdvance(nodeBuilder); i++) { }
+ nodeBuilder.end();
+ return nodeBuilder.build();
+ }
+
+ /**
+ * Provides an array view of the contents of this node.
+ *
+ * <p>Depending on the underlying implementation, this may return a
+ * reference to an internal array rather than a copy. Since the returned
+ * array may be shared, the returned array should not be modified. The
+ * {@code generator} function may be consulted to create the array if a new
+ * array needs to be created.
+ *
+ * @param generator a factory function which takes an integer parameter and
+ * returns a new, empty array of that size and of the appropriate
+ * array type
+ * @return an array containing the contents of this {@code Node}
+ */
+ T[] asArray(IntFunction<T[]> generator);
+
+ /**
+ * Copies the content of this {@code Node} into an array, starting at a
+ * given offset into the array. It is the caller's responsibility to ensure
+ * there is sufficient room in the array, otherwise unspecified behaviour
+ * will occur if the array length is less than the number of elements
+ * contained in this node.
+ *
+ * @param array the array into which to copy the contents of this
+ * {@code Node}
+ * @param offset the starting offset within the array
+ * @throws IndexOutOfBoundsException if copying would cause access of data
+ * outside array bounds
+ * @throws NullPointerException if {@code array} is {@code null}
+ */
+ void copyInto(T[] array, int offset);
+
+ /**
+ * Gets the {@code StreamShape} associated with this {@code Node}.
+ *
+ * @implSpec The default in {@code Node} returns
+ * {@code StreamShape.REFERENCE}
+ *
+ * @return the stream shape associated with this node
+ */
+ default StreamShape getShape() {
+ return StreamShape.REFERENCE;
+ }
+
+ /**
+ * Returns the number of elements contained in this node.
+ *
+ * @return the number of elements contained in this node
+ */
+ long count();
+
+ /**
+ * A mutable builder for a {@code Node} that implements {@link Sink}, which
+ * builds a flat node containing the elements that have been pushed to it.
+ */
+ interface Builder<T> extends Sink<T> {
+
+ /**
+ * Builds the node. Should be called after all elements have been
+ * pushed and signalled with an invocation of {@link Sink#end()}.
+ *
+ * @return the resulting {@code Node}
+ */
+ Node<T> build();
+
+ /**
+ * Specialized {@code Node.Builder} for int elements
+ */
+ interface OfInt extends Node.Builder<Integer>, Sink.OfInt {
+ @Override
+ Node.OfInt build();
+ }
+
+ /**
+ * Specialized {@code Node.Builder} for long elements
+ */
+ interface OfLong extends Node.Builder<Long>, Sink.OfLong {
+ @Override
+ Node.OfLong build();
+ }
+
+ /**
+ * Specialized {@code Node.Builder} for double elements
+ */
+ interface OfDouble extends Node.Builder<Double>, Sink.OfDouble {
+ @Override
+ Node.OfDouble build();
+ }
+ }
+
+ public interface OfPrimitive<T, T_CONS, T_ARR,
+ T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>,
+ T_NODE extends OfPrimitive<T, T_CONS, T_ARR, T_SPLITR, T_NODE>>
+ extends Node<T> {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return a {@link Spliterator.OfPrimitive} describing the elements of
+ * this node
+ */
+ @Override
+ T_SPLITR spliterator();
+
+ /**
+ * Traverses the elements of this node, and invokes the provided
+ * {@code action} with each element.
+ *
+ * @param action a consumer that is to be invoked with each
+ * element in this {@code Node.OfPrimitive}
+ */
+ @SuppressWarnings("overloads")
+ void forEach(T_CONS action);
+
+ @Override
+ default T_NODE getChild(int i) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ T_NODE truncate(long from, long to, IntFunction<T[]> generator);
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes the generator to create
+ * an instance of a boxed primitive array with a length of
+ * {@link #count()} and then invokes {@link #copyInto(T[], int)} with
+ * that array at an offset of 0.
+ */
+ @Override
+ default T[] asArray(IntFunction<T[]> generator) {
+ if (java.util.stream.Tripwire.ENABLED)
+ java.util.stream.Tripwire.trip(getClass(), "{0} calling Node.OfPrimitive.asArray");
+
+ long size = count();
+ if (size >= Nodes.MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(Nodes.BAD_SIZE);
+ T[] boxed = generator.apply((int) count());
+ copyInto(boxed, 0);
+ return boxed;
+ }
+
+ /**
+ * Views this node as a primitive array.
+ *
+ * <p>Depending on the underlying implementation this may return a
+ * reference to an internal array rather than a copy. It is the caller's
+ * responsibility to decide if either this node or the array is utilized
+ * as the primary reference for the data.</p>
+ *
+ * @return an array containing the contents of this {@code Node}
+ */
+ T_ARR asPrimitiveArray();
+
+ /**
+ * Creates a new primitive array.
+ *
+ * @param count the length of the primitive array.
+ * @return the new primitive array.
+ */
+ T_ARR newArray(int count);
+
+ /**
+ * Copies the content of this {@code Node} into a primitive array,
+ * starting at a given offset into the array. It is the caller's
+ * responsibility to ensure there is sufficient room in the array.
+ *
+ * @param array the array into which to copy the contents of this
+ * {@code Node}
+ * @param offset the starting offset within the array
+ * @throws IndexOutOfBoundsException if copying would cause access of
+ * data outside array bounds
+ * @throws NullPointerException if {@code array} is {@code null}
+ */
+ void copyInto(T_ARR array, int offset);
+ }
+
+ /**
+ * Specialized {@code Node} for int elements
+ */
+ interface OfInt extends OfPrimitive<Integer, IntConsumer, int[], Spliterator.OfInt, OfInt> {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param consumer a {@code Consumer} that is to be invoked with each
+ * element in this {@code Node}. If this is an
+ * {@code IntConsumer}, it is cast to {@code IntConsumer} so the
+ * elements may be processed without boxing.
+ */
+ @Override
+ default void forEach(Consumer<? super Integer> consumer) {
+ if (consumer instanceof IntConsumer) {
+ forEach((IntConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfInt.forEachRemaining(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes {@link #asPrimitiveArray()} to
+ * obtain an int[] array and then copies the elements from that int[]
+ * array into the boxed Integer[] array. This is not efficient and it
+ * is recommended to invoke {@link #copyInto(Object, int)}.
+ */
+ @Override
+ default void copyInto(Integer[] boxed, int offset) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfInt.copyInto(Integer[], int)");
+
+ int[] array = asPrimitiveArray();
+ for (int i = 0; i < array.length; i++) {
+ boxed[offset + i] = array[i];
+ }
+ }
+
+ @Override
+ default Node.OfInt truncate(long from, long to, IntFunction<Integer[]> generator) {
+ if (from == 0 && to == count())
+ return this;
+ long size = to - from;
+ Spliterator.OfInt spliterator = spliterator();
+ Node.Builder.OfInt nodeBuilder = Nodes.intBuilder(size);
+ nodeBuilder.begin(size);
+ for (int i = 0; i < from && spliterator.tryAdvance((IntConsumer) e -> { }); i++) { }
+ for (int i = 0; (i < size) && spliterator.tryAdvance((IntConsumer) nodeBuilder); i++) { }
+ nodeBuilder.end();
+ return nodeBuilder.build();
+ }
+
+ @Override
+ default int[] newArray(int count) {
+ return new int[count];
+ }
+
+ /**
+ * {@inheritDoc}
+ * @implSpec The default in {@code Node.OfInt} returns
+ * {@code StreamShape.INT_VALUE}
+ */
+ default StreamShape getShape() {
+ return StreamShape.INT_VALUE;
+ }
+ }
+
+ /**
+ * Specialized {@code Node} for long elements
+ */
+ interface OfLong extends OfPrimitive<Long, LongConsumer, long[], Spliterator.OfLong, OfLong> {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param consumer A {@code Consumer} that is to be invoked with each
+ * element in this {@code Node}. If this is an
+ * {@code LongConsumer}, it is cast to {@code LongConsumer} so
+ * the elements may be processed without boxing.
+ */
+ @Override
+ default void forEach(Consumer<? super Long> consumer) {
+ if (consumer instanceof LongConsumer) {
+ forEach((LongConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfLong.forEachRemaining(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes {@link #asPrimitiveArray()}
+ * to obtain a long[] array and then copies the elements from that
+ * long[] array into the boxed Long[] array. This is not efficient and
+ * it is recommended to invoke {@link #copyInto(Object, int)}.
+ */
+ @Override
+ default void copyInto(Long[] boxed, int offset) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfInt.copyInto(Long[], int)");
+
+ long[] array = asPrimitiveArray();
+ for (int i = 0; i < array.length; i++) {
+ boxed[offset + i] = array[i];
+ }
+ }
+
+ @Override
+ default Node.OfLong truncate(long from, long to, IntFunction<Long[]> generator) {
+ if (from == 0 && to == count())
+ return this;
+ long size = to - from;
+ Spliterator.OfLong spliterator = spliterator();
+ Node.Builder.OfLong nodeBuilder = Nodes.longBuilder(size);
+ nodeBuilder.begin(size);
+ for (int i = 0; i < from && spliterator.tryAdvance((LongConsumer) e -> { }); i++) { }
+ for (int i = 0; (i < size) && spliterator.tryAdvance((LongConsumer) nodeBuilder); i++) { }
+ nodeBuilder.end();
+ return nodeBuilder.build();
+ }
+
+ @Override
+ default long[] newArray(int count) {
+ return new long[count];
+ }
+
+ /**
+ * {@inheritDoc}
+ * @implSpec The default in {@code Node.OfLong} returns
+ * {@code StreamShape.LONG_VALUE}
+ */
+ default StreamShape getShape() {
+ return StreamShape.LONG_VALUE;
+ }
+ }
+
+ /**
+ * Specialized {@code Node} for double elements
+ */
+ interface OfDouble extends OfPrimitive<Double, DoubleConsumer, double[], Spliterator.OfDouble, OfDouble> {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param consumer A {@code Consumer} that is to be invoked with each
+ * element in this {@code Node}. If this is an
+ * {@code DoubleConsumer}, it is cast to {@code DoubleConsumer}
+ * so the elements may be processed without boxing.
+ */
+ @Override
+ default void forEach(Consumer<? super Double> consumer) {
+ if (consumer instanceof DoubleConsumer) {
+ forEach((DoubleConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfLong.forEachRemaining(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ //
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes {@link #asPrimitiveArray()}
+ * to obtain a double[] array and then copies the elements from that
+ * double[] array into the boxed Double[] array. This is not efficient
+ * and it is recommended to invoke {@link #copyInto(Object, int)}.
+ */
+ @Override
+ default void copyInto(Double[] boxed, int offset) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfDouble.copyInto(Double[], int)");
+
+ double[] array = asPrimitiveArray();
+ for (int i = 0; i < array.length; i++) {
+ boxed[offset + i] = array[i];
+ }
+ }
+
+ @Override
+ default Node.OfDouble truncate(long from, long to, IntFunction<Double[]> generator) {
+ if (from == 0 && to == count())
+ return this;
+ long size = to - from;
+ Spliterator.OfDouble spliterator = spliterator();
+ Node.Builder.OfDouble nodeBuilder = Nodes.doubleBuilder(size);
+ nodeBuilder.begin(size);
+ for (int i = 0; i < from && spliterator.tryAdvance((DoubleConsumer) e -> { }); i++) { }
+ for (int i = 0; (i < size) && spliterator.tryAdvance((DoubleConsumer) nodeBuilder); i++) { }
+ nodeBuilder.end();
+ return nodeBuilder.build();
+ }
+
+ @Override
+ default double[] newArray(int count) {
+ return new double[count];
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec The default in {@code Node.OfDouble} returns
+ * {@code StreamShape.DOUBLE_VALUE}
+ */
+ default StreamShape getShape() {
+ return StreamShape.DOUBLE_VALUE;
+ }
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/Nodes.java b/ojluni/src/main/java/java/util/stream/Nodes.java
new file mode 100644
index 0000000..c18540c
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/Nodes.java
@@ -0,0 +1,2227 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.List;
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.concurrent.CountedCompleter;
+import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.LongConsumer;
+import java.util.function.LongFunction;
+
+/**
+ * Factory methods for constructing implementations of {@link Node} and
+ * {@link Node.Builder} and their primitive specializations. Fork/Join tasks
+ * for collecting output from a {@link PipelineHelper} to a {@link Node} and
+ * flattening {@link Node}s.
+ *
+ * @since 1.8
+ */
+final class Nodes {
+
+ private Nodes() {
+ throw new Error("no instances");
+ }
+
+ /**
+ * The maximum size of an array that can be allocated.
+ */
+ static final long MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
+
+ // IllegalArgumentException messages
+ static final String BAD_SIZE = "Stream size exceeds max array size";
+
+ @SuppressWarnings("rawtypes")
+ private static final Node EMPTY_NODE = new EmptyNode.OfRef();
+ private static final Node.OfInt EMPTY_INT_NODE = new EmptyNode.OfInt();
+ private static final Node.OfLong EMPTY_LONG_NODE = new EmptyNode.OfLong();
+ private static final Node.OfDouble EMPTY_DOUBLE_NODE = new EmptyNode.OfDouble();
+
+ // General shape-based node creation methods
+
+ /**
+ * Produces an empty node whose count is zero, has no children and no content.
+ *
+ * @param <T> the type of elements of the created node
+ * @param shape the shape of the node to be created
+ * @return an empty node.
+ */
+ @SuppressWarnings("unchecked")
+ static <T> Node<T> emptyNode(StreamShape shape) {
+ switch (shape) {
+ case REFERENCE: return (Node<T>) EMPTY_NODE;
+ case INT_VALUE: return (Node<T>) EMPTY_INT_NODE;
+ case LONG_VALUE: return (Node<T>) EMPTY_LONG_NODE;
+ case DOUBLE_VALUE: return (Node<T>) EMPTY_DOUBLE_NODE;
+ default:
+ throw new IllegalStateException("Unknown shape " + shape);
+ }
+ }
+
+ /**
+ * Produces a concatenated {@link Node} that has two or more children.
+ * <p>The count of the concatenated node is equal to the sum of the count
+ * of each child. Traversal of the concatenated node traverses the content
+ * of each child in encounter order of the list of children. Splitting a
+ * spliterator obtained from the concatenated node preserves the encounter
+ * order of the list of children.
+ *
+ * <p>The result may be a concatenated node, the input sole node if the size
+ * of the list is 1, or an empty node.
+ *
+ * @param <T> the type of elements of the concatenated node
+ * @param shape the shape of the concatenated node to be created
+ * @param left the left input node
+ * @param right the right input node
+ * @return a {@code Node} covering the elements of the input nodes
+ * @throws IllegalStateException if all {@link Node} elements of the list
+ * are not an instance of a type supported by this factory.
+ */
+ @SuppressWarnings("unchecked")
+ static <T> Node<T> conc(StreamShape shape, Node<T> left, Node<T> right) {
+ switch (shape) {
+ case REFERENCE:
+ return new ConcNode<>(left, right);
+ case INT_VALUE:
+ return (Node<T>) new ConcNode.OfInt((Node.OfInt) left, (Node.OfInt) right);
+ case LONG_VALUE:
+ return (Node<T>) new ConcNode.OfLong((Node.OfLong) left, (Node.OfLong) right);
+ case DOUBLE_VALUE:
+ return (Node<T>) new ConcNode.OfDouble((Node.OfDouble) left, (Node.OfDouble) right);
+ default:
+ throw new IllegalStateException("Unknown shape " + shape);
+ }
+ }
+
+ // Reference-based node methods
+
+ /**
+ * Produces a {@link Node} describing an array.
+ *
+ * <p>The node will hold a reference to the array and will not make a copy.
+ *
+ * @param <T> the type of elements held by the node
+ * @param array the array
+ * @return a node holding an array
+ */
+ static <T> Node<T> node(T[] array) {
+ return new ArrayNode<>(array);
+ }
+
+ /**
+ * Produces a {@link Node} describing a {@link Collection}.
+ * <p>
+ * The node will hold a reference to the collection and will not make a copy.
+ *
+ * @param <T> the type of elements held by the node
+ * @param c the collection
+ * @return a node holding a collection
+ */
+ static <T> Node<T> node(Collection<T> c) {
+ return new CollectionNode<>(c);
+ }
+
+ /**
+ * Produces a {@link Node.Builder}.
+ *
+ * @param exactSizeIfKnown -1 if a variable size builder is requested,
+ * otherwise the exact capacity desired. A fixed capacity builder will
+ * fail if the wrong number of elements are added to the builder.
+ * @param generator the array factory
+ * @param <T> the type of elements of the node builder
+ * @return a {@code Node.Builder}
+ */
+ static <T> Node.Builder<T> builder(long exactSizeIfKnown, IntFunction<T[]> generator) {
+ return (exactSizeIfKnown >= 0 && exactSizeIfKnown < MAX_ARRAY_SIZE)
+ ? new FixedNodeBuilder<>(exactSizeIfKnown, generator)
+ : builder();
+ }
+
+ /**
+ * Produces a variable size {@link Node.Builder}.
+ *
+ * @param <T> the type of elements of the node builder
+ * @return a {@code Node.Builder}
+ */
+ static <T> Node.Builder<T> builder() {
+ return new SpinedNodeBuilder<>();
+ }
+
+ // Int nodes
+
+ /**
+ * Produces a {@link Node.OfInt} describing an int[] array.
+ *
+ * <p>The node will hold a reference to the array and will not make a copy.
+ *
+ * @param array the array
+ * @return a node holding an array
+ */
+ static Node.OfInt node(int[] array) {
+ return new IntArrayNode(array);
+ }
+
+ /**
+ * Produces a {@link Node.Builder.OfInt}.
+ *
+ * @param exactSizeIfKnown -1 if a variable size builder is requested,
+ * otherwise the exact capacity desired. A fixed capacity builder will
+ * fail if the wrong number of elements are added to the builder.
+ * @return a {@code Node.Builder.OfInt}
+ */
+ static Node.Builder.OfInt intBuilder(long exactSizeIfKnown) {
+ return (exactSizeIfKnown >= 0 && exactSizeIfKnown < MAX_ARRAY_SIZE)
+ ? new IntFixedNodeBuilder(exactSizeIfKnown)
+ : intBuilder();
+ }
+
+ /**
+ * Produces a variable size {@link Node.Builder.OfInt}.
+ *
+ * @return a {@code Node.Builder.OfInt}
+ */
+ static Node.Builder.OfInt intBuilder() {
+ return new IntSpinedNodeBuilder();
+ }
+
+ // Long nodes
+
+ /**
+ * Produces a {@link Node.OfLong} describing a long[] array.
+ * <p>
+ * The node will hold a reference to the array and will not make a copy.
+ *
+ * @param array the array
+ * @return a node holding an array
+ */
+ static Node.OfLong node(final long[] array) {
+ return new LongArrayNode(array);
+ }
+
+ /**
+ * Produces a {@link Node.Builder.OfLong}.
+ *
+ * @param exactSizeIfKnown -1 if a variable size builder is requested,
+ * otherwise the exact capacity desired. A fixed capacity builder will
+ * fail if the wrong number of elements are added to the builder.
+ * @return a {@code Node.Builder.OfLong}
+ */
+ static Node.Builder.OfLong longBuilder(long exactSizeIfKnown) {
+ return (exactSizeIfKnown >= 0 && exactSizeIfKnown < MAX_ARRAY_SIZE)
+ ? new LongFixedNodeBuilder(exactSizeIfKnown)
+ : longBuilder();
+ }
+
+ /**
+ * Produces a variable size {@link Node.Builder.OfLong}.
+ *
+ * @return a {@code Node.Builder.OfLong}
+ */
+ static Node.Builder.OfLong longBuilder() {
+ return new LongSpinedNodeBuilder();
+ }
+
+ // Double nodes
+
+ /**
+ * Produces a {@link Node.OfDouble} describing a double[] array.
+ *
+ * <p>The node will hold a reference to the array and will not make a copy.
+ *
+ * @param array the array
+ * @return a node holding an array
+ */
+ static Node.OfDouble node(final double[] array) {
+ return new DoubleArrayNode(array);
+ }
+
+ /**
+ * Produces a {@link Node.Builder.OfDouble}.
+ *
+ * @param exactSizeIfKnown -1 if a variable size builder is requested,
+ * otherwise the exact capacity desired. A fixed capacity builder will
+ * fail if the wrong number of elements are added to the builder.
+ * @return a {@code Node.Builder.OfDouble}
+ */
+ static Node.Builder.OfDouble doubleBuilder(long exactSizeIfKnown) {
+ return (exactSizeIfKnown >= 0 && exactSizeIfKnown < MAX_ARRAY_SIZE)
+ ? new DoubleFixedNodeBuilder(exactSizeIfKnown)
+ : doubleBuilder();
+ }
+
+ /**
+ * Produces a variable size {@link Node.Builder.OfDouble}.
+ *
+ * @return a {@code Node.Builder.OfDouble}
+ */
+ static Node.Builder.OfDouble doubleBuilder() {
+ return new DoubleSpinedNodeBuilder();
+ }
+
+ // Parallel evaluation of pipelines to nodes
+
+ /**
+ * Collect, in parallel, elements output from a pipeline and describe those
+ * elements with a {@link Node}.
+ *
+ * @implSpec
+ * If the exact size of the output from the pipeline is known and the source
+ * {@link Spliterator} has the {@link Spliterator#SUBSIZED} characteristic,
+ * then a flat {@link Node} will be returned whose content is an array,
+ * since the size is known the array can be constructed in advance and
+ * output elements can be placed into the array concurrently by leaf
+ * tasks at the correct offsets. If the exact size is not known, output
+ * elements are collected into a conc-node whose shape mirrors that
+ * of the computation. This conc-node can then be flattened in
+ * parallel to produce a flat {@code Node} if desired.
+ *
+ * @param helper the pipeline helper describing the pipeline
+ * @param flattenTree whether a conc node should be flattened into a node
+ * describing an array before returning
+ * @param generator the array generator
+ * @return a {@link Node} describing the output elements
+ */
+ public static <P_IN, P_OUT> Node<P_OUT> collect(PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree,
+ IntFunction<P_OUT[]> generator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size >= 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ P_OUT[] array = generator.apply((int) size);
+ new SizedCollectorTask.OfRef<>(spliterator, helper, array).invoke();
+ return node(array);
+ } else {
+ Node<P_OUT> node = new CollectorTask.OfRef<>(helper, generator, spliterator).invoke();
+ return flattenTree ? flatten(node, generator) : node;
+ }
+ }
+
+ /**
+ * Collect, in parallel, elements output from an int-valued pipeline and
+ * describe those elements with a {@link Node.OfInt}.
+ *
+ * @implSpec
+ * If the exact size of the output from the pipeline is known and the source
+ * {@link Spliterator} has the {@link Spliterator#SUBSIZED} characteristic,
+ * then a flat {@link Node} will be returned whose content is an array,
+ * since the size is known the array can be constructed in advance and
+ * output elements can be placed into the array concurrently by leaf
+ * tasks at the correct offsets. If the exact size is not known, output
+ * elements are collected into a conc-node whose shape mirrors that
+ * of the computation. This conc-node can then be flattened in
+ * parallel to produce a flat {@code Node.OfInt} if desired.
+ *
+ * @param <P_IN> the type of elements from the source Spliterator
+ * @param helper the pipeline helper describing the pipeline
+ * @param flattenTree whether a conc node should be flattened into a node
+ * describing an array before returning
+ * @return a {@link Node.OfInt} describing the output elements
+ */
+ public static <P_IN> Node.OfInt collectInt(PipelineHelper<Integer> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size >= 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ int[] array = new int[(int) size];
+ new SizedCollectorTask.OfInt<>(spliterator, helper, array).invoke();
+ return node(array);
+ }
+ else {
+ Node.OfInt node = new CollectorTask.OfInt<>(helper, spliterator).invoke();
+ return flattenTree ? flattenInt(node) : node;
+ }
+ }
+
+ /**
+ * Collect, in parallel, elements output from a long-valued pipeline and
+ * describe those elements with a {@link Node.OfLong}.
+ *
+ * @implSpec
+ * If the exact size of the output from the pipeline is known and the source
+ * {@link Spliterator} has the {@link Spliterator#SUBSIZED} characteristic,
+ * then a flat {@link Node} will be returned whose content is an array,
+ * since the size is known the array can be constructed in advance and
+ * output elements can be placed into the array concurrently by leaf
+ * tasks at the correct offsets. If the exact size is not known, output
+ * elements are collected into a conc-node whose shape mirrors that
+ * of the computation. This conc-node can then be flattened in
+ * parallel to produce a flat {@code Node.OfLong} if desired.
+ *
+ * @param <P_IN> the type of elements from the source Spliterator
+ * @param helper the pipeline helper describing the pipeline
+ * @param flattenTree whether a conc node should be flattened into a node
+ * describing an array before returning
+ * @return a {@link Node.OfLong} describing the output elements
+ */
+ public static <P_IN> Node.OfLong collectLong(PipelineHelper<Long> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size >= 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ long[] array = new long[(int) size];
+ new SizedCollectorTask.OfLong<>(spliterator, helper, array).invoke();
+ return node(array);
+ }
+ else {
+ Node.OfLong node = new CollectorTask.OfLong<>(helper, spliterator).invoke();
+ return flattenTree ? flattenLong(node) : node;
+ }
+ }
+
+ /**
+ * Collect, in parallel, elements output from a double-valued pipeline and
+ * describe those elements with a {@link Node.OfDouble}.
+ *
+ * @implSpec
+ * If the exact size of the output from the pipeline is known and the source
+ * {@link Spliterator} has the {@link Spliterator#SUBSIZED} characteristic,
+ * then a flat {@link Node} will be returned whose content is an array,
+ * since the size is known the array can be constructed in advance and
+ * output elements can be placed into the array concurrently by leaf
+ * tasks at the correct offsets. If the exact size is not known, output
+ * elements are collected into a conc-node whose shape mirrors that
+ * of the computation. This conc-node can then be flattened in
+ * parallel to produce a flat {@code Node.OfDouble} if desired.
+ *
+ * @param <P_IN> the type of elements from the source Spliterator
+ * @param helper the pipeline helper describing the pipeline
+ * @param flattenTree whether a conc node should be flattened into a node
+ * describing an array before returning
+ * @return a {@link Node.OfDouble} describing the output elements
+ */
+ public static <P_IN> Node.OfDouble collectDouble(PipelineHelper<Double> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size >= 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ double[] array = new double[(int) size];
+ new SizedCollectorTask.OfDouble<>(spliterator, helper, array).invoke();
+ return node(array);
+ }
+ else {
+ Node.OfDouble node = new CollectorTask.OfDouble<>(helper, spliterator).invoke();
+ return flattenTree ? flattenDouble(node) : node;
+ }
+ }
+
+ // Parallel flattening of nodes
+
+ /**
+ * Flatten, in parallel, a {@link Node}. A flattened node is one that has
+ * no children. If the node is already flat, it is simply returned.
+ *
+ * @implSpec
+ * If a new node is to be created, the generator is used to create an array
+ * whose length is {@link Node#count()}. Then the node tree is traversed
+ * and leaf node elements are placed in the array concurrently by leaf tasks
+ * at the correct offsets.
+ *
+ * @param <T> type of elements contained by the node
+ * @param node the node to flatten
+ * @param generator the array factory used to create array instances
+ * @return a flat {@code Node}
+ */
+ public static <T> Node<T> flatten(Node<T> node, IntFunction<T[]> generator) {
+ if (node.getChildCount() > 0) {
+ long size = node.count();
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ T[] array = generator.apply((int) size);
+ new ToArrayTask.OfRef<>(node, array, 0).invoke();
+ return node(array);
+ } else {
+ return node;
+ }
+ }
+
+ /**
+ * Flatten, in parallel, a {@link Node.OfInt}. A flattened node is one that
+ * has no children. If the node is already flat, it is simply returned.
+ *
+ * @implSpec
+ * If a new node is to be created, a new int[] array is created whose length
+ * is {@link Node#count()}. Then the node tree is traversed and leaf node
+ * elements are placed in the array concurrently by leaf tasks at the
+ * correct offsets.
+ *
+ * @param node the node to flatten
+ * @return a flat {@code Node.OfInt}
+ */
+ public static Node.OfInt flattenInt(Node.OfInt node) {
+ if (node.getChildCount() > 0) {
+ long size = node.count();
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ int[] array = new int[(int) size];
+ new ToArrayTask.OfInt(node, array, 0).invoke();
+ return node(array);
+ } else {
+ return node;
+ }
+ }
+
+ /**
+ * Flatten, in parallel, a {@link Node.OfLong}. A flattened node is one that
+ * has no children. If the node is already flat, it is simply returned.
+ *
+ * @implSpec
+ * If a new node is to be created, a new long[] array is created whose length
+ * is {@link Node#count()}. Then the node tree is traversed and leaf node
+ * elements are placed in the array concurrently by leaf tasks at the
+ * correct offsets.
+ *
+ * @param node the node to flatten
+ * @return a flat {@code Node.OfLong}
+ */
+ public static Node.OfLong flattenLong(Node.OfLong node) {
+ if (node.getChildCount() > 0) {
+ long size = node.count();
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ long[] array = new long[(int) size];
+ new ToArrayTask.OfLong(node, array, 0).invoke();
+ return node(array);
+ } else {
+ return node;
+ }
+ }
+
+ /**
+ * Flatten, in parallel, a {@link Node.OfDouble}. A flattened node is one that
+ * has no children. If the node is already flat, it is simply returned.
+ *
+ * @implSpec
+ * If a new node is to be created, a new double[] array is created whose length
+ * is {@link Node#count()}. Then the node tree is traversed and leaf node
+ * elements are placed in the array concurrently by leaf tasks at the
+ * correct offsets.
+ *
+ * @param node the node to flatten
+ * @return a flat {@code Node.OfDouble}
+ */
+ public static Node.OfDouble flattenDouble(Node.OfDouble node) {
+ if (node.getChildCount() > 0) {
+ long size = node.count();
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ double[] array = new double[(int) size];
+ new ToArrayTask.OfDouble(node, array, 0).invoke();
+ return node(array);
+ } else {
+ return node;
+ }
+ }
+
+ // Implementations
+
+ private static abstract class EmptyNode<T, T_ARR, T_CONS> implements Node<T> {
+ EmptyNode() { }
+
+ @Override
+ public T[] asArray(IntFunction<T[]> generator) {
+ return generator.apply(0);
+ }
+
+ public void copyInto(T_ARR array, int offset) { }
+
+ @Override
+ public long count() {
+ return 0;
+ }
+
+ public void forEach(T_CONS consumer) { }
+
+ private static class OfRef<T> extends EmptyNode<T, T[], Consumer<? super T>> {
+ private OfRef() {
+ super();
+ }
+
+ @Override
+ public Spliterator<T> spliterator() {
+ return Spliterators.emptySpliterator();
+ }
+ }
+
+ private static final class OfInt
+ extends EmptyNode<Integer, int[], IntConsumer>
+ implements Node.OfInt {
+
+ OfInt() { } // Avoid creation of special accessor
+
+ @Override
+ public Spliterator.OfInt spliterator() {
+ return Spliterators.emptyIntSpliterator();
+ }
+
+ @Override
+ public int[] asPrimitiveArray() {
+ return EMPTY_INT_ARRAY;
+ }
+ }
+
+ private static final class OfLong
+ extends EmptyNode<Long, long[], LongConsumer>
+ implements Node.OfLong {
+
+ OfLong() { } // Avoid creation of special accessor
+
+ @Override
+ public Spliterator.OfLong spliterator() {
+ return Spliterators.emptyLongSpliterator();
+ }
+
+ @Override
+ public long[] asPrimitiveArray() {
+ return EMPTY_LONG_ARRAY;
+ }
+ }
+
+ private static final class OfDouble
+ extends EmptyNode<Double, double[], DoubleConsumer>
+ implements Node.OfDouble {
+
+ OfDouble() { } // Avoid creation of special accessor
+
+ @Override
+ public Spliterator.OfDouble spliterator() {
+ return Spliterators.emptyDoubleSpliterator();
+ }
+
+ @Override
+ public double[] asPrimitiveArray() {
+ return EMPTY_DOUBLE_ARRAY;
+ }
+ }
+ }
+
+ /** Node class for a reference array */
+ private static class ArrayNode<T> implements Node<T> {
+ final T[] array;
+ int curSize;
+
+ @SuppressWarnings("unchecked")
+ ArrayNode(long size, IntFunction<T[]> generator) {
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ this.array = generator.apply((int) size);
+ this.curSize = 0;
+ }
+
+ ArrayNode(T[] array) {
+ this.array = array;
+ this.curSize = array.length;
+ }
+
+ // Node
+
+ @Override
+ public Spliterator<T> spliterator() {
+ return Arrays.spliterator(array, 0, curSize);
+ }
+
+ @Override
+ public void copyInto(T[] dest, int destOffset) {
+ System.arraycopy(array, 0, dest, destOffset, curSize);
+ }
+
+ @Override
+ public T[] asArray(IntFunction<T[]> generator) {
+ if (array.length == curSize) {
+ return array;
+ } else {
+ throw new IllegalStateException();
+ }
+ }
+
+ @Override
+ public long count() {
+ return curSize;
+ }
+
+ @Override
+ public void forEach(Consumer<? super T> consumer) {
+ for (int i = 0; i < curSize; i++) {
+ consumer.accept(array[i]);
+ }
+ }
+
+ //
+
+ @Override
+ public String toString() {
+ return String.format("ArrayNode[%d][%s]",
+ array.length - curSize, Arrays.toString(array));
+ }
+ }
+
+ /** Node class for a Collection */
+ private static final class CollectionNode<T> implements Node<T> {
+ private final Collection<T> c;
+
+ CollectionNode(Collection<T> c) {
+ this.c = c;
+ }
+
+ // Node
+
+ @Override
+ public Spliterator<T> spliterator() {
+ return c.stream().spliterator();
+ }
+
+ @Override
+ public void copyInto(T[] array, int offset) {
+ for (T t : c)
+ array[offset++] = t;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public T[] asArray(IntFunction<T[]> generator) {
+ return c.toArray(generator.apply(c.size()));
+ }
+
+ @Override
+ public long count() {
+ return c.size();
+ }
+
+ @Override
+ public void forEach(Consumer<? super T> consumer) {
+ c.forEach(consumer);
+ }
+
+ //
+
+ @Override
+ public String toString() {
+ return String.format("CollectionNode[%d][%s]", c.size(), c);
+ }
+ }
+
+ /**
+ * Node class for an internal node with two or more children
+ */
+ private static abstract class AbstractConcNode<T, T_NODE extends Node<T>> implements Node<T> {
+ protected final T_NODE left;
+ protected final T_NODE right;
+ private final long size;
+
+ AbstractConcNode(T_NODE left, T_NODE right) {
+ this.left = left;
+ this.right = right;
+ // The Node count will be required when the Node spliterator is
+ // obtained and it is cheaper to aggressively calculate bottom up
+ // as the tree is built rather than later on from the top down
+ // traversing the tree
+ this.size = left.count() + right.count();
+ }
+
+ @Override
+ public int getChildCount() {
+ return 2;
+ }
+
+ @Override
+ public T_NODE getChild(int i) {
+ if (i == 0) return left;
+ if (i == 1) return right;
+ throw new IndexOutOfBoundsException();
+ }
+
+ @Override
+ public long count() {
+ return size;
+ }
+ }
+
+ static final class ConcNode<T>
+ extends AbstractConcNode<T, Node<T>>
+ implements Node<T> {
+
+ ConcNode(Node<T> left, Node<T> right) {
+ super(left, right);
+ }
+
+ @Override
+ public Spliterator<T> spliterator() {
+ return new Nodes.InternalNodeSpliterator.OfRef<>(this);
+ }
+
+ @Override
+ public void copyInto(T[] array, int offset) {
+ Objects.requireNonNull(array);
+ left.copyInto(array, offset);
+ // Cast to int is safe since it is the caller's responsibility to
+ // ensure that there is sufficient room in the array
+ right.copyInto(array, offset + (int) left.count());
+ }
+
+ @Override
+ public T[] asArray(IntFunction<T[]> generator) {
+ long size = count();
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ T[] array = generator.apply((int) size);
+ copyInto(array, 0);
+ return array;
+ }
+
+ @Override
+ public void forEach(Consumer<? super T> consumer) {
+ left.forEach(consumer);
+ right.forEach(consumer);
+ }
+
+ @Override
+ public Node<T> truncate(long from, long to, IntFunction<T[]> generator) {
+ if (from == 0 && to == count())
+ return this;
+ long leftCount = left.count();
+ if (from >= leftCount)
+ return right.truncate(from - leftCount, to - leftCount, generator);
+ else if (to <= leftCount)
+ return left.truncate(from, to, generator);
+ else {
+ return Nodes.conc(getShape(), left.truncate(from, leftCount, generator),
+ right.truncate(0, to - leftCount, generator));
+ }
+ }
+
+ @Override
+ public String toString() {
+ if (count() < 32) {
+ return String.format("ConcNode[%s.%s]", left, right);
+ } else {
+ return String.format("ConcNode[size=%d]", count());
+ }
+ }
+
+ private abstract static class OfPrimitive<E, T_CONS, T_ARR,
+ T_SPLITR extends Spliterator.OfPrimitive<E, T_CONS, T_SPLITR>,
+ T_NODE extends Node.OfPrimitive<E, T_CONS, T_ARR, T_SPLITR, T_NODE>>
+ extends AbstractConcNode<E, T_NODE>
+ implements Node.OfPrimitive<E, T_CONS, T_ARR, T_SPLITR, T_NODE> {
+
+ OfPrimitive(T_NODE left, T_NODE right) {
+ super(left, right);
+ }
+
+ @Override
+ public void forEach(T_CONS consumer) {
+ left.forEach(consumer);
+ right.forEach(consumer);
+ }
+
+ @Override
+ public void copyInto(T_ARR array, int offset) {
+ left.copyInto(array, offset);
+ // Cast to int is safe since it is the caller's responsibility to
+ // ensure that there is sufficient room in the array
+ right.copyInto(array, offset + (int) left.count());
+ }
+
+ @Override
+ public T_ARR asPrimitiveArray() {
+ long size = count();
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ T_ARR array = newArray((int) size);
+ copyInto(array, 0);
+ return array;
+ }
+
+ @Override
+ public String toString() {
+ if (count() < 32)
+ return String.format("%s[%s.%s]", this.getClass().getName(), left, right);
+ else
+ return String.format("%s[size=%d]", this.getClass().getName(), count());
+ }
+ }
+
+ static final class OfInt
+ extends ConcNode.OfPrimitive<Integer, IntConsumer, int[], Spliterator.OfInt, Node.OfInt>
+ implements Node.OfInt {
+
+ OfInt(Node.OfInt left, Node.OfInt right) {
+ super(left, right);
+ }
+
+ @Override
+ public Spliterator.OfInt spliterator() {
+ return new InternalNodeSpliterator.OfInt(this);
+ }
+ }
+
+ static final class OfLong
+ extends ConcNode.OfPrimitive<Long, LongConsumer, long[], Spliterator.OfLong, Node.OfLong>
+ implements Node.OfLong {
+
+ OfLong(Node.OfLong left, Node.OfLong right) {
+ super(left, right);
+ }
+
+ @Override
+ public Spliterator.OfLong spliterator() {
+ return new InternalNodeSpliterator.OfLong(this);
+ }
+ }
+
+ static final class OfDouble
+ extends ConcNode.OfPrimitive<Double, DoubleConsumer, double[], Spliterator.OfDouble, Node.OfDouble>
+ implements Node.OfDouble {
+
+ OfDouble(Node.OfDouble left, Node.OfDouble right) {
+ super(left, right);
+ }
+
+ @Override
+ public Spliterator.OfDouble spliterator() {
+ return new InternalNodeSpliterator.OfDouble(this);
+ }
+ }
+ }
+
+ /** Abstract class for spliterator for all internal node classes */
+ private static abstract class InternalNodeSpliterator<T,
+ S extends Spliterator<T>,
+ N extends Node<T>>
+ implements Spliterator<T> {
+ // Node we are pointing to
+ // null if full traversal has occurred
+ N curNode;
+
+ // next child of curNode to consume
+ int curChildIndex;
+
+ // The spliterator of the curNode if that node is last and has no children.
+ // This spliterator will be delegated to for splitting and traversing.
+ // null if curNode has children
+ S lastNodeSpliterator;
+
+ // spliterator used while traversing with tryAdvance
+ // null if no partial traversal has occurred
+ S tryAdvanceSpliterator;
+
+ // node stack used when traversing to search and find leaf nodes
+ // null if no partial traversal has occurred
+ Deque<N> tryAdvanceStack;
+
+ InternalNodeSpliterator(N curNode) {
+ this.curNode = curNode;
+ }
+
+ /**
+ * Initiate a stack containing, in left-to-right order, the child nodes
+ * covered by this spliterator
+ */
+ @SuppressWarnings("unchecked")
+ protected final Deque<N> initStack() {
+ // Bias size to the case where leaf nodes are close to this node
+ // 8 is the minimum initial capacity for the ArrayDeque implementation
+ Deque<N> stack = new ArrayDeque<>(8);
+ for (int i = curNode.getChildCount() - 1; i >= curChildIndex; i--)
+ stack.addFirst((N) curNode.getChild(i));
+ return stack;
+ }
+
+ /**
+ * Depth first search, in left-to-right order, of the node tree, using
+ * an explicit stack, to find the next non-empty leaf node.
+ */
+ @SuppressWarnings("unchecked")
+ protected final N findNextLeafNode(Deque<N> stack) {
+ N n = null;
+ while ((n = stack.pollFirst()) != null) {
+ if (n.getChildCount() == 0) {
+ if (n.count() > 0)
+ return n;
+ } else {
+ for (int i = n.getChildCount() - 1; i >= 0; i--)
+ stack.addFirst((N) n.getChild(i));
+ }
+ }
+
+ return null;
+ }
+
+ @SuppressWarnings("unchecked")
+ protected final boolean initTryAdvance() {
+ if (curNode == null)
+ return false;
+
+ if (tryAdvanceSpliterator == null) {
+ if (lastNodeSpliterator == null) {
+ // Initiate the node stack
+ tryAdvanceStack = initStack();
+ N leaf = findNextLeafNode(tryAdvanceStack);
+ if (leaf != null)
+ tryAdvanceSpliterator = (S) leaf.spliterator();
+ else {
+ // A non-empty leaf node was not found
+ // No elements to traverse
+ curNode = null;
+ return false;
+ }
+ }
+ else
+ tryAdvanceSpliterator = lastNodeSpliterator;
+ }
+ return true;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public final S trySplit() {
+ if (curNode == null || tryAdvanceSpliterator != null)
+ return null; // Cannot split if fully or partially traversed
+ else if (lastNodeSpliterator != null)
+ return (S) lastNodeSpliterator.trySplit();
+ else if (curChildIndex < curNode.getChildCount() - 1)
+ return (S) curNode.getChild(curChildIndex++).spliterator();
+ else {
+ curNode = (N) curNode.getChild(curChildIndex);
+ if (curNode.getChildCount() == 0) {
+ lastNodeSpliterator = (S) curNode.spliterator();
+ return (S) lastNodeSpliterator.trySplit();
+ }
+ else {
+ curChildIndex = 0;
+ return (S) curNode.getChild(curChildIndex++).spliterator();
+ }
+ }
+ }
+
+ @Override
+ public final long estimateSize() {
+ if (curNode == null)
+ return 0;
+
+ // Will not reflect the effects of partial traversal.
+ // This is compliant with the specification
+ if (lastNodeSpliterator != null)
+ return lastNodeSpliterator.estimateSize();
+ else {
+ long size = 0;
+ for (int i = curChildIndex; i < curNode.getChildCount(); i++)
+ size += curNode.getChild(i).count();
+ return size;
+ }
+ }
+
+ @Override
+ public final int characteristics() {
+ return Spliterator.SIZED;
+ }
+
+ private static final class OfRef<T>
+ extends InternalNodeSpliterator<T, Spliterator<T>, Node<T>> {
+
+ OfRef(Node<T> curNode) {
+ super(curNode);
+ }
+
+ @Override
+ public boolean tryAdvance(Consumer<? super T> consumer) {
+ if (!initTryAdvance())
+ return false;
+
+ boolean hasNext = tryAdvanceSpliterator.tryAdvance(consumer);
+ if (!hasNext) {
+ if (lastNodeSpliterator == null) {
+ // Advance to the spliterator of the next non-empty leaf node
+ Node<T> leaf = findNextLeafNode(tryAdvanceStack);
+ if (leaf != null) {
+ tryAdvanceSpliterator = leaf.spliterator();
+ // Since the node is not-empty the spliterator can be advanced
+ return tryAdvanceSpliterator.tryAdvance(consumer);
+ }
+ }
+ // No more elements to traverse
+ curNode = null;
+ }
+ return hasNext;
+ }
+
+ @Override
+ public void forEachRemaining(Consumer<? super T> consumer) {
+ if (curNode == null)
+ return;
+
+ if (tryAdvanceSpliterator == null) {
+ if (lastNodeSpliterator == null) {
+ Deque<Node<T>> stack = initStack();
+ Node<T> leaf;
+ while ((leaf = findNextLeafNode(stack)) != null) {
+ leaf.forEach(consumer);
+ }
+ curNode = null;
+ }
+ else
+ lastNodeSpliterator.forEachRemaining(consumer);
+ }
+ else
+ while(tryAdvance(consumer)) { }
+ }
+ }
+
+ private static abstract class OfPrimitive<T, T_CONS, T_ARR,
+ T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>,
+ N extends Node.OfPrimitive<T, T_CONS, T_ARR, T_SPLITR, N>>
+ extends InternalNodeSpliterator<T, T_SPLITR, N>
+ implements Spliterator.OfPrimitive<T, T_CONS, T_SPLITR> {
+
+ OfPrimitive(N cur) {
+ super(cur);
+ }
+
+ @Override
+ public boolean tryAdvance(T_CONS consumer) {
+ if (!initTryAdvance())
+ return false;
+
+ boolean hasNext = tryAdvanceSpliterator.tryAdvance(consumer);
+ if (!hasNext) {
+ if (lastNodeSpliterator == null) {
+ // Advance to the spliterator of the next non-empty leaf node
+ N leaf = findNextLeafNode(tryAdvanceStack);
+ if (leaf != null) {
+ tryAdvanceSpliterator = leaf.spliterator();
+ // Since the node is not-empty the spliterator can be advanced
+ return tryAdvanceSpliterator.tryAdvance(consumer);
+ }
+ }
+ // No more elements to traverse
+ curNode = null;
+ }
+ return hasNext;
+ }
+
+ @Override
+ public void forEachRemaining(T_CONS consumer) {
+ if (curNode == null)
+ return;
+
+ if (tryAdvanceSpliterator == null) {
+ if (lastNodeSpliterator == null) {
+ Deque<N> stack = initStack();
+ N leaf;
+ while ((leaf = findNextLeafNode(stack)) != null) {
+ leaf.forEach(consumer);
+ }
+ curNode = null;
+ }
+ else
+ lastNodeSpliterator.forEachRemaining(consumer);
+ }
+ else
+ while(tryAdvance(consumer)) { }
+ }
+ }
+
+ private static final class OfInt
+ extends OfPrimitive<Integer, IntConsumer, int[], Spliterator.OfInt, Node.OfInt>
+ implements Spliterator.OfInt {
+
+ OfInt(Node.OfInt cur) {
+ super(cur);
+ }
+ }
+
+ private static final class OfLong
+ extends OfPrimitive<Long, LongConsumer, long[], Spliterator.OfLong, Node.OfLong>
+ implements Spliterator.OfLong {
+
+ OfLong(Node.OfLong cur) {
+ super(cur);
+ }
+ }
+
+ private static final class OfDouble
+ extends OfPrimitive<Double, DoubleConsumer, double[], Spliterator.OfDouble, Node.OfDouble>
+ implements Spliterator.OfDouble {
+
+ OfDouble(Node.OfDouble cur) {
+ super(cur);
+ }
+ }
+ }
+
+ /**
+ * Fixed-sized builder class for reference nodes
+ */
+ private static final class FixedNodeBuilder<T>
+ extends ArrayNode<T>
+ implements Node.Builder<T> {
+
+ FixedNodeBuilder(long size, IntFunction<T[]> generator) {
+ super(size, generator);
+ assert size < MAX_ARRAY_SIZE;
+ }
+
+ @Override
+ public Node<T> build() {
+ if (curSize < array.length)
+ throw new IllegalStateException(String.format("Current size %d is less than fixed size %d",
+ curSize, array.length));
+ return this;
+ }
+
+ @Override
+ public void begin(long size) {
+ if (size != array.length)
+ throw new IllegalStateException(String.format("Begin size %d is not equal to fixed size %d",
+ size, array.length));
+ curSize = 0;
+ }
+
+ @Override
+ public void accept(T t) {
+ if (curSize < array.length) {
+ array[curSize++] = t;
+ } else {
+ throw new IllegalStateException(String.format("Accept exceeded fixed size of %d",
+ array.length));
+ }
+ }
+
+ @Override
+ public void end() {
+ if (curSize < array.length)
+ throw new IllegalStateException(String.format("End size %d is less than fixed size %d",
+ curSize, array.length));
+ }
+
+ @Override
+ public String toString() {
+ return String.format("FixedNodeBuilder[%d][%s]",
+ array.length - curSize, Arrays.toString(array));
+ }
+ }
+
+ /**
+ * Variable-sized builder class for reference nodes
+ */
+ private static final class SpinedNodeBuilder<T>
+ extends SpinedBuffer<T>
+ implements Node<T>, Node.Builder<T> {
+ private boolean building = false;
+
+ SpinedNodeBuilder() {} // Avoid creation of special accessor
+
+ @Override
+ public Spliterator<T> spliterator() {
+ assert !building : "during building";
+ return super.spliterator();
+ }
+
+ @Override
+ public void forEach(Consumer<? super T> consumer) {
+ assert !building : "during building";
+ super.forEach(consumer);
+ }
+
+ //
+ @Override
+ public void begin(long size) {
+ assert !building : "was already building";
+ building = true;
+ clear();
+ ensureCapacity(size);
+ }
+
+ @Override
+ public void accept(T t) {
+ assert building : "not building";
+ super.accept(t);
+ }
+
+ @Override
+ public void end() {
+ assert building : "was not building";
+ building = false;
+ // @@@ check begin(size) and size
+ }
+
+ @Override
+ public void copyInto(T[] array, int offset) {
+ assert !building : "during building";
+ super.copyInto(array, offset);
+ }
+
+ @Override
+ public T[] asArray(IntFunction<T[]> arrayFactory) {
+ assert !building : "during building";
+ return super.asArray(arrayFactory);
+ }
+
+ @Override
+ public Node<T> build() {
+ assert !building : "during building";
+ return this;
+ }
+ }
+
+ //
+
+ private static final int[] EMPTY_INT_ARRAY = new int[0];
+ private static final long[] EMPTY_LONG_ARRAY = new long[0];
+ private static final double[] EMPTY_DOUBLE_ARRAY = new double[0];
+
+ private static class IntArrayNode implements Node.OfInt {
+ final int[] array;
+ int curSize;
+
+ IntArrayNode(long size) {
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ this.array = new int[(int) size];
+ this.curSize = 0;
+ }
+
+ IntArrayNode(int[] array) {
+ this.array = array;
+ this.curSize = array.length;
+ }
+
+ // Node
+
+ @Override
+ public Spliterator.OfInt spliterator() {
+ return Arrays.spliterator(array, 0, curSize);
+ }
+
+ @Override
+ public int[] asPrimitiveArray() {
+ if (array.length == curSize) {
+ return array;
+ } else {
+ return Arrays.copyOf(array, curSize);
+ }
+ }
+
+ @Override
+ public void copyInto(int[] dest, int destOffset) {
+ System.arraycopy(array, 0, dest, destOffset, curSize);
+ }
+
+ @Override
+ public long count() {
+ return curSize;
+ }
+
+ @Override
+ public void forEach(IntConsumer consumer) {
+ for (int i = 0; i < curSize; i++) {
+ consumer.accept(array[i]);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return String.format("IntArrayNode[%d][%s]",
+ array.length - curSize, Arrays.toString(array));
+ }
+ }
+
+ private static class LongArrayNode implements Node.OfLong {
+ final long[] array;
+ int curSize;
+
+ LongArrayNode(long size) {
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ this.array = new long[(int) size];
+ this.curSize = 0;
+ }
+
+ LongArrayNode(long[] array) {
+ this.array = array;
+ this.curSize = array.length;
+ }
+
+ @Override
+ public Spliterator.OfLong spliterator() {
+ return Arrays.spliterator(array, 0, curSize);
+ }
+
+ @Override
+ public long[] asPrimitiveArray() {
+ if (array.length == curSize) {
+ return array;
+ } else {
+ return Arrays.copyOf(array, curSize);
+ }
+ }
+
+ @Override
+ public void copyInto(long[] dest, int destOffset) {
+ System.arraycopy(array, 0, dest, destOffset, curSize);
+ }
+
+ @Override
+ public long count() {
+ return curSize;
+ }
+
+ @Override
+ public void forEach(LongConsumer consumer) {
+ for (int i = 0; i < curSize; i++) {
+ consumer.accept(array[i]);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return String.format("LongArrayNode[%d][%s]",
+ array.length - curSize, Arrays.toString(array));
+ }
+ }
+
+ private static class DoubleArrayNode implements Node.OfDouble {
+ final double[] array;
+ int curSize;
+
+ DoubleArrayNode(long size) {
+ if (size >= MAX_ARRAY_SIZE)
+ throw new IllegalArgumentException(BAD_SIZE);
+ this.array = new double[(int) size];
+ this.curSize = 0;
+ }
+
+ DoubleArrayNode(double[] array) {
+ this.array = array;
+ this.curSize = array.length;
+ }
+
+ @Override
+ public Spliterator.OfDouble spliterator() {
+ return Arrays.spliterator(array, 0, curSize);
+ }
+
+ @Override
+ public double[] asPrimitiveArray() {
+ if (array.length == curSize) {
+ return array;
+ } else {
+ return Arrays.copyOf(array, curSize);
+ }
+ }
+
+ @Override
+ public void copyInto(double[] dest, int destOffset) {
+ System.arraycopy(array, 0, dest, destOffset, curSize);
+ }
+
+ @Override
+ public long count() {
+ return curSize;
+ }
+
+ @Override
+ public void forEach(DoubleConsumer consumer) {
+ for (int i = 0; i < curSize; i++) {
+ consumer.accept(array[i]);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return String.format("DoubleArrayNode[%d][%s]",
+ array.length - curSize, Arrays.toString(array));
+ }
+ }
+
+ private static final class IntFixedNodeBuilder
+ extends IntArrayNode
+ implements Node.Builder.OfInt {
+
+ IntFixedNodeBuilder(long size) {
+ super(size);
+ assert size < MAX_ARRAY_SIZE;
+ }
+
+ @Override
+ public Node.OfInt build() {
+ if (curSize < array.length) {
+ throw new IllegalStateException(String.format("Current size %d is less than fixed size %d",
+ curSize, array.length));
+ }
+
+ return this;
+ }
+
+ @Override
+ public void begin(long size) {
+ if (size != array.length) {
+ throw new IllegalStateException(String.format("Begin size %d is not equal to fixed size %d",
+ size, array.length));
+ }
+
+ curSize = 0;
+ }
+
+ @Override
+ public void accept(int i) {
+ if (curSize < array.length) {
+ array[curSize++] = i;
+ } else {
+ throw new IllegalStateException(String.format("Accept exceeded fixed size of %d",
+ array.length));
+ }
+ }
+
+ @Override
+ public void end() {
+ if (curSize < array.length) {
+ throw new IllegalStateException(String.format("End size %d is less than fixed size %d",
+ curSize, array.length));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return String.format("IntFixedNodeBuilder[%d][%s]",
+ array.length - curSize, Arrays.toString(array));
+ }
+ }
+
+ private static final class LongFixedNodeBuilder
+ extends LongArrayNode
+ implements Node.Builder.OfLong {
+
+ LongFixedNodeBuilder(long size) {
+ super(size);
+ assert size < MAX_ARRAY_SIZE;
+ }
+
+ @Override
+ public Node.OfLong build() {
+ if (curSize < array.length) {
+ throw new IllegalStateException(String.format("Current size %d is less than fixed size %d",
+ curSize, array.length));
+ }
+
+ return this;
+ }
+
+ @Override
+ public void begin(long size) {
+ if (size != array.length) {
+ throw new IllegalStateException(String.format("Begin size %d is not equal to fixed size %d",
+ size, array.length));
+ }
+
+ curSize = 0;
+ }
+
+ @Override
+ public void accept(long i) {
+ if (curSize < array.length) {
+ array[curSize++] = i;
+ } else {
+ throw new IllegalStateException(String.format("Accept exceeded fixed size of %d",
+ array.length));
+ }
+ }
+
+ @Override
+ public void end() {
+ if (curSize < array.length) {
+ throw new IllegalStateException(String.format("End size %d is less than fixed size %d",
+ curSize, array.length));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return String.format("LongFixedNodeBuilder[%d][%s]",
+ array.length - curSize, Arrays.toString(array));
+ }
+ }
+
+ private static final class DoubleFixedNodeBuilder
+ extends DoubleArrayNode
+ implements Node.Builder.OfDouble {
+
+ DoubleFixedNodeBuilder(long size) {
+ super(size);
+ assert size < MAX_ARRAY_SIZE;
+ }
+
+ @Override
+ public Node.OfDouble build() {
+ if (curSize < array.length) {
+ throw new IllegalStateException(String.format("Current size %d is less than fixed size %d",
+ curSize, array.length));
+ }
+
+ return this;
+ }
+
+ @Override
+ public void begin(long size) {
+ if (size != array.length) {
+ throw new IllegalStateException(String.format("Begin size %d is not equal to fixed size %d",
+ size, array.length));
+ }
+
+ curSize = 0;
+ }
+
+ @Override
+ public void accept(double i) {
+ if (curSize < array.length) {
+ array[curSize++] = i;
+ } else {
+ throw new IllegalStateException(String.format("Accept exceeded fixed size of %d",
+ array.length));
+ }
+ }
+
+ @Override
+ public void end() {
+ if (curSize < array.length) {
+ throw new IllegalStateException(String.format("End size %d is less than fixed size %d",
+ curSize, array.length));
+ }
+ }
+
+ @Override
+ public String toString() {
+ return String.format("DoubleFixedNodeBuilder[%d][%s]",
+ array.length - curSize, Arrays.toString(array));
+ }
+ }
+
+ private static final class IntSpinedNodeBuilder
+ extends SpinedBuffer.OfInt
+ implements Node.OfInt, Node.Builder.OfInt {
+ private boolean building = false;
+
+ IntSpinedNodeBuilder() {} // Avoid creation of special accessor
+
+ @Override
+ public Spliterator.OfInt spliterator() {
+ assert !building : "during building";
+ return super.spliterator();
+ }
+
+ @Override
+ public void forEach(IntConsumer consumer) {
+ assert !building : "during building";
+ super.forEach(consumer);
+ }
+
+ //
+ @Override
+ public void begin(long size) {
+ assert !building : "was already building";
+ building = true;
+ clear();
+ ensureCapacity(size);
+ }
+
+ @Override
+ public void accept(int i) {
+ assert building : "not building";
+ super.accept(i);
+ }
+
+ @Override
+ public void end() {
+ assert building : "was not building";
+ building = false;
+ // @@@ check begin(size) and size
+ }
+
+ @Override
+ public void copyInto(int[] array, int offset) throws IndexOutOfBoundsException {
+ assert !building : "during building";
+ super.copyInto(array, offset);
+ }
+
+ @Override
+ public int[] asPrimitiveArray() {
+ assert !building : "during building";
+ return super.asPrimitiveArray();
+ }
+
+ @Override
+ public Node.OfInt build() {
+ assert !building : "during building";
+ return this;
+ }
+ }
+
+ private static final class LongSpinedNodeBuilder
+ extends SpinedBuffer.OfLong
+ implements Node.OfLong, Node.Builder.OfLong {
+ private boolean building = false;
+
+ LongSpinedNodeBuilder() {} // Avoid creation of special accessor
+
+ @Override
+ public Spliterator.OfLong spliterator() {
+ assert !building : "during building";
+ return super.spliterator();
+ }
+
+ @Override
+ public void forEach(LongConsumer consumer) {
+ assert !building : "during building";
+ super.forEach(consumer);
+ }
+
+ //
+ @Override
+ public void begin(long size) {
+ assert !building : "was already building";
+ building = true;
+ clear();
+ ensureCapacity(size);
+ }
+
+ @Override
+ public void accept(long i) {
+ assert building : "not building";
+ super.accept(i);
+ }
+
+ @Override
+ public void end() {
+ assert building : "was not building";
+ building = false;
+ // @@@ check begin(size) and size
+ }
+
+ @Override
+ public void copyInto(long[] array, int offset) {
+ assert !building : "during building";
+ super.copyInto(array, offset);
+ }
+
+ @Override
+ public long[] asPrimitiveArray() {
+ assert !building : "during building";
+ return super.asPrimitiveArray();
+ }
+
+ @Override
+ public Node.OfLong build() {
+ assert !building : "during building";
+ return this;
+ }
+ }
+
+ private static final class DoubleSpinedNodeBuilder
+ extends SpinedBuffer.OfDouble
+ implements Node.OfDouble, Node.Builder.OfDouble {
+ private boolean building = false;
+
+ DoubleSpinedNodeBuilder() {} // Avoid creation of special accessor
+
+ @Override
+ public Spliterator.OfDouble spliterator() {
+ assert !building : "during building";
+ return super.spliterator();
+ }
+
+ @Override
+ public void forEach(DoubleConsumer consumer) {
+ assert !building : "during building";
+ super.forEach(consumer);
+ }
+
+ //
+ @Override
+ public void begin(long size) {
+ assert !building : "was already building";
+ building = true;
+ clear();
+ ensureCapacity(size);
+ }
+
+ @Override
+ public void accept(double i) {
+ assert building : "not building";
+ super.accept(i);
+ }
+
+ @Override
+ public void end() {
+ assert building : "was not building";
+ building = false;
+ // @@@ check begin(size) and size
+ }
+
+ @Override
+ public void copyInto(double[] array, int offset) {
+ assert !building : "during building";
+ super.copyInto(array, offset);
+ }
+
+ @Override
+ public double[] asPrimitiveArray() {
+ assert !building : "during building";
+ return super.asPrimitiveArray();
+ }
+
+ @Override
+ public Node.OfDouble build() {
+ assert !building : "during building";
+ return this;
+ }
+ }
+
+ /*
+ * This and subclasses are not intended to be serializable
+ */
+ @SuppressWarnings("serial")
+ private static abstract class SizedCollectorTask<P_IN, P_OUT, T_SINK extends Sink<P_OUT>,
+ K extends SizedCollectorTask<P_IN, P_OUT, T_SINK, K>>
+ extends CountedCompleter<Void>
+ implements Sink<P_OUT> {
+ protected final Spliterator<P_IN> spliterator;
+ protected final PipelineHelper<P_OUT> helper;
+ protected final long targetSize;
+ protected long offset;
+ protected long length;
+ // For Sink implementation
+ protected int index, fence;
+
+ SizedCollectorTask(Spliterator<P_IN> spliterator,
+ PipelineHelper<P_OUT> helper,
+ int arrayLength) {
+ assert spliterator.hasCharacteristics(Spliterator.SUBSIZED);
+ this.spliterator = spliterator;
+ this.helper = helper;
+ this.targetSize = AbstractTask.suggestTargetSize(spliterator.estimateSize());
+ this.offset = 0;
+ this.length = arrayLength;
+ }
+
+ SizedCollectorTask(K parent, Spliterator<P_IN> spliterator,
+ long offset, long length, int arrayLength) {
+ super(parent);
+ assert spliterator.hasCharacteristics(Spliterator.SUBSIZED);
+ this.spliterator = spliterator;
+ this.helper = parent.helper;
+ this.targetSize = parent.targetSize;
+ this.offset = offset;
+ this.length = length;
+
+ if (offset < 0 || length < 0 || (offset + length - 1 >= arrayLength)) {
+ throw new IllegalArgumentException(
+ String.format("offset and length interval [%d, %d + %d) is not within array size interval [0, %d)",
+ offset, offset, length, arrayLength));
+ }
+ }
+
+ @Override
+ public void compute() {
+ SizedCollectorTask<P_IN, P_OUT, T_SINK, K> task = this;
+ Spliterator<P_IN> rightSplit = spliterator, leftSplit;
+ while (rightSplit.estimateSize() > task.targetSize &&
+ (leftSplit = rightSplit.trySplit()) != null) {
+ task.setPendingCount(1);
+ long leftSplitSize = leftSplit.estimateSize();
+ task.makeChild(leftSplit, task.offset, leftSplitSize).fork();
+ task = task.makeChild(rightSplit, task.offset + leftSplitSize,
+ task.length - leftSplitSize);
+ }
+
+ assert task.offset + task.length < MAX_ARRAY_SIZE;
+ @SuppressWarnings("unchecked")
+ T_SINK sink = (T_SINK) task;
+ task.helper.wrapAndCopyInto(sink, rightSplit);
+ task.propagateCompletion();
+ }
+
+ abstract K makeChild(Spliterator<P_IN> spliterator, long offset, long size);
+
+ @Override
+ public void begin(long size) {
+ if (size > length)
+ throw new IllegalStateException("size passed to Sink.begin exceeds array length");
+ // Casts to int are safe since absolute size is verified to be within
+ // bounds when the root concrete SizedCollectorTask is constructed
+ // with the shared array
+ index = (int) offset;
+ fence = index + (int) length;
+ }
+
+ @SuppressWarnings("serial")
+ static final class OfRef<P_IN, P_OUT>
+ extends SizedCollectorTask<P_IN, P_OUT, Sink<P_OUT>, OfRef<P_IN, P_OUT>>
+ implements Sink<P_OUT> {
+ private final P_OUT[] array;
+
+ OfRef(Spliterator<P_IN> spliterator, PipelineHelper<P_OUT> helper, P_OUT[] array) {
+ super(spliterator, helper, array.length);
+ this.array = array;
+ }
+
+ OfRef(OfRef<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator,
+ long offset, long length) {
+ super(parent, spliterator, offset, length, parent.array.length);
+ this.array = parent.array;
+ }
+
+ @Override
+ OfRef<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator,
+ long offset, long size) {
+ return new OfRef<>(this, spliterator, offset, size);
+ }
+
+ @Override
+ public void accept(P_OUT value) {
+ if (index >= fence) {
+ throw new IndexOutOfBoundsException(Integer.toString(index));
+ }
+ array[index++] = value;
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class OfInt<P_IN>
+ extends SizedCollectorTask<P_IN, Integer, Sink.OfInt, OfInt<P_IN>>
+ implements Sink.OfInt {
+ private final int[] array;
+
+ OfInt(Spliterator<P_IN> spliterator, PipelineHelper<Integer> helper, int[] array) {
+ super(spliterator, helper, array.length);
+ this.array = array;
+ }
+
+ OfInt(SizedCollectorTask.OfInt<P_IN> parent, Spliterator<P_IN> spliterator,
+ long offset, long length) {
+ super(parent, spliterator, offset, length, parent.array.length);
+ this.array = parent.array;
+ }
+
+ @Override
+ SizedCollectorTask.OfInt<P_IN> makeChild(Spliterator<P_IN> spliterator,
+ long offset, long size) {
+ return new SizedCollectorTask.OfInt<>(this, spliterator, offset, size);
+ }
+
+ @Override
+ public void accept(int value) {
+ if (index >= fence) {
+ throw new IndexOutOfBoundsException(Integer.toString(index));
+ }
+ array[index++] = value;
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class OfLong<P_IN>
+ extends SizedCollectorTask<P_IN, Long, Sink.OfLong, OfLong<P_IN>>
+ implements Sink.OfLong {
+ private final long[] array;
+
+ OfLong(Spliterator<P_IN> spliterator, PipelineHelper<Long> helper, long[] array) {
+ super(spliterator, helper, array.length);
+ this.array = array;
+ }
+
+ OfLong(SizedCollectorTask.OfLong<P_IN> parent, Spliterator<P_IN> spliterator,
+ long offset, long length) {
+ super(parent, spliterator, offset, length, parent.array.length);
+ this.array = parent.array;
+ }
+
+ @Override
+ SizedCollectorTask.OfLong<P_IN> makeChild(Spliterator<P_IN> spliterator,
+ long offset, long size) {
+ return new SizedCollectorTask.OfLong<>(this, spliterator, offset, size);
+ }
+
+ @Override
+ public void accept(long value) {
+ if (index >= fence) {
+ throw new IndexOutOfBoundsException(Integer.toString(index));
+ }
+ array[index++] = value;
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static final class OfDouble<P_IN>
+ extends SizedCollectorTask<P_IN, Double, Sink.OfDouble, OfDouble<P_IN>>
+ implements Sink.OfDouble {
+ private final double[] array;
+
+ OfDouble(Spliterator<P_IN> spliterator, PipelineHelper<Double> helper, double[] array) {
+ super(spliterator, helper, array.length);
+ this.array = array;
+ }
+
+ OfDouble(SizedCollectorTask.OfDouble<P_IN> parent, Spliterator<P_IN> spliterator,
+ long offset, long length) {
+ super(parent, spliterator, offset, length, parent.array.length);
+ this.array = parent.array;
+ }
+
+ @Override
+ SizedCollectorTask.OfDouble<P_IN> makeChild(Spliterator<P_IN> spliterator,
+ long offset, long size) {
+ return new SizedCollectorTask.OfDouble<>(this, spliterator, offset, size);
+ }
+
+ @Override
+ public void accept(double value) {
+ if (index >= fence) {
+ throw new IndexOutOfBoundsException(Integer.toString(index));
+ }
+ array[index++] = value;
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static abstract class ToArrayTask<T, T_NODE extends Node<T>,
+ K extends ToArrayTask<T, T_NODE, K>>
+ extends CountedCompleter<Void> {
+ protected final T_NODE node;
+ protected final int offset;
+
+ ToArrayTask(T_NODE node, int offset) {
+ this.node = node;
+ this.offset = offset;
+ }
+
+ ToArrayTask(K parent, T_NODE node, int offset) {
+ super(parent);
+ this.node = node;
+ this.offset = offset;
+ }
+
+ abstract void copyNodeToArray();
+
+ abstract K makeChild(int childIndex, int offset);
+
+ @Override
+ public void compute() {
+ ToArrayTask<T, T_NODE, K> task = this;
+ while (true) {
+ if (task.node.getChildCount() == 0) {
+ task.copyNodeToArray();
+ task.propagateCompletion();
+ return;
+ }
+ else {
+ task.setPendingCount(task.node.getChildCount() - 1);
+
+ int size = 0;
+ int i = 0;
+ for (;i < task.node.getChildCount() - 1; i++) {
+ K leftTask = task.makeChild(i, task.offset + size);
+ size += leftTask.node.count();
+ leftTask.fork();
+ }
+ task = task.makeChild(i, task.offset + size);
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static final class OfRef<T>
+ extends ToArrayTask<T, Node<T>, OfRef<T>> {
+ private final T[] array;
+
+ private OfRef(Node<T> node, T[] array, int offset) {
+ super(node, offset);
+ this.array = array;
+ }
+
+ private OfRef(OfRef<T> parent, Node<T> node, int offset) {
+ super(parent, node, offset);
+ this.array = parent.array;
+ }
+
+ @Override
+ OfRef<T> makeChild(int childIndex, int offset) {
+ return new OfRef<>(this, node.getChild(childIndex), offset);
+ }
+
+ @Override
+ void copyNodeToArray() {
+ node.copyInto(array, offset);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static class OfPrimitive<T, T_CONS, T_ARR,
+ T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>,
+ T_NODE extends Node.OfPrimitive<T, T_CONS, T_ARR, T_SPLITR, T_NODE>>
+ extends ToArrayTask<T, T_NODE, OfPrimitive<T, T_CONS, T_ARR, T_SPLITR, T_NODE>> {
+ private final T_ARR array;
+
+ private OfPrimitive(T_NODE node, T_ARR array, int offset) {
+ super(node, offset);
+ this.array = array;
+ }
+
+ private OfPrimitive(OfPrimitive<T, T_CONS, T_ARR, T_SPLITR, T_NODE> parent, T_NODE node, int offset) {
+ super(parent, node, offset);
+ this.array = parent.array;
+ }
+
+ @Override
+ OfPrimitive<T, T_CONS, T_ARR, T_SPLITR, T_NODE> makeChild(int childIndex, int offset) {
+ return new OfPrimitive<>(this, node.getChild(childIndex), offset);
+ }
+
+ @Override
+ void copyNodeToArray() {
+ node.copyInto(array, offset);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static final class OfInt
+ extends OfPrimitive<Integer, IntConsumer, int[], Spliterator.OfInt, Node.OfInt> {
+ private OfInt(Node.OfInt node, int[] array, int offset) {
+ super(node, array, offset);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static final class OfLong
+ extends OfPrimitive<Long, LongConsumer, long[], Spliterator.OfLong, Node.OfLong> {
+ private OfLong(Node.OfLong node, long[] array, int offset) {
+ super(node, array, offset);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static final class OfDouble
+ extends OfPrimitive<Double, DoubleConsumer, double[], Spliterator.OfDouble, Node.OfDouble> {
+ private OfDouble(Node.OfDouble node, double[] array, int offset) {
+ super(node, array, offset);
+ }
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static class CollectorTask<P_IN, P_OUT, T_NODE extends Node<P_OUT>, T_BUILDER extends Node.Builder<P_OUT>>
+ extends AbstractTask<P_IN, P_OUT, T_NODE, CollectorTask<P_IN, P_OUT, T_NODE, T_BUILDER>> {
+ protected final PipelineHelper<P_OUT> helper;
+ protected final LongFunction<T_BUILDER> builderFactory;
+ protected final BinaryOperator<T_NODE> concFactory;
+
+ CollectorTask(PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator,
+ LongFunction<T_BUILDER> builderFactory,
+ BinaryOperator<T_NODE> concFactory) {
+ super(helper, spliterator);
+ this.helper = helper;
+ this.builderFactory = builderFactory;
+ this.concFactory = concFactory;
+ }
+
+ CollectorTask(CollectorTask<P_IN, P_OUT, T_NODE, T_BUILDER> parent,
+ Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ helper = parent.helper;
+ builderFactory = parent.builderFactory;
+ concFactory = parent.concFactory;
+ }
+
+ @Override
+ protected CollectorTask<P_IN, P_OUT, T_NODE, T_BUILDER> makeChild(Spliterator<P_IN> spliterator) {
+ return new CollectorTask<>(this, spliterator);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected T_NODE doLeaf() {
+ T_BUILDER builder = builderFactory.apply(helper.exactOutputSizeIfKnown(spliterator));
+ return (T_NODE) helper.wrapAndCopyInto(builder, spliterator).build();
+ }
+
+ @Override
+ public void onCompletion(CountedCompleter<?> caller) {
+ if (!isLeaf())
+ setLocalResult(concFactory.apply(leftChild.getLocalResult(), rightChild.getLocalResult()));
+ super.onCompletion(caller);
+ }
+
+ @SuppressWarnings("serial")
+ private static final class OfRef<P_IN, P_OUT>
+ extends CollectorTask<P_IN, P_OUT, Node<P_OUT>, Node.Builder<P_OUT>> {
+ OfRef(PipelineHelper<P_OUT> helper,
+ IntFunction<P_OUT[]> generator,
+ Spliterator<P_IN> spliterator) {
+ super(helper, spliterator, s -> builder(s, generator), ConcNode::new);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static final class OfInt<P_IN>
+ extends CollectorTask<P_IN, Integer, Node.OfInt, Node.Builder.OfInt> {
+ OfInt(PipelineHelper<Integer> helper, Spliterator<P_IN> spliterator) {
+ super(helper, spliterator, Nodes::intBuilder, ConcNode.OfInt::new);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static final class OfLong<P_IN>
+ extends CollectorTask<P_IN, Long, Node.OfLong, Node.Builder.OfLong> {
+ OfLong(PipelineHelper<Long> helper, Spliterator<P_IN> spliterator) {
+ super(helper, spliterator, Nodes::longBuilder, ConcNode.OfLong::new);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static final class OfDouble<P_IN>
+ extends CollectorTask<P_IN, Double, Node.OfDouble, Node.Builder.OfDouble> {
+ OfDouble(PipelineHelper<Double> helper, Spliterator<P_IN> spliterator) {
+ super(helper, spliterator, Nodes::doubleBuilder, ConcNode.OfDouble::new);
+ }
+ }
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/PipelineHelper.java b/ojluni/src/main/java/java/util/stream/PipelineHelper.java
new file mode 100644
index 0000000..f510131
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/PipelineHelper.java
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.function.IntFunction;
+
+/**
+ * Helper class for executing <a href="package-summary.html#StreamOps">
+ * stream pipelines</a>, capturing all of the information about a stream
+ * pipeline (output shape, intermediate operations, stream flags, parallelism,
+ * etc) in one place.
+ *
+ * <p>
+ * A {@code PipelineHelper} describes the initial segment of a stream pipeline,
+ * including its source, intermediate operations, and may additionally
+ * incorporate information about the terminal (or stateful) operation which
+ * follows the last intermediate operation described by this
+ * {@code PipelineHelper}. The {@code PipelineHelper} is passed to the
+ * {@link TerminalOp#evaluateParallel(PipelineHelper, java.util.Spliterator)},
+ * {@link TerminalOp#evaluateSequential(PipelineHelper, java.util.Spliterator)},
+ * and {@link AbstractPipeline#opEvaluateParallel(PipelineHelper, java.util.Spliterator,
+ * java.util.function.IntFunction)}, methods, which can use the
+ * {@code PipelineHelper} to access information about the pipeline such as
+ * head shape, stream flags, and size, and use the helper methods
+ * such as {@link #wrapAndCopyInto(Sink, Spliterator)},
+ * {@link #copyInto(Sink, Spliterator)}, and {@link #wrapSink(Sink)} to execute
+ * pipeline operations.
+ *
+ * @param <P_OUT> type of output elements from the pipeline
+ * @since 1.8
+ */
+abstract class PipelineHelper<P_OUT> {
+
+ /**
+ * Gets the stream shape for the source of the pipeline segment.
+ *
+ * @return the stream shape for the source of the pipeline segment.
+ */
+ abstract StreamShape getSourceShape();
+
+ /**
+ * Gets the combined stream and operation flags for the output of the described
+ * pipeline. This will incorporate stream flags from the stream source, all
+ * the intermediate operations and the terminal operation.
+ *
+ * @return the combined stream and operation flags
+ * @see StreamOpFlag
+ */
+ abstract int getStreamAndOpFlags();
+
+ /**
+ * Returns the exact output size of the portion of the output resulting from
+ * applying the pipeline stages described by this {@code PipelineHelper} to
+ * the portion of the input described by the provided
+ * {@code Spliterator}, if known. If not known or known infinite, will
+ * return {@code -1}.
+ *
+ * @apiNote
+ * The exact output size is known if the {@code Spliterator} has the
+ * {@code SIZED} characteristic, and the operation flags
+ * {@link StreamOpFlag#SIZED} is known on the combined stream and operation
+ * flags.
+ *
+ * @param spliterator the spliterator describing the relevant portion of the
+ * source data
+ * @return the exact size if known, or -1 if infinite or unknown
+ */
+ abstract<P_IN> long exactOutputSizeIfKnown(Spliterator<P_IN> spliterator);
+
+ /**
+ * Applies the pipeline stages described by this {@code PipelineHelper} to
+ * the provided {@code Spliterator} and send the results to the provided
+ * {@code Sink}.
+ *
+ * @implSpec
+ * The implementation behaves as if:
+ * <pre>{@code
+ * copyInto(wrapSink(sink), spliterator);
+ * }</pre>
+ *
+ * @param sink the {@code Sink} to receive the results
+ * @param spliterator the spliterator describing the source input to process
+ */
+ abstract<P_IN, S extends Sink<P_OUT>> S wrapAndCopyInto(S sink, Spliterator<P_IN> spliterator);
+
+ /**
+ * Pushes elements obtained from the {@code Spliterator} into the provided
+ * {@code Sink}. If the stream pipeline is known to have short-circuiting
+ * stages in it (see {@link StreamOpFlag#SHORT_CIRCUIT}), the
+ * {@link Sink#cancellationRequested()} is checked after each
+ * element, stopping if cancellation is requested.
+ *
+ * @implSpec
+ * This method conforms to the {@code Sink} protocol of calling
+ * {@code Sink.begin} before pushing elements, via {@code Sink.accept}, and
+ * calling {@code Sink.end} after all elements have been pushed.
+ *
+ * @param wrappedSink the destination {@code Sink}
+ * @param spliterator the source {@code Spliterator}
+ */
+ abstract<P_IN> void copyInto(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator);
+
+ /**
+ * Pushes elements obtained from the {@code Spliterator} into the provided
+ * {@code Sink}, checking {@link Sink#cancellationRequested()} after each
+ * element, and stopping if cancellation is requested.
+ *
+ * @implSpec
+ * This method conforms to the {@code Sink} protocol of calling
+ * {@code Sink.begin} before pushing elements, via {@code Sink.accept}, and
+ * calling {@code Sink.end} after all elements have been pushed or if
+ * cancellation is requested.
+ *
+ * @param wrappedSink the destination {@code Sink}
+ * @param spliterator the source {@code Spliterator}
+ */
+ abstract <P_IN> void copyIntoWithCancel(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator);
+
+ /**
+ * Takes a {@code Sink} that accepts elements of the output type of the
+ * {@code PipelineHelper}, and wraps it with a {@code Sink} that accepts
+ * elements of the input type and implements all the intermediate operations
+ * described by this {@code PipelineHelper}, delivering the result into the
+ * provided {@code Sink}.
+ *
+ * @param sink the {@code Sink} to receive the results
+ * @return a {@code Sink} that implements the pipeline stages and sends
+ * results to the provided {@code Sink}
+ */
+ abstract<P_IN> Sink<P_IN> wrapSink(Sink<P_OUT> sink);
+
+ /**
+ * Wraps the source spliterator, lazily applying the pipeline stages.
+ * @param spliterator the source {@code Spliterator} of input elements
+ * @param <P_IN> the type of input elements to the pipeline
+ * @return a {@code Spliterator} of {@code P_OUT} output elements
+ */
+ abstract<P_IN> Spliterator<P_OUT> wrapSpliterator(Spliterator<P_IN> spliterator);
+
+ /**
+ * Constructs a {@link Node.Builder} compatible with the output shape of
+ * this {@code PipelineHelper}.
+ *
+ * @param exactSizeIfKnown if >=0 then a builder will be created that has a
+ * fixed capacity of exactly sizeIfKnown elements; if < 0 then the
+ * builder has variable capacity. A fixed capacity builder will fail
+ * if an element is added after the builder has reached capacity.
+ * @param generator a factory function for array instances
+ * @return a {@code Node.Builder} compatible with the output shape of this
+ * {@code PipelineHelper}
+ */
+ abstract Node.Builder<P_OUT> makeNodeBuilder(long exactSizeIfKnown,
+ IntFunction<P_OUT[]> generator);
+
+ /**
+ * Collects all output elements resulting from applying the pipeline stages
+ * to the source {@code Spliterator} into a {@code Node}.
+ *
+ * @implNote
+ * If the pipeline has no intermediate operations and the source is backed
+ * by a {@code Node} then that {@code Node} will be returned (or flattened
+ * and then returned). This reduces copying for a pipeline consisting of a
+ * stateful operation followed by a terminal operation that returns an
+ * array, such as:
+ * <pre>{@code
+ * stream.sorted().toArray();
+ * }</pre>
+ *
+ * @param spliterator the source {@code Spliterator}
+ * @param flatten if true and the pipeline is a parallel pipeline then the
+ * {@code Node} returned will contain no children, otherwise the
+ * {@code Node} may represent the root in a tree that reflects the
+ * shape of the computation tree.
+ * @param generator a factory function for array instances
+ * @return the {@code Node} containing all output elements
+ */
+ abstract<P_IN> Node<P_OUT> evaluate(Spliterator<P_IN> spliterator,
+ boolean flatten,
+ IntFunction<P_OUT[]> generator);
+}
diff --git a/ojluni/src/main/java/java/util/stream/ReduceOps.java b/ojluni/src/main/java/java/util/stream/ReduceOps.java
new file mode 100644
index 0000000..3a0f81a
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/ReduceOps.java
@@ -0,0 +1,761 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.Optional;
+import java.util.OptionalDouble;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
+import java.util.Spliterator;
+import java.util.concurrent.CountedCompleter;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.DoubleBinaryOperator;
+import java.util.function.IntBinaryOperator;
+import java.util.function.LongBinaryOperator;
+import java.util.function.ObjDoubleConsumer;
+import java.util.function.ObjIntConsumer;
+import java.util.function.ObjLongConsumer;
+import java.util.function.Supplier;
+
+/**
+ * Factory for creating instances of {@code TerminalOp} that implement
+ * reductions.
+ *
+ * @since 1.8
+ */
+final class ReduceOps {
+
+ private ReduceOps() { }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a functional reduce on
+ * reference values.
+ *
+ * @param <T> the type of the input elements
+ * @param <U> the type of the result
+ * @param seed the identity element for the reduction
+ * @param reducer the accumulating function that incorporates an additional
+ * input element into the result
+ * @param combiner the combining function that combines two intermediate
+ * results
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static <T, U> TerminalOp<T, U>
+ makeRef(U seed, BiFunction<U, ? super T, U> reducer, BinaryOperator<U> combiner) {
+ Objects.requireNonNull(reducer);
+ Objects.requireNonNull(combiner);
+ class ReducingSink extends Box<U> implements AccumulatingSink<T, U, ReducingSink> {
+ @Override
+ public void begin(long size) {
+ state = seed;
+ }
+
+ @Override
+ public void accept(T t) {
+ state = reducer.apply(state, t);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ state = combiner.apply(state, other.state);
+ }
+ }
+ return new ReduceOp<T, U, ReducingSink>(StreamShape.REFERENCE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a functional reduce on
+ * reference values producing an optional reference result.
+ *
+ * @param <T> The type of the input elements, and the type of the result
+ * @param operator The reducing function
+ * @return A {@code TerminalOp} implementing the reduction
+ */
+ public static <T> TerminalOp<T, Optional<T>>
+ makeRef(BinaryOperator<T> operator) {
+ Objects.requireNonNull(operator);
+ class ReducingSink
+ implements AccumulatingSink<T, Optional<T>, ReducingSink> {
+ private boolean empty;
+ private T state;
+
+ public void begin(long size) {
+ empty = true;
+ state = null;
+ }
+
+ @Override
+ public void accept(T t) {
+ if (empty) {
+ empty = false;
+ state = t;
+ } else {
+ state = operator.apply(state, t);
+ }
+ }
+
+ @Override
+ public Optional<T> get() {
+ return empty ? Optional.empty() : Optional.of(state);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ if (!other.empty)
+ accept(other.state);
+ }
+ }
+ return new ReduceOp<T, Optional<T>, ReducingSink>(StreamShape.REFERENCE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a mutable reduce on
+ * reference values.
+ *
+ * @param <T> the type of the input elements
+ * @param <I> the type of the intermediate reduction result
+ * @param collector a {@code Collector} defining the reduction
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static <T, I> TerminalOp<T, I>
+ makeRef(Collector<? super T, I, ?> collector) {
+ Supplier<I> supplier = Objects.requireNonNull(collector).supplier();
+ BiConsumer<I, ? super T> accumulator = collector.accumulator();
+ BinaryOperator<I> combiner = collector.combiner();
+ class ReducingSink extends Box<I>
+ implements AccumulatingSink<T, I, ReducingSink> {
+ @Override
+ public void begin(long size) {
+ state = supplier.get();
+ }
+
+ @Override
+ public void accept(T t) {
+ accumulator.accept(state, t);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ state = combiner.apply(state, other.state);
+ }
+ }
+ return new ReduceOp<T, I, ReducingSink>(StreamShape.REFERENCE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+
+ @Override
+ public int getOpFlags() {
+ return collector.characteristics().contains(Collector.Characteristics.UNORDERED)
+ ? StreamOpFlag.NOT_ORDERED
+ : 0;
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a mutable reduce on
+ * reference values.
+ *
+ * @param <T> the type of the input elements
+ * @param <R> the type of the result
+ * @param seedFactory a factory to produce a new base accumulator
+ * @param accumulator a function to incorporate an element into an
+ * accumulator
+ * @param reducer a function to combine an accumulator into another
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static <T, R> TerminalOp<T, R>
+ makeRef(Supplier<R> seedFactory,
+ BiConsumer<R, ? super T> accumulator,
+ BiConsumer<R,R> reducer) {
+ Objects.requireNonNull(seedFactory);
+ Objects.requireNonNull(accumulator);
+ Objects.requireNonNull(reducer);
+ class ReducingSink extends Box<R>
+ implements AccumulatingSink<T, R, ReducingSink> {
+ @Override
+ public void begin(long size) {
+ state = seedFactory.get();
+ }
+
+ @Override
+ public void accept(T t) {
+ accumulator.accept(state, t);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ reducer.accept(state, other.state);
+ }
+ }
+ return new ReduceOp<T, R, ReducingSink>(StreamShape.REFERENCE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a functional reduce on
+ * {@code int} values.
+ *
+ * @param identity the identity for the combining function
+ * @param operator the combining function
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static TerminalOp<Integer, Integer>
+ makeInt(int identity, IntBinaryOperator operator) {
+ Objects.requireNonNull(operator);
+ class ReducingSink
+ implements AccumulatingSink<Integer, Integer, ReducingSink>, Sink.OfInt {
+ private int state;
+
+ @Override
+ public void begin(long size) {
+ state = identity;
+ }
+
+ @Override
+ public void accept(int t) {
+ state = operator.applyAsInt(state, t);
+ }
+
+ @Override
+ public Integer get() {
+ return state;
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ accept(other.state);
+ }
+ }
+ return new ReduceOp<Integer, Integer, ReducingSink>(StreamShape.INT_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a functional reduce on
+ * {@code int} values, producing an optional integer result.
+ *
+ * @param operator the combining function
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static TerminalOp<Integer, OptionalInt>
+ makeInt(IntBinaryOperator operator) {
+ Objects.requireNonNull(operator);
+ class ReducingSink
+ implements AccumulatingSink<Integer, OptionalInt, ReducingSink>, Sink.OfInt {
+ private boolean empty;
+ private int state;
+
+ public void begin(long size) {
+ empty = true;
+ state = 0;
+ }
+
+ @Override
+ public void accept(int t) {
+ if (empty) {
+ empty = false;
+ state = t;
+ }
+ else {
+ state = operator.applyAsInt(state, t);
+ }
+ }
+
+ @Override
+ public OptionalInt get() {
+ return empty ? OptionalInt.empty() : OptionalInt.of(state);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ if (!other.empty)
+ accept(other.state);
+ }
+ }
+ return new ReduceOp<Integer, OptionalInt, ReducingSink>(StreamShape.INT_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a mutable reduce on
+ * {@code int} values.
+ *
+ * @param <R> The type of the result
+ * @param supplier a factory to produce a new accumulator of the result type
+ * @param accumulator a function to incorporate an int into an
+ * accumulator
+ * @param combiner a function to combine an accumulator into another
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static <R> TerminalOp<Integer, R>
+ makeInt(Supplier<R> supplier,
+ ObjIntConsumer<R> accumulator,
+ BinaryOperator<R> combiner) {
+ Objects.requireNonNull(supplier);
+ Objects.requireNonNull(accumulator);
+ Objects.requireNonNull(combiner);
+ class ReducingSink extends Box<R>
+ implements AccumulatingSink<Integer, R, ReducingSink>, Sink.OfInt {
+ @Override
+ public void begin(long size) {
+ state = supplier.get();
+ }
+
+ @Override
+ public void accept(int t) {
+ accumulator.accept(state, t);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ state = combiner.apply(state, other.state);
+ }
+ }
+ return new ReduceOp<Integer, R, ReducingSink>(StreamShape.INT_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a functional reduce on
+ * {@code long} values.
+ *
+ * @param identity the identity for the combining function
+ * @param operator the combining function
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static TerminalOp<Long, Long>
+ makeLong(long identity, LongBinaryOperator operator) {
+ Objects.requireNonNull(operator);
+ class ReducingSink
+ implements AccumulatingSink<Long, Long, ReducingSink>, Sink.OfLong {
+ private long state;
+
+ @Override
+ public void begin(long size) {
+ state = identity;
+ }
+
+ @Override
+ public void accept(long t) {
+ state = operator.applyAsLong(state, t);
+ }
+
+ @Override
+ public Long get() {
+ return state;
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ accept(other.state);
+ }
+ }
+ return new ReduceOp<Long, Long, ReducingSink>(StreamShape.LONG_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a functional reduce on
+ * {@code long} values, producing an optional long result.
+ *
+ * @param operator the combining function
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static TerminalOp<Long, OptionalLong>
+ makeLong(LongBinaryOperator operator) {
+ Objects.requireNonNull(operator);
+ class ReducingSink
+ implements AccumulatingSink<Long, OptionalLong, ReducingSink>, Sink.OfLong {
+ private boolean empty;
+ private long state;
+
+ public void begin(long size) {
+ empty = true;
+ state = 0;
+ }
+
+ @Override
+ public void accept(long t) {
+ if (empty) {
+ empty = false;
+ state = t;
+ }
+ else {
+ state = operator.applyAsLong(state, t);
+ }
+ }
+
+ @Override
+ public OptionalLong get() {
+ return empty ? OptionalLong.empty() : OptionalLong.of(state);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ if (!other.empty)
+ accept(other.state);
+ }
+ }
+ return new ReduceOp<Long, OptionalLong, ReducingSink>(StreamShape.LONG_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a mutable reduce on
+ * {@code long} values.
+ *
+ * @param <R> the type of the result
+ * @param supplier a factory to produce a new accumulator of the result type
+ * @param accumulator a function to incorporate a long into an
+ * accumulator
+ * @param combiner a function to combine an accumulator into another
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static <R> TerminalOp<Long, R>
+ makeLong(Supplier<R> supplier,
+ ObjLongConsumer<R> accumulator,
+ BinaryOperator<R> combiner) {
+ Objects.requireNonNull(supplier);
+ Objects.requireNonNull(accumulator);
+ Objects.requireNonNull(combiner);
+ class ReducingSink extends Box<R>
+ implements AccumulatingSink<Long, R, ReducingSink>, Sink.OfLong {
+ @Override
+ public void begin(long size) {
+ state = supplier.get();
+ }
+
+ @Override
+ public void accept(long t) {
+ accumulator.accept(state, t);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ state = combiner.apply(state, other.state);
+ }
+ }
+ return new ReduceOp<Long, R, ReducingSink>(StreamShape.LONG_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a functional reduce on
+ * {@code double} values.
+ *
+ * @param identity the identity for the combining function
+ * @param operator the combining function
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static TerminalOp<Double, Double>
+ makeDouble(double identity, DoubleBinaryOperator operator) {
+ Objects.requireNonNull(operator);
+ class ReducingSink
+ implements AccumulatingSink<Double, Double, ReducingSink>, Sink.OfDouble {
+ private double state;
+
+ @Override
+ public void begin(long size) {
+ state = identity;
+ }
+
+ @Override
+ public void accept(double t) {
+ state = operator.applyAsDouble(state, t);
+ }
+
+ @Override
+ public Double get() {
+ return state;
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ accept(other.state);
+ }
+ }
+ return new ReduceOp<Double, Double, ReducingSink>(StreamShape.DOUBLE_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a functional reduce on
+ * {@code double} values, producing an optional double result.
+ *
+ * @param operator the combining function
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static TerminalOp<Double, OptionalDouble>
+ makeDouble(DoubleBinaryOperator operator) {
+ Objects.requireNonNull(operator);
+ class ReducingSink
+ implements AccumulatingSink<Double, OptionalDouble, ReducingSink>, Sink.OfDouble {
+ private boolean empty;
+ private double state;
+
+ public void begin(long size) {
+ empty = true;
+ state = 0;
+ }
+
+ @Override
+ public void accept(double t) {
+ if (empty) {
+ empty = false;
+ state = t;
+ }
+ else {
+ state = operator.applyAsDouble(state, t);
+ }
+ }
+
+ @Override
+ public OptionalDouble get() {
+ return empty ? OptionalDouble.empty() : OptionalDouble.of(state);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ if (!other.empty)
+ accept(other.state);
+ }
+ }
+ return new ReduceOp<Double, OptionalDouble, ReducingSink>(StreamShape.DOUBLE_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that implements a mutable reduce on
+ * {@code double} values.
+ *
+ * @param <R> the type of the result
+ * @param supplier a factory to produce a new accumulator of the result type
+ * @param accumulator a function to incorporate a double into an
+ * accumulator
+ * @param combiner a function to combine an accumulator into another
+ * @return a {@code TerminalOp} implementing the reduction
+ */
+ public static <R> TerminalOp<Double, R>
+ makeDouble(Supplier<R> supplier,
+ ObjDoubleConsumer<R> accumulator,
+ BinaryOperator<R> combiner) {
+ Objects.requireNonNull(supplier);
+ Objects.requireNonNull(accumulator);
+ Objects.requireNonNull(combiner);
+ class ReducingSink extends Box<R>
+ implements AccumulatingSink<Double, R, ReducingSink>, Sink.OfDouble {
+ @Override
+ public void begin(long size) {
+ state = supplier.get();
+ }
+
+ @Override
+ public void accept(double t) {
+ accumulator.accept(state, t);
+ }
+
+ @Override
+ public void combine(ReducingSink other) {
+ state = combiner.apply(state, other.state);
+ }
+ }
+ return new ReduceOp<Double, R, ReducingSink>(StreamShape.DOUBLE_VALUE) {
+ @Override
+ public ReducingSink makeSink() {
+ return new ReducingSink();
+ }
+ };
+ }
+
+ /**
+ * A type of {@code TerminalSink} that implements an associative reducing
+ * operation on elements of type {@code T} and producing a result of type
+ * {@code R}.
+ *
+ * @param <T> the type of input element to the combining operation
+ * @param <R> the result type
+ * @param <K> the type of the {@code AccumulatingSink}.
+ */
+ private interface AccumulatingSink<T, R, K extends AccumulatingSink<T, R, K>>
+ extends TerminalSink<T, R> {
+ public void combine(K other);
+ }
+
+ /**
+ * State box for a single state element, used as a base class for
+ * {@code AccumulatingSink} instances
+ *
+ * @param <U> The type of the state element
+ */
+ private static abstract class Box<U> {
+ U state;
+
+ Box() {} // Avoid creation of special accessor
+
+ public U get() {
+ return state;
+ }
+ }
+
+ /**
+ * A {@code TerminalOp} that evaluates a stream pipeline and sends the
+ * output into an {@code AccumulatingSink}, which performs a reduce
+ * operation. The {@code AccumulatingSink} must represent an associative
+ * reducing operation.
+ *
+ * @param <T> the output type of the stream pipeline
+ * @param <R> the result type of the reducing operation
+ * @param <S> the type of the {@code AccumulatingSink}
+ */
+ private static abstract class ReduceOp<T, R, S extends AccumulatingSink<T, R, S>>
+ implements TerminalOp<T, R> {
+ private final StreamShape inputShape;
+
+ /**
+ * Create a {@code ReduceOp} of the specified stream shape which uses
+ * the specified {@code Supplier} to create accumulating sinks.
+ *
+ * @param shape The shape of the stream pipeline
+ */
+ ReduceOp(StreamShape shape) {
+ inputShape = shape;
+ }
+
+ public abstract S makeSink();
+
+ @Override
+ public StreamShape inputShape() {
+ return inputShape;
+ }
+
+ @Override
+ public <P_IN> R evaluateSequential(PipelineHelper<T> helper,
+ Spliterator<P_IN> spliterator) {
+ return helper.wrapAndCopyInto(makeSink(), spliterator).get();
+ }
+
+ @Override
+ public <P_IN> R evaluateParallel(PipelineHelper<T> helper,
+ Spliterator<P_IN> spliterator) {
+ return new ReduceTask<>(this, helper, spliterator).invoke().get();
+ }
+ }
+
+ /**
+ * A {@code ForkJoinTask} for performing a parallel reduce operation.
+ */
+ @SuppressWarnings("serial")
+ private static final class ReduceTask<P_IN, P_OUT, R,
+ S extends AccumulatingSink<P_OUT, R, S>>
+ extends AbstractTask<P_IN, P_OUT, S, ReduceTask<P_IN, P_OUT, R, S>> {
+ private final ReduceOp<P_OUT, R, S> op;
+
+ ReduceTask(ReduceOp<P_OUT, R, S> op,
+ PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ super(helper, spliterator);
+ this.op = op;
+ }
+
+ ReduceTask(ReduceTask<P_IN, P_OUT, R, S> parent,
+ Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ this.op = parent.op;
+ }
+
+ @Override
+ protected ReduceTask<P_IN, P_OUT, R, S> makeChild(Spliterator<P_IN> spliterator) {
+ return new ReduceTask<>(this, spliterator);
+ }
+
+ @Override
+ protected S doLeaf() {
+ return helper.wrapAndCopyInto(op.makeSink(), spliterator);
+ }
+
+ @Override
+ public void onCompletion(CountedCompleter<?> caller) {
+ if (!isLeaf()) {
+ S leftResult = leftChild.getLocalResult();
+ leftResult.combine(rightChild.getLocalResult());
+ setLocalResult(leftResult);
+ }
+ // GC spliterator, left and right child
+ super.onCompletion(caller);
+ }
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/ReferencePipeline.java b/ojluni/src/main/java/java/util/stream/ReferencePipeline.java
new file mode 100644
index 0000000..8f5da0e
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/ReferencePipeline.java
@@ -0,0 +1,661 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.Function;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.LongConsumer;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+import java.util.function.ToDoubleFunction;
+import java.util.function.ToIntFunction;
+import java.util.function.ToLongFunction;
+
+/**
+ * Abstract base class for an intermediate pipeline stage or pipeline source
+ * stage whose elements are of type {@code U}.
+ *
+ * @param <P_IN> type of elements in the upstream source
+ * @param <P_OUT> type of elements produced by this stage
+ *
+ * @since 1.8
+ */
+abstract class ReferencePipeline<P_IN, P_OUT>
+ extends AbstractPipeline<P_IN, P_OUT, Stream<P_OUT>>
+ implements Stream<P_OUT> {
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Supplier<Spliterator>} describing the stream source
+ * @param sourceFlags the source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ ReferencePipeline(Supplier<? extends Spliterator<?>> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for the head of a stream pipeline.
+ *
+ * @param source {@code Spliterator} describing the stream source
+ * @param sourceFlags The source flags for the stream source, described in
+ * {@link StreamOpFlag}
+ * @param parallel {@code true} if the pipeline is parallel
+ */
+ ReferencePipeline(Spliterator<?> source,
+ int sourceFlags, boolean parallel) {
+ super(source, sourceFlags, parallel);
+ }
+
+ /**
+ * Constructor for appending an intermediate operation onto an existing
+ * pipeline.
+ *
+ * @param upstream the upstream element source.
+ */
+ ReferencePipeline(AbstractPipeline<?, P_IN, ?> upstream, int opFlags) {
+ super(upstream, opFlags);
+ }
+
+ // Shape-specific methods
+
+ @Override
+ final StreamShape getOutputShape() {
+ return StreamShape.REFERENCE;
+ }
+
+ @Override
+ final <P_IN> Node<P_OUT> evaluateToNode(PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator,
+ boolean flattenTree,
+ IntFunction<P_OUT[]> generator) {
+ return Nodes.collect(helper, spliterator, flattenTree, generator);
+ }
+
+ @Override
+ final <P_IN> Spliterator<P_OUT> wrap(PipelineHelper<P_OUT> ph,
+ Supplier<Spliterator<P_IN>> supplier,
+ boolean isParallel) {
+ return new StreamSpliterators.WrappingSpliterator<>(ph, supplier, isParallel);
+ }
+
+ @Override
+ final Spliterator<P_OUT> lazySpliterator(Supplier<? extends Spliterator<P_OUT>> supplier) {
+ return new StreamSpliterators.DelegatingSpliterator<>(supplier);
+ }
+
+ @Override
+ final void forEachWithCancel(Spliterator<P_OUT> spliterator, Sink<P_OUT> sink) {
+ do { } while (!sink.cancellationRequested() && spliterator.tryAdvance(sink));
+ }
+
+ @Override
+ final Node.Builder<P_OUT> makeNodeBuilder(long exactSizeIfKnown, IntFunction<P_OUT[]> generator) {
+ return Nodes.builder(exactSizeIfKnown, generator);
+ }
+
+
+ // BaseStream
+
+ @Override
+ public final Iterator<P_OUT> iterator() {
+ return Spliterators.iterator(spliterator());
+ }
+
+
+ // Stream
+
+ // Stateless intermediate operations from Stream
+
+ @Override
+ public Stream<P_OUT> unordered() {
+ if (!isOrdered())
+ return this;
+ return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE, StreamOpFlag.NOT_ORDERED) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
+ return sink;
+ }
+ };
+ }
+
+ @Override
+ public final Stream<P_OUT> filter(Predicate<? super P_OUT> predicate) {
+ Objects.requireNonNull(predicate);
+ return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
+ return new Sink.ChainedReference<P_OUT, P_OUT>(sink) {
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(P_OUT u) {
+ if (predicate.test(u))
+ downstream.accept(u);
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public final <R> Stream<R> map(Function<? super P_OUT, ? extends R> mapper) {
+ Objects.requireNonNull(mapper);
+ return new StatelessOp<P_OUT, R>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<R> sink) {
+ return new Sink.ChainedReference<P_OUT, R>(sink) {
+ @Override
+ public void accept(P_OUT u) {
+ downstream.accept(mapper.apply(u));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final IntStream mapToInt(ToIntFunction<? super P_OUT> mapper) {
+ Objects.requireNonNull(mapper);
+ return new IntPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedReference<P_OUT, Integer>(sink) {
+ @Override
+ public void accept(P_OUT u) {
+ downstream.accept(mapper.applyAsInt(u));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final LongStream mapToLong(ToLongFunction<? super P_OUT> mapper) {
+ Objects.requireNonNull(mapper);
+ return new LongPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedReference<P_OUT, Long>(sink) {
+ @Override
+ public void accept(P_OUT u) {
+ downstream.accept(mapper.applyAsLong(u));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final DoubleStream mapToDouble(ToDoubleFunction<? super P_OUT> mapper) {
+ Objects.requireNonNull(mapper);
+ return new DoublePipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedReference<P_OUT, Double>(sink) {
+ @Override
+ public void accept(P_OUT u) {
+ downstream.accept(mapper.applyAsDouble(u));
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final <R> Stream<R> flatMap(Function<? super P_OUT, ? extends Stream<? extends R>> mapper) {
+ Objects.requireNonNull(mapper);
+ // We can do better than this, by polling cancellationRequested when stream is infinite
+ return new StatelessOp<P_OUT, R>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<R> sink) {
+ return new Sink.ChainedReference<P_OUT, R>(sink) {
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(P_OUT u) {
+ try (Stream<? extends R> result = mapper.apply(u)) {
+                        // We can do better than this too; optimize for depth=0 case and just grab spliterator and forEach it
+ if (result != null)
+ result.sequential().forEach(downstream);
+ }
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final IntStream flatMapToInt(Function<? super P_OUT, ? extends IntStream> mapper) {
+ Objects.requireNonNull(mapper);
+ // We can do better than this, by polling cancellationRequested when stream is infinite
+ return new IntPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedReference<P_OUT, Integer>(sink) {
+ IntConsumer downstreamAsInt = downstream::accept;
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(P_OUT u) {
+ try (IntStream result = mapper.apply(u)) {
+                        // We can do better than this too; optimize for depth=0 case and just grab spliterator and forEach it
+ if (result != null)
+ result.sequential().forEach(downstreamAsInt);
+ }
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final DoubleStream flatMapToDouble(Function<? super P_OUT, ? extends DoubleStream> mapper) {
+ Objects.requireNonNull(mapper);
+ // We can do better than this, by polling cancellationRequested when stream is infinite
+ return new DoublePipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedReference<P_OUT, Double>(sink) {
+ DoubleConsumer downstreamAsDouble = downstream::accept;
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(P_OUT u) {
+ try (DoubleStream result = mapper.apply(u)) {
+                        // We can do better than this too; optimize for depth=0 case and just grab spliterator and forEach it
+ if (result != null)
+ result.sequential().forEach(downstreamAsDouble);
+ }
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final LongStream flatMapToLong(Function<? super P_OUT, ? extends LongStream> mapper) {
+ Objects.requireNonNull(mapper);
+ // We can do better than this, by polling cancellationRequested when stream is infinite
+ return new LongPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
+ StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedReference<P_OUT, Long>(sink) {
+ LongConsumer downstreamAsLong = downstream::accept;
+ @Override
+ public void begin(long size) {
+ downstream.begin(-1);
+ }
+
+ @Override
+ public void accept(P_OUT u) {
+ try (LongStream result = mapper.apply(u)) {
+                        // We can do better than this too; optimize for depth=0 case and just grab spliterator and forEach it
+ if (result != null)
+ result.sequential().forEach(downstreamAsLong);
+ }
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ public final Stream<P_OUT> peek(Consumer<? super P_OUT> action) {
+ Objects.requireNonNull(action);
+ return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE,
+ 0) {
+ @Override
+ Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
+ return new Sink.ChainedReference<P_OUT, P_OUT>(sink) {
+ @Override
+ public void accept(P_OUT u) {
+ action.accept(u);
+ downstream.accept(u);
+ }
+ };
+ }
+ };
+ }
+
+ // Stateful intermediate operations from Stream
+
+ @Override
+ public final Stream<P_OUT> distinct() {
+ return DistinctOps.makeRef(this);
+ }
+
+ @Override
+ public final Stream<P_OUT> sorted() {
+ return SortedOps.makeRef(this);
+ }
+
+ @Override
+ public final Stream<P_OUT> sorted(Comparator<? super P_OUT> comparator) {
+ return SortedOps.makeRef(this, comparator);
+ }
+
+ @Override
+ public final Stream<P_OUT> limit(long maxSize) {
+ if (maxSize < 0)
+ throw new IllegalArgumentException(Long.toString(maxSize));
+ return SliceOps.makeRef(this, 0, maxSize);
+ }
+
+ @Override
+ public final Stream<P_OUT> skip(long n) {
+ if (n < 0)
+ throw new IllegalArgumentException(Long.toString(n));
+ if (n == 0)
+ return this;
+ else
+ return SliceOps.makeRef(this, n, -1);
+ }
+
+ // Terminal operations from Stream
+
+ @Override
+ public void forEach(Consumer<? super P_OUT> action) {
+ evaluate(ForEachOps.makeRef(action, false));
+ }
+
+ @Override
+ public void forEachOrdered(Consumer<? super P_OUT> action) {
+ evaluate(ForEachOps.makeRef(action, true));
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public final <A> A[] toArray(IntFunction<A[]> generator) {
+ // Since A has no relation to U (not possible to declare that A is an upper bound of U)
+ // there will be no static type checking.
+ // Therefore use a raw type and assume A == U rather than propagating the separation of A and U
+ // throughout the code-base.
+ // The runtime type of U is never checked for equality with the component type of the runtime type of A[].
+ // Runtime checking will be performed when an element is stored in A[], thus if A is not a
+ // super type of U an ArrayStoreException will be thrown.
+ @SuppressWarnings("rawtypes")
+ IntFunction rawGenerator = (IntFunction) generator;
+ return (A[]) Nodes.flatten(evaluateToArrayNode(rawGenerator), rawGenerator)
+ .asArray(rawGenerator);
+ }
+
+ @Override
+ public final Object[] toArray() {
+ return toArray(Object[]::new);
+ }
+
+ @Override
+ public final boolean anyMatch(Predicate<? super P_OUT> predicate) {
+ return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.ANY));
+ }
+
+ @Override
+ public final boolean allMatch(Predicate<? super P_OUT> predicate) {
+ return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.ALL));
+ }
+
+ @Override
+ public final boolean noneMatch(Predicate<? super P_OUT> predicate) {
+ return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.NONE));
+ }
+
+ @Override
+ public final Optional<P_OUT> findFirst() {
+ return evaluate(FindOps.makeRef(true));
+ }
+
+ @Override
+ public final Optional<P_OUT> findAny() {
+ return evaluate(FindOps.makeRef(false));
+ }
+
+ @Override
+ public final P_OUT reduce(final P_OUT identity, final BinaryOperator<P_OUT> accumulator) {
+ return evaluate(ReduceOps.makeRef(identity, accumulator, accumulator));
+ }
+
+ @Override
+ public final Optional<P_OUT> reduce(BinaryOperator<P_OUT> accumulator) {
+ return evaluate(ReduceOps.makeRef(accumulator));
+ }
+
+ @Override
+ public final <R> R reduce(R identity, BiFunction<R, ? super P_OUT, R> accumulator, BinaryOperator<R> combiner) {
+ return evaluate(ReduceOps.makeRef(identity, accumulator, combiner));
+ }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public final <R, A> R collect(Collector<? super P_OUT, A, R> collector) {
+        A container;
+        if (isParallel()
+                && (collector.characteristics().contains(Collector.Characteristics.CONCURRENT))
+                && (!isOrdered() || collector.characteristics().contains(Collector.Characteristics.UNORDERED))) {
+            // Concurrent collection: accumulate into one shared container from
+            // multiple threads instead of merging per-thread containers.
+            container = collector.supplier().get();
+            BiConsumer<A, ? super P_OUT> accumulator = collector.accumulator();
+            forEach(u -> accumulator.accept(container, u));
+        }
+        else {
+            container = evaluate(ReduceOps.makeRef(collector));
+        }
+        // IDENTITY_FINISH means the container is the result; skip the finisher.
+        return collector.characteristics().contains(Collector.Characteristics.IDENTITY_FINISH)
+               ? (R) container
+               : collector.finisher().apply(container);
+    }
+
+ @Override
+ public final <R> R collect(Supplier<R> supplier,
+ BiConsumer<R, ? super P_OUT> accumulator,
+ BiConsumer<R, R> combiner) {
+ return evaluate(ReduceOps.makeRef(supplier, accumulator, combiner));
+ }
+
+ @Override
+ public final Optional<P_OUT> max(Comparator<? super P_OUT> comparator) {
+ return reduce(BinaryOperator.maxBy(comparator));
+ }
+
+ @Override
+ public final Optional<P_OUT> min(Comparator<? super P_OUT> comparator) {
+ return reduce(BinaryOperator.minBy(comparator));
+
+ }
+
+ @Override
+ public final long count() {
+ return mapToLong(e -> 1L).sum();
+ }
+
+
+ //
+
+    /**
+     * Source stage of a ReferencePipeline.
+     *
+     * @param <E_IN> type of elements in the upstream source
+     * @param <E_OUT> type of elements produced by this stage
+     * @since 1.8
+     */
+    static class Head<E_IN, E_OUT> extends ReferencePipeline<E_IN, E_OUT> {
+        /**
+         * Constructor for the source stage of a Stream.
+         *
+         * @param source {@code Supplier<Spliterator>} describing the stream
+         *               source
+         * @param sourceFlags the source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
+         * @param parallel {@code true} if the pipeline is parallel
+         */
+        Head(Supplier<? extends Spliterator<?>> source,
+             int sourceFlags, boolean parallel) {
+            super(source, sourceFlags, parallel);
+        }
+
+        /**
+         * Constructor for the source stage of a Stream.
+         *
+         * @param source {@code Spliterator} describing the stream source
+         * @param sourceFlags the source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
+         * @param parallel {@code true} if the pipeline is parallel
+         */
+        Head(Spliterator<?> source,
+             int sourceFlags, boolean parallel) {
+            super(source, sourceFlags, parallel);
+        }
+
+        // A head stage carries no operation, so the op hooks must never be invoked.
+        @Override
+        final boolean opIsStateful() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        final Sink<E_IN> opWrapSink(int flags, Sink<E_OUT> sink) {
+            throw new UnsupportedOperationException();
+        }
+
+        // Optimized sequential terminal operations for the head of the pipeline
+
+        @Override
+        public void forEach(Consumer<? super E_OUT> action) {
+            if (!isParallel()) {
+                // Sequential fast path: traverse the source spliterator directly,
+                // bypassing pipeline evaluation machinery.
+                sourceStageSpliterator().forEachRemaining(action);
+            }
+            else {
+                super.forEach(action);
+            }
+        }
+
+        @Override
+        public void forEachOrdered(Consumer<? super E_OUT> action) {
+            if (!isParallel()) {
+                // Sequential traversal is already in encounter order.
+                sourceStageSpliterator().forEachRemaining(action);
+            }
+            else {
+                super.forEachOrdered(action);
+            }
+        }
+    }
+
+    /**
+     * Base class for a stateless intermediate stage of a Stream.
+     *
+     * @param <E_IN> type of elements in the upstream source
+     * @param <E_OUT> type of elements produced by this stage
+     * @since 1.8
+     */
+    abstract static class StatelessOp<E_IN, E_OUT>
+            extends ReferencePipeline<E_IN, E_OUT> {
+        /**
+         * Construct a new Stream by appending a stateless intermediate
+         * operation to an existing stream.
+         *
+         * @param upstream The upstream pipeline stage
+         * @param inputShape The stream shape for the upstream pipeline stage
+         * @param opFlags Operation flags for the new stage
+         */
+        StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
+                    StreamShape inputShape,
+                    int opFlags) {
+            super(upstream, opFlags);
+            // Sanity check: this stage must consume the shape the upstream produces.
+            assert upstream.getOutputShape() == inputShape;
+        }
+
+        @Override
+        final boolean opIsStateful() {
+            return false;
+        }
+    }
+
+    /**
+     * Base class for a stateful intermediate stage of a Stream.
+     *
+     * @param <E_IN> type of elements in the upstream source
+     * @param <E_OUT> type of elements produced by this stage
+     * @since 1.8
+     */
+    abstract static class StatefulOp<E_IN, E_OUT>
+            extends ReferencePipeline<E_IN, E_OUT> {
+        /**
+         * Construct a new Stream by appending a stateful intermediate operation
+         * to an existing stream.
+         * @param upstream The upstream pipeline stage
+         * @param inputShape The stream shape for the upstream pipeline stage
+         * @param opFlags Operation flags for the new stage
+         */
+        StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
+                   StreamShape inputShape,
+                   int opFlags) {
+            super(upstream, opFlags);
+            // Sanity check: this stage must consume the shape the upstream produces.
+            assert upstream.getOutputShape() == inputShape;
+        }
+
+        @Override
+        final boolean opIsStateful() {
+            return true;
+        }
+
+        // Unlike stateless ops, a stateful op must supply its own parallel evaluation.
+        @Override
+        abstract <P_IN> Node<E_OUT> opEvaluateParallel(PipelineHelper<E_OUT> helper,
+                                                       Spliterator<P_IN> spliterator,
+                                                       IntFunction<E_OUT[]> generator);
+    }
+}
diff --git a/ojluni/src/main/java/java/util/stream/Sink.java b/ojluni/src/main/java/java/util/stream/Sink.java
new file mode 100644
index 0000000..d2a366d
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/Sink.java
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.LongConsumer;
+
+/**
+ * An extension of {@link Consumer} used to conduct values through the stages of
+ * a stream pipeline, with additional methods to manage size information,
+ * control flow, etc. Before calling the {@code accept()} method on a
+ * {@code Sink} for the first time, you must first call the {@code begin()}
+ * method to inform it that data is coming (optionally informing the sink how
+ * much data is coming), and after all data has been sent, you must call the
+ * {@code end()} method. After calling {@code end()}, you should not call
+ * {@code accept()} without again calling {@code begin()}. {@code Sink} also
+ * offers a mechanism by which the sink can cooperatively signal that it does
+ * not wish to receive any more data (the {@code cancellationRequested()}
+ * method), which a source can poll before sending more data to the
+ * {@code Sink}.
+ *
+ * <p>A sink may be in one of two states: an initial state and an active state.
+ * It starts out in the initial state; the {@code begin()} method transitions
+ * it to the active state, and the {@code end()} method transitions it back into
+ * the initial state, where it can be re-used. Data-accepting methods (such as
+ * {@code accept()}) are only valid in the active state.
+ *
+ * @apiNote
+ * A stream pipeline consists of a source, zero or more intermediate stages
+ * (such as filtering or mapping), and a terminal stage, such as reduction or
+ * for-each. For concreteness, consider the pipeline:
+ *
+ * <pre>{@code
+ * int longestStringLengthStartingWithA
+ * = strings.stream()
+ * .filter(s -> s.startsWith("A"))
+ * .mapToInt(String::length)
+ * .max();
+ * }</pre>
+ *
+ * <p>Here, we have three stages, filtering, mapping, and reducing. The
+ * filtering stage consumes strings and emits a subset of those strings; the
+ * mapping stage consumes strings and emits ints; the reduction stage consumes
+ * those ints and computes the maximal value.
+ *
+ * <p>A {@code Sink} instance is used to represent each stage of this pipeline,
+ * whether the stage accepts objects, ints, longs, or doubles. Sink has entry
+ * points for {@code accept(Object)}, {@code accept(int)}, etc, so that we do
+ * not need a specialized interface for each primitive specialization. (It
+ * might be called a "kitchen sink" for this omnivorous tendency.) The entry
+ * point to the pipeline is the {@code Sink} for the filtering stage, which
+ * sends some elements "downstream" -- into the {@code Sink} for the mapping
+ * stage, which in turn sends integral values downstream into the {@code Sink}
+ * for the reduction stage. The {@code Sink} implementations associated with a
+ * given stage is expected to know the data type for the next stage, and call
+ * the correct {@code accept} method on its downstream {@code Sink}. Similarly,
+ * each stage must implement the correct {@code accept} method corresponding to
+ * the data type it accepts.
+ *
+ * <p>The specialized subtypes such as {@link Sink.OfInt} override
+ * {@code accept(Object)} to call the appropriate primitive specialization of
+ * {@code accept}, implement the appropriate primitive specialization of
+ * {@code Consumer}, and re-abstract the appropriate primitive specialization of
+ * {@code accept}.
+ *
+ * <p>The chaining subtypes such as {@link ChainedInt} not only implement
+ * {@code Sink.OfInt}, but also maintain a {@code downstream} field which
+ * represents the downstream {@code Sink}, and implement the methods
+ * {@code begin()}, {@code end()}, and {@code cancellationRequested()} to
+ * delegate to the downstream {@code Sink}. Most implementations of
+ * intermediate operations will use these chaining wrappers. For example, the
+ * mapping stage in the above example would look like:
+ *
+ * <pre>{@code
+ *     Sink<U> is = new Sink.ChainedReference<U, Integer>(sink) {
+ * public void accept(U u) {
+ * downstream.accept(mapper.applyAsInt(u));
+ * }
+ * };
+ * }</pre>
+ *
+ * <p>Here, we implement {@code Sink.ChainedReference<U>}, meaning that we expect
+ * to receive elements of type {@code U} as input, and pass the downstream sink
+ * to the constructor. Because the next stage expects to receive integers, we
+ * must call the {@code accept(int)} method when emitting values to the downstream.
+ * The {@code accept()} method applies the mapping function from {@code U} to
+ * {@code int} and passes the resulting value to the downstream {@code Sink}.
+ *
+ * @param <T> type of elements for value streams
+ * @since 1.8
+ */
+interface Sink<T> extends Consumer<T> {
+ /**
+ * Resets the sink state to receive a fresh data set. This must be called
+ * before sending any data to the sink. After calling {@link #end()},
+ * you may call this method to reset the sink for another calculation.
+ * @param size The exact size of the data to be pushed downstream, if
+ * known or {@code -1} if unknown or infinite.
+ *
+ * <p>Prior to this call, the sink must be in the initial state, and after
+ * this call it is in the active state.
+ */
+ default void begin(long size) {}
+
+ /**
+ * Indicates that all elements have been pushed. If the {@code Sink} is
+ * stateful, it should send any stored state downstream at this time, and
+ * should clear any accumulated state (and associated resources).
+ *
+ * <p>Prior to this call, the sink must be in the active state, and after
+ * this call it is returned to the initial state.
+ */
+ default void end() {}
+
+ /**
+ * Indicates that this {@code Sink} does not wish to receive any more data.
+ *
+ * @implSpec The default implementation always returns false.
+ *
+ * @return true if cancellation is requested
+ */
+ default boolean cancellationRequested() {
+ return false;
+ }
+
+ /**
+ * Accepts an int value.
+ *
+ * @implSpec The default implementation throws IllegalStateException.
+ *
+ * @throws IllegalStateException if this sink does not accept int values
+ */
+ default void accept(int value) {
+ throw new IllegalStateException("called wrong accept method");
+ }
+
+ /**
+ * Accepts a long value.
+ *
+ * @implSpec The default implementation throws IllegalStateException.
+ *
+ * @throws IllegalStateException if this sink does not accept long values
+ */
+ default void accept(long value) {
+ throw new IllegalStateException("called wrong accept method");
+ }
+
+ /**
+ * Accepts a double value.
+ *
+ * @implSpec The default implementation throws IllegalStateException.
+ *
+ * @throws IllegalStateException if this sink does not accept double values
+ */
+ default void accept(double value) {
+ throw new IllegalStateException("called wrong accept method");
+ }
+
+ /**
+ * {@code Sink} that implements {@code Sink<Integer>}, re-abstracts
+ * {@code accept(int)}, and wires {@code accept(Integer)} to bridge to
+ * {@code accept(int)}.
+ */
+ interface OfInt extends Sink<Integer>, IntConsumer {
+ @Override
+ void accept(int value);
+
+ @Override
+ default void accept(Integer i) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)");
+ accept(i.intValue());
+ }
+ }
+
+ /**
+ * {@code Sink} that implements {@code Sink<Long>}, re-abstracts
+ * {@code accept(long)}, and wires {@code accept(Long)} to bridge to
+ * {@code accept(long)}.
+ */
+ interface OfLong extends Sink<Long>, LongConsumer {
+ @Override
+ void accept(long value);
+
+ @Override
+ default void accept(Long i) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Sink.OfLong.accept(Long)");
+ accept(i.longValue());
+ }
+ }
+
+ /**
+ * {@code Sink} that implements {@code Sink<Double>}, re-abstracts
+ * {@code accept(double)}, and wires {@code accept(Double)} to bridge to
+ * {@code accept(double)}.
+ */
+ interface OfDouble extends Sink<Double>, DoubleConsumer {
+ @Override
+ void accept(double value);
+
+ @Override
+ default void accept(Double i) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Sink.OfDouble.accept(Double)");
+ accept(i.doubleValue());
+ }
+ }
+
+    /**
+     * Abstract {@code Sink} implementation for creating chains of
+     * sinks. The {@code begin}, {@code end}, and
+     * {@code cancellationRequested} methods are wired to chain to the
+     * downstream {@code Sink}. This implementation takes a downstream
+     * {@code Sink} of unknown input shape and produces a {@code Sink<T>}. The
+     * implementation of the {@code accept()} method must call the correct
+     * {@code accept()} method on the downstream {@code Sink}.
+     */
+    static abstract class ChainedReference<T, E_OUT> implements Sink<T> {
+        // Next sink in the chain; never null (checked in the constructor).
+        protected final Sink<? super E_OUT> downstream;
+
+        public ChainedReference(Sink<? super E_OUT> downstream) {
+            this.downstream = Objects.requireNonNull(downstream);
+        }
+
+        // begin/end/cancellationRequested simply delegate down the chain.
+        @Override
+        public void begin(long size) {
+            downstream.begin(size);
+        }
+
+        @Override
+        public void end() {
+            downstream.end();
+        }
+
+        @Override
+        public boolean cancellationRequested() {
+            return downstream.cancellationRequested();
+        }
+    }
+
+ /**
+ * Abstract {@code Sink} implementation designed for creating chains of
+ * sinks. The {@code begin}, {@code end}, and
+ * {@code cancellationRequested} methods are wired to chain to the
+ * downstream {@code Sink}. This implementation takes a downstream
+ * {@code Sink} of unknown input shape and produces a {@code Sink.OfInt}.
+ * The implementation of the {@code accept()} method must call the correct
+ * {@code accept()} method on the downstream {@code Sink}.
+ */
+ static abstract class ChainedInt<E_OUT> implements Sink.OfInt {
+ protected final Sink<? super E_OUT> downstream;
+
+ public ChainedInt(Sink<? super E_OUT> downstream) {
+ this.downstream = Objects.requireNonNull(downstream);
+ }
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(size);
+ }
+
+ @Override
+ public void end() {
+ downstream.end();
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return downstream.cancellationRequested();
+ }
+ }
+
+ /**
+ * Abstract {@code Sink} implementation designed for creating chains of
+ * sinks. The {@code begin}, {@code end}, and
+ * {@code cancellationRequested} methods are wired to chain to the
+ * downstream {@code Sink}. This implementation takes a downstream
+ * {@code Sink} of unknown input shape and produces a {@code Sink.OfLong}.
+ * The implementation of the {@code accept()} method must call the correct
+ * {@code accept()} method on the downstream {@code Sink}.
+ */
+ static abstract class ChainedLong<E_OUT> implements Sink.OfLong {
+ protected final Sink<? super E_OUT> downstream;
+
+ public ChainedLong(Sink<? super E_OUT> downstream) {
+ this.downstream = Objects.requireNonNull(downstream);
+ }
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(size);
+ }
+
+ @Override
+ public void end() {
+ downstream.end();
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return downstream.cancellationRequested();
+ }
+ }
+
+ /**
+ * Abstract {@code Sink} implementation designed for creating chains of
+ * sinks. The {@code begin}, {@code end}, and
+ * {@code cancellationRequested} methods are wired to chain to the
+ * downstream {@code Sink}. This implementation takes a downstream
+ * {@code Sink} of unknown input shape and produces a {@code Sink.OfDouble}.
+ * The implementation of the {@code accept()} method must call the correct
+ * {@code accept()} method on the downstream {@code Sink}.
+ */
+ static abstract class ChainedDouble<E_OUT> implements Sink.OfDouble {
+ protected final Sink<? super E_OUT> downstream;
+
+ public ChainedDouble(Sink<? super E_OUT> downstream) {
+ this.downstream = Objects.requireNonNull(downstream);
+ }
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(size);
+ }
+
+ @Override
+ public void end() {
+ downstream.end();
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return downstream.cancellationRequested();
+ }
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/SliceOps.java b/ojluni/src/main/java/java/util/stream/SliceOps.java
new file mode 100644
index 0000000..34d5530
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/SliceOps.java
@@ -0,0 +1,715 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.concurrent.CountedCompleter;
+import java.util.function.IntFunction;
+
+/**
+ * Factory for instances of short-circuiting stateful intermediate operations
+ * that produce subsequences of their input stream.
+ *
+ * @since 1.8
+ */
+final class SliceOps {
+
+ // No instances
+ private SliceOps() { }
+
+ /**
+ * Calculates the sliced size given the current size, the number of
+ * elements to skip, and the number of elements to limit.
+ *
+ * @param size the current size
+ * @param skip the number of elements to skip, assumed to be >= 0
+ * @param limit the number of elements to limit, assumed to be >= 0, with
+ * a value of {@code Long.MAX_VALUE} if there is no limit
+ * @return the sliced size
+ */
+ private static long calcSize(long size, long skip, long limit) {
+ return size >= 0 ? Math.max(-1, Math.min(size - skip, limit)) : -1;
+ }
+
+ /**
+ * Calculates the slice fence, which is one past the index of the slice
+ * range
+ * @param skip the number of elements to skip, assumed to be >= 0
+ * @param limit the number of elements to limit, assumed to be >= 0, with
+ * a value of {@code Long.MAX_VALUE} if there is no limit
+ * @return the slice fence.
+ */
+ private static long calcSliceFence(long skip, long limit) {
+ long sliceFence = limit >= 0 ? skip + limit : Long.MAX_VALUE;
+ // Check for overflow
+ return (sliceFence >= 0) ? sliceFence : Long.MAX_VALUE;
+ }
+
+ /**
+ * Creates a slice spliterator given a stream shape governing the
+ * spliterator type. Requires that the underlying Spliterator
+ * be SUBSIZED.
+ */
+ @SuppressWarnings("unchecked")
+ private static <P_IN> Spliterator<P_IN> sliceSpliterator(StreamShape shape,
+ Spliterator<P_IN> s,
+ long skip, long limit) {
+ assert s.hasCharacteristics(Spliterator.SUBSIZED);
+ long sliceFence = calcSliceFence(skip, limit);
+ switch (shape) {
+ case REFERENCE:
+ return new StreamSpliterators
+ .SliceSpliterator.OfRef<>(s, skip, sliceFence);
+ case INT_VALUE:
+ return (Spliterator<P_IN>) new StreamSpliterators
+ .SliceSpliterator.OfInt((Spliterator.OfInt) s, skip, sliceFence);
+ case LONG_VALUE:
+ return (Spliterator<P_IN>) new StreamSpliterators
+ .SliceSpliterator.OfLong((Spliterator.OfLong) s, skip, sliceFence);
+ case DOUBLE_VALUE:
+ return (Spliterator<P_IN>) new StreamSpliterators
+ .SliceSpliterator.OfDouble((Spliterator.OfDouble) s, skip, sliceFence);
+ default:
+ throw new IllegalStateException("Unknown shape " + shape);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private static <T> IntFunction<T[]> castingArray() {
+ return size -> (T[]) new Object[size];
+ }
+
+ /**
+ * Appends a "slice" operation to the provided stream. The slice operation
+ * may be skip-only, limit-only, or skip-and-limit.
+ *
+ * @param <T> the type of both input and output elements
+ * @param upstream a reference stream with element type T
+ * @param skip the number of elements to skip. Must be >= 0.
+ * @param limit the maximum size of the resulting stream, or -1 if no limit
+ * is to be imposed
+ */
+ public static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream,
+ long skip, long limit) {
+ if (skip < 0)
+ throw new IllegalArgumentException("Skip must be non-negative: " + skip);
+
+ return new ReferencePipeline.StatefulOp<T, T>(upstream, StreamShape.REFERENCE,
+ flags(limit)) {
+ Spliterator<T> unorderedSkipLimitSpliterator(Spliterator<T> s,
+ long skip, long limit, long sizeIfKnown) {
+ if (skip <= sizeIfKnown) {
+ // Use just the limit if the number of elements
+ // to skip is <= the known pipeline size
+ limit = limit >= 0 ? Math.min(limit, sizeIfKnown - skip) : sizeIfKnown - skip;
+ skip = 0;
+ }
+ return new StreamSpliterators.UnorderedSliceSpliterator.OfRef<>(s, skip, limit);
+ }
+
+ @Override
+ <P_IN> Spliterator<T> opEvaluateParallelLazy(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ return new StreamSpliterators.SliceSpliterator.OfRef<>(
+ helper.wrapSpliterator(spliterator),
+ skip,
+ calcSliceFence(skip, limit));
+ } else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ return unorderedSkipLimitSpliterator(
+ helper.wrapSpliterator(spliterator),
+ skip, limit, size);
+ }
+ else {
+ // @@@ OOMEs will occur for LongStream.longs().filter(i -> true).limit(n)
+ // regardless of the value of n
+ // Need to adjust the target size of splitting for the
+ // SliceTask from say (size / k) to say min(size / k, 1 << 14)
+ // This will limit the size of the buffers created at the leaf nodes
+ // cancellation will be more aggressive cancelling later tasks
+ // if the target slice size has been reached from a given task,
+ // cancellation should also clear local results if any
+ return new SliceTask<>(this, helper, spliterator, castingArray(), skip, limit).
+ invoke().spliterator();
+ }
+ }
+
+ @Override
+ <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<T[]> generator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ // Because the pipeline is SIZED the slice spliterator
+ // can be created from the source, this requires matching
+ // to shape of the source, and is potentially more efficient
+ // than creating the slice spliterator from the pipeline
+ // wrapping spliterator
+ Spliterator<P_IN> s = sliceSpliterator(helper.getSourceShape(), spliterator, skip, limit);
+ return Nodes.collect(helper, s, true, generator);
+ } else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ Spliterator<T> s = unorderedSkipLimitSpliterator(
+ helper.wrapSpliterator(spliterator),
+ skip, limit, size);
+ // Collect using this pipeline, which is empty and therefore
+ // can be used with the pipeline wrapping spliterator
+ // Note that we cannot create a slice spliterator from
+ // the source spliterator if the pipeline is not SIZED
+ return Nodes.collect(this, s, true, generator);
+ }
+ else {
+ return new SliceTask<>(this, helper, spliterator, generator, skip, limit).
+ invoke();
+ }
+ }
+
+ @Override
+ Sink<T> opWrapSink(int flags, Sink<T> sink) {
+ return new Sink.ChainedReference<T, T>(sink) {
+ long n = skip;
+ long m = limit >= 0 ? limit : Long.MAX_VALUE;
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(calcSize(size, skip, m));
+ }
+
+ @Override
+ public void accept(T t) {
+ if (n == 0) {
+ if (m > 0) {
+ m--;
+ downstream.accept(t);
+ }
+ }
+ else {
+ n--;
+ }
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return m == 0 || downstream.cancellationRequested();
+ }
+ };
+ }
+ };
+ }
+
+ /**
+ * Appends a "slice" operation to the provided IntStream. The slice
+ * operation may be skip-only, limit-only, or skip-and-limit.
+ *
+ * @param upstream An IntStream
+ * @param skip The number of elements to skip. Must be >= 0.
+ * @param limit The maximum size of the resulting stream, or -1 if no limit
+ * is to be imposed
+ */
+ public static IntStream makeInt(AbstractPipeline<?, Integer, ?> upstream,
+ long skip, long limit) {
+ if (skip < 0)
+ throw new IllegalArgumentException("Skip must be non-negative: " + skip);
+
+ return new IntPipeline.StatefulOp<Integer>(upstream, StreamShape.INT_VALUE,
+ flags(limit)) {
+ Spliterator.OfInt unorderedSkipLimitSpliterator(
+ Spliterator.OfInt s, long skip, long limit, long sizeIfKnown) {
+ if (skip <= sizeIfKnown) {
+ // Use just the limit if the number of elements
+ // to skip is <= the known pipeline size
+ limit = limit >= 0 ? Math.min(limit, sizeIfKnown - skip) : sizeIfKnown - skip;
+ skip = 0;
+ }
+ return new StreamSpliterators.UnorderedSliceSpliterator.OfInt(s, skip, limit);
+ }
+
+ @Override
+ <P_IN> Spliterator<Integer> opEvaluateParallelLazy(PipelineHelper<Integer> helper,
+ Spliterator<P_IN> spliterator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ return new StreamSpliterators.SliceSpliterator.OfInt(
+ (Spliterator.OfInt) helper.wrapSpliterator(spliterator),
+ skip,
+ calcSliceFence(skip, limit));
+ } else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ return unorderedSkipLimitSpliterator(
+ (Spliterator.OfInt) helper.wrapSpliterator(spliterator),
+ skip, limit, size);
+ }
+ else {
+ return new SliceTask<>(this, helper, spliterator, Integer[]::new, skip, limit).
+ invoke().spliterator();
+ }
+ }
+
+ @Override
+ <P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<Integer[]> generator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ // Because the pipeline is SIZED the slice spliterator
+ // can be created from the source, this requires matching
+ // to shape of the source, and is potentially more efficient
+ // than creating the slice spliterator from the pipeline
+ // wrapping spliterator
+ Spliterator<P_IN> s = sliceSpliterator(helper.getSourceShape(), spliterator, skip, limit);
+ return Nodes.collectInt(helper, s, true);
+ } else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ Spliterator.OfInt s = unorderedSkipLimitSpliterator(
+ (Spliterator.OfInt) helper.wrapSpliterator(spliterator),
+ skip, limit, size);
+ // Collect using this pipeline, which is empty and therefore
+ // can be used with the pipeline wrapping spliterator
+ // Note that we cannot create a slice spliterator from
+ // the source spliterator if the pipeline is not SIZED
+ return Nodes.collectInt(this, s, true);
+ }
+ else {
+ return new SliceTask<>(this, helper, spliterator, generator, skip, limit).
+ invoke();
+ }
+ }
+
+ @Override
+ Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
+ return new Sink.ChainedInt<Integer>(sink) {
+ long n = skip;
+ long m = limit >= 0 ? limit : Long.MAX_VALUE;
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(calcSize(size, skip, m));
+ }
+
+ @Override
+ public void accept(int t) {
+ if (n == 0) {
+ if (m > 0) {
+ m--;
+ downstream.accept(t);
+ }
+ }
+ else {
+ n--;
+ }
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return m == 0 || downstream.cancellationRequested();
+ }
+ };
+ }
+ };
+ }
+
+ /**
+ * Appends a "slice" operation to the provided LongStream. The slice
+ * operation may be skip-only, limit-only, or skip-and-limit.
+ *
+ * @param upstream A LongStream
+ * @param skip The number of elements to skip. Must be >= 0.
+ * @param limit The maximum size of the resulting stream, or -1 if no limit
+ * is to be imposed
+ */
+ public static LongStream makeLong(AbstractPipeline<?, Long, ?> upstream,
+ long skip, long limit) {
+ if (skip < 0)
+ throw new IllegalArgumentException("Skip must be non-negative: " + skip);
+
+ return new LongPipeline.StatefulOp<Long>(upstream, StreamShape.LONG_VALUE,
+ flags(limit)) {
+ Spliterator.OfLong unorderedSkipLimitSpliterator(
+ Spliterator.OfLong s, long skip, long limit, long sizeIfKnown) {
+ if (skip <= sizeIfKnown) {
+ // Use just the limit if the number of elements
+ // to skip is <= the known pipeline size
+ limit = limit >= 0 ? Math.min(limit, sizeIfKnown - skip) : sizeIfKnown - skip;
+ skip = 0;
+ }
+ return new StreamSpliterators.UnorderedSliceSpliterator.OfLong(s, skip, limit);
+ }
+
+ @Override
+ <P_IN> Spliterator<Long> opEvaluateParallelLazy(PipelineHelper<Long> helper,
+ Spliterator<P_IN> spliterator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ return new StreamSpliterators.SliceSpliterator.OfLong(
+ (Spliterator.OfLong) helper.wrapSpliterator(spliterator),
+ skip,
+ calcSliceFence(skip, limit));
+ } else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ return unorderedSkipLimitSpliterator(
+ (Spliterator.OfLong) helper.wrapSpliterator(spliterator),
+ skip, limit, size);
+ }
+ else {
+ return new SliceTask<>(this, helper, spliterator, Long[]::new, skip, limit).
+ invoke().spliterator();
+ }
+ }
+
+ @Override
+ <P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<Long[]> generator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ // Because the pipeline is SIZED the slice spliterator
+ // can be created from the source, this requires matching
+ // to shape of the source, and is potentially more efficient
+ // than creating the slice spliterator from the pipeline
+ // wrapping spliterator
+ Spliterator<P_IN> s = sliceSpliterator(helper.getSourceShape(), spliterator, skip, limit);
+ return Nodes.collectLong(helper, s, true);
+ } else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ Spliterator.OfLong s = unorderedSkipLimitSpliterator(
+ (Spliterator.OfLong) helper.wrapSpliterator(spliterator),
+ skip, limit, size);
+ // Collect using this pipeline, which is empty and therefore
+ // can be used with the pipeline wrapping spliterator
+ // Note that we cannot create a slice spliterator from
+ // the source spliterator if the pipeline is not SIZED
+ return Nodes.collectLong(this, s, true);
+ }
+ else {
+ return new SliceTask<>(this, helper, spliterator, generator, skip, limit).
+ invoke();
+ }
+ }
+
+ @Override
+ Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
+ return new Sink.ChainedLong<Long>(sink) {
+ long n = skip;
+ long m = limit >= 0 ? limit : Long.MAX_VALUE;
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(calcSize(size, skip, m));
+ }
+
+ @Override
+ public void accept(long t) {
+ if (n == 0) {
+ if (m > 0) {
+ m--;
+ downstream.accept(t);
+ }
+ }
+ else {
+ n--;
+ }
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return m == 0 || downstream.cancellationRequested();
+ }
+ };
+ }
+ };
+ }
+
+ /**
+ * Appends a "slice" operation to the provided DoubleStream. The slice
+ * operation may be skip-only, limit-only, or skip-and-limit.
+ *
+ * @param upstream A DoubleStream
+ * @param skip The number of elements to skip. Must be >= 0.
+ * @param limit The maximum size of the resulting stream, or -1 if no limit
+ * is to be imposed
+ */
+ public static DoubleStream makeDouble(AbstractPipeline<?, Double, ?> upstream,
+ long skip, long limit) {
+ if (skip < 0)
+ throw new IllegalArgumentException("Skip must be non-negative: " + skip);
+
+ return new DoublePipeline.StatefulOp<Double>(upstream, StreamShape.DOUBLE_VALUE,
+ flags(limit)) {
+ Spliterator.OfDouble unorderedSkipLimitSpliterator(
+ Spliterator.OfDouble s, long skip, long limit, long sizeIfKnown) {
+ if (skip <= sizeIfKnown) {
+ // Use just the limit if the number of elements
+ // to skip is <= the known pipeline size
+ limit = limit >= 0 ? Math.min(limit, sizeIfKnown - skip) : sizeIfKnown - skip;
+ skip = 0;
+ }
+ return new StreamSpliterators.UnorderedSliceSpliterator.OfDouble(s, skip, limit);
+ }
+
+ @Override
+ <P_IN> Spliterator<Double> opEvaluateParallelLazy(PipelineHelper<Double> helper,
+ Spliterator<P_IN> spliterator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ return new StreamSpliterators.SliceSpliterator.OfDouble(
+ (Spliterator.OfDouble) helper.wrapSpliterator(spliterator),
+ skip,
+ calcSliceFence(skip, limit));
+ } else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ return unorderedSkipLimitSpliterator(
+ (Spliterator.OfDouble) helper.wrapSpliterator(spliterator),
+ skip, limit, size);
+ }
+ else {
+ return new SliceTask<>(this, helper, spliterator, Double[]::new, skip, limit).
+ invoke().spliterator();
+ }
+ }
+
+ @Override
+ <P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<Double[]> generator) {
+ long size = helper.exactOutputSizeIfKnown(spliterator);
+ if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
+ // Because the pipeline is SIZED the slice spliterator
+ // can be created from the source, this requires matching
+ // to shape of the source, and is potentially more efficient
+ // than creating the slice spliterator from the pipeline
+ // wrapping spliterator
+ Spliterator<P_IN> s = sliceSpliterator(helper.getSourceShape(), spliterator, skip, limit);
+ return Nodes.collectDouble(helper, s, true);
+ } else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
+ Spliterator.OfDouble s = unorderedSkipLimitSpliterator(
+ (Spliterator.OfDouble) helper.wrapSpliterator(spliterator),
+ skip, limit, size);
+ // Collect using this pipeline, which is empty and therefore
+ // can be used with the pipeline wrapping spliterator
+ // Note that we cannot create a slice spliterator from
+ // the source spliterator if the pipeline is not SIZED
+ return Nodes.collectDouble(this, s, true);
+ }
+ else {
+ return new SliceTask<>(this, helper, spliterator, generator, skip, limit).
+ invoke();
+ }
+ }
+
+ @Override
+ Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
+ return new Sink.ChainedDouble<Double>(sink) {
+ long n = skip;
+ long m = limit >= 0 ? limit : Long.MAX_VALUE;
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(calcSize(size, skip, m));
+ }
+
+ @Override
+ public void accept(double t) {
+ if (n == 0) {
+ if (m > 0) {
+ m--;
+ downstream.accept(t);
+ }
+ }
+ else {
+ n--;
+ }
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return m == 0 || downstream.cancellationRequested();
+ }
+ };
+ }
+ };
+ }
+
+ private static int flags(long limit) {
+ return StreamOpFlag.NOT_SIZED | ((limit != -1) ? StreamOpFlag.IS_SHORT_CIRCUIT : 0);
+ }
+
+ /**
+ * {@code ForkJoinTask} implementing slice computation.
+ *
+ * @param <P_IN> Input element type to the stream pipeline
+ * @param <P_OUT> Output element type from the stream pipeline
+ */
+ @SuppressWarnings("serial")
+ private static final class SliceTask<P_IN, P_OUT>
+ extends AbstractShortCircuitTask<P_IN, P_OUT, Node<P_OUT>, SliceTask<P_IN, P_OUT>> {
+ private final AbstractPipeline<P_OUT, P_OUT, ?> op;
+ private final IntFunction<P_OUT[]> generator;
+ private final long targetOffset, targetSize;
+ private long thisNodeSize;
+
+ private volatile boolean completed;
+
+ SliceTask(AbstractPipeline<P_OUT, P_OUT, ?> op,
+ PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<P_OUT[]> generator,
+ long offset, long size) {
+ super(helper, spliterator);
+ this.op = op;
+ this.generator = generator;
+ this.targetOffset = offset;
+ this.targetSize = size;
+ }
+
+ SliceTask(SliceTask<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ this.op = parent.op;
+ this.generator = parent.generator;
+ this.targetOffset = parent.targetOffset;
+ this.targetSize = parent.targetSize;
+ }
+
+ @Override
+ protected SliceTask<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator) {
+ return new SliceTask<>(this, spliterator);
+ }
+
+ @Override
+ protected final Node<P_OUT> getEmptyResult() {
+ return Nodes.emptyNode(op.getOutputShape());
+ }
+
+ @Override
+ protected final Node<P_OUT> doLeaf() {
+ if (isRoot()) {
+ long sizeIfKnown = StreamOpFlag.SIZED.isPreserved(op.sourceOrOpFlags)
+ ? op.exactOutputSizeIfKnown(spliterator)
+ : -1;
+ final Node.Builder<P_OUT> nb = op.makeNodeBuilder(sizeIfKnown, generator);
+ Sink<P_OUT> opSink = op.opWrapSink(helper.getStreamAndOpFlags(), nb);
+ helper.copyIntoWithCancel(helper.wrapSink(opSink), spliterator);
+ // There is no need to truncate since the op performs the
+ // skipping and limiting of elements
+ return nb.build();
+ }
+ else {
+ Node<P_OUT> node = helper.wrapAndCopyInto(helper.makeNodeBuilder(-1, generator),
+ spliterator).build();
+ thisNodeSize = node.count();
+ completed = true;
+ spliterator = null;
+ return node;
+ }
+ }
+
+ @Override
+ public final void onCompletion(CountedCompleter<?> caller) {
+ if (!isLeaf()) {
+ Node<P_OUT> result;
+ thisNodeSize = leftChild.thisNodeSize + rightChild.thisNodeSize;
+ if (canceled) {
+ thisNodeSize = 0;
+ result = getEmptyResult();
+ }
+ else if (thisNodeSize == 0)
+ result = getEmptyResult();
+ else if (leftChild.thisNodeSize == 0)
+ result = rightChild.getLocalResult();
+ else {
+ result = Nodes.conc(op.getOutputShape(),
+ leftChild.getLocalResult(), rightChild.getLocalResult());
+ }
+ setLocalResult(isRoot() ? doTruncate(result) : result);
+ completed = true;
+ }
+ if (targetSize >= 0
+ && !isRoot()
+ && isLeftCompleted(targetOffset + targetSize))
+ cancelLaterNodes();
+
+ super.onCompletion(caller);
+ }
+
+ @Override
+ protected void cancel() {
+ super.cancel();
+ if (completed)
+ setLocalResult(getEmptyResult());
+ }
+
+ private Node<P_OUT> doTruncate(Node<P_OUT> input) {
+ long to = targetSize >= 0 ? Math.min(input.count(), targetOffset + targetSize) : thisNodeSize;
+ return input.truncate(targetOffset, to, generator);
+ }
+
+ /**
+ * Determine if the number of completed elements in this node and nodes
+ * to the left of this node is greater than or equal to the target size.
+ *
+ * @param target the target size
+ * @return true if the number of elements is greater than or equal to
+ * the target size, otherwise false.
+ */
+ private boolean isLeftCompleted(long target) {
+ long size = completed ? thisNodeSize : completedSize(target);
+ if (size >= target)
+ return true;
+ for (SliceTask<P_IN, P_OUT> parent = getParent(), node = this;
+ parent != null;
+ node = parent, parent = parent.getParent()) {
+ if (node == parent.rightChild) {
+ SliceTask<P_IN, P_OUT> left = parent.leftChild;
+ if (left != null) {
+ size += left.completedSize(target);
+ if (size >= target)
+ return true;
+ }
+ }
+ }
+ return size >= target;
+ }
+
+ /**
+ * Compute the number of completed elements in this node.
+ * <p>
+ * Computation terminates if all nodes have been processed or the
+ * number of completed elements is greater than or equal to the target
+ * size.
+ *
+ * @param target the target size
+ * @return return the number of completed elements
+ */
+ private long completedSize(long target) {
+ if (completed)
+ return thisNodeSize;
+ else {
+ SliceTask<P_IN, P_OUT> left = leftChild;
+ SliceTask<P_IN, P_OUT> right = rightChild;
+ if (left == null || right == null) {
+ // must be completed
+ return thisNodeSize;
+ }
+ else {
+ long leftSize = left.completedSize(target);
+ return (leftSize >= target) ? leftSize : leftSize + right.completedSize(target);
+ }
+ }
+ }
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/SortedOps.java b/ojluni/src/main/java/java/util/stream/SortedOps.java
new file mode 100644
index 0000000..592b609
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/SortedOps.java
@@ -0,0 +1,701 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.function.IntFunction;
+
+
+/**
+ * Factory methods for transforming streams into sorted streams.
+ *
+ * @since 1.8
+ */
+final class SortedOps {
+
+ private SortedOps() { }
+
+ /**
+ * Appends a "sorted" operation to the provided stream.
+ *
+ * @param <T> the type of both input and output elements
+ * @param upstream a reference stream with element type T
+ */
+ static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream) {
+ return new OfRef<>(upstream);
+ }
+
+ /**
+ * Appends a "sorted" operation to the provided stream.
+ *
+ * @param <T> the type of both input and output elements
+ * @param upstream a reference stream with element type T
+ * @param comparator the comparator to order elements by
+ */
+ static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream,
+ Comparator<? super T> comparator) {
+ return new OfRef<>(upstream, comparator);
+ }
+
+ /**
+ * Appends a "sorted" operation to the provided stream.
+ *
+ * @param <T> unused; retained to match the upstream OpenJDK signature
+ * @param upstream an int-valued stream pipeline
+ */
+ static <T> IntStream makeInt(AbstractPipeline<?, Integer, ?> upstream) {
+ return new OfInt(upstream);
+ }
+
+ /**
+ * Appends a "sorted" operation to the provided stream.
+ *
+ * @param <T> unused; retained to match the upstream OpenJDK signature
+ * @param upstream a long-valued stream pipeline
+ */
+ static <T> LongStream makeLong(AbstractPipeline<?, Long, ?> upstream) {
+ return new OfLong(upstream);
+ }
+
+ /**
+ * Appends a "sorted" operation to the provided stream.
+ *
+ * @param <T> unused; retained to match the upstream OpenJDK signature
+ * @param upstream a double-valued stream pipeline
+ */
+ static <T> DoubleStream makeDouble(AbstractPipeline<?, Double, ?> upstream) {
+ return new OfDouble(upstream);
+ }
+
+ /**
+ * Specialized subtype for sorting reference streams
+ */
+ private static final class OfRef<T> extends ReferencePipeline.StatefulOp<T, T> {
+ /**
+ * Comparator used for sorting
+ */
+ private final boolean isNaturalSort;
+ private final Comparator<? super T> comparator;
+
+ /**
+ * Sort using natural order of {@literal <T>} which must be
+ * {@code Comparable}.
+ */
+ OfRef(AbstractPipeline<?, T, ?> upstream) {
+ super(upstream, StreamShape.REFERENCE,
+ StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
+ this.isNaturalSort = true;
+ // Will throw CCE when we try to sort if T is not Comparable
+ @SuppressWarnings("unchecked")
+ Comparator<? super T> comp = (Comparator<? super T>) Comparator.naturalOrder();
+ this.comparator = comp;
+ }
+
+ /**
+ * Sort using the provided comparator.
+ *
+ * @param comparator The comparator to be used to evaluate ordering.
+ */
+ OfRef(AbstractPipeline<?, T, ?> upstream, Comparator<? super T> comparator) {
+ super(upstream, StreamShape.REFERENCE,
+ StreamOpFlag.IS_ORDERED | StreamOpFlag.NOT_SORTED);
+ this.isNaturalSort = false;
+ this.comparator = Objects.requireNonNull(comparator);
+ }
+
+ @Override
+ public Sink<T> opWrapSink(int flags, Sink<T> sink) {
+ Objects.requireNonNull(sink);
+
+ // If the input is already naturally sorted and this operation
+ // also naturally sorted then this is a no-op
+ if (StreamOpFlag.SORTED.isKnown(flags) && isNaturalSort)
+ return sink;
+ else if (StreamOpFlag.SIZED.isKnown(flags))
+ return new SizedRefSortingSink<>(sink, comparator);
+ else
+ return new RefSortingSink<>(sink, comparator);
+ }
+
+ @Override
+ public <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<T[]> generator) {
+ // If the input is already naturally sorted and this operation
+ // naturally sorts then collect the output
+ if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags()) && isNaturalSort) {
+ return helper.evaluate(spliterator, false, generator);
+ }
+ else {
+ // @@@ Weak two-pass parallel implementation; parallel collect, parallel sort
+ T[] flattenedData = helper.evaluate(spliterator, true, generator).asArray(generator);
+ Arrays.parallelSort(flattenedData, comparator);
+ return Nodes.node(flattenedData);
+ }
+ }
+ }
+
+ /**
+ * Specialized subtype for sorting int streams.
+ */
+ private static final class OfInt extends IntPipeline.StatefulOp<Integer> {
+ OfInt(AbstractPipeline<?, Integer, ?> upstream) {
+ super(upstream, StreamShape.INT_VALUE,
+ StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
+ }
+
+ @Override
+ public Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
+ Objects.requireNonNull(sink);
+
+ if (StreamOpFlag.SORTED.isKnown(flags))
+ return sink;
+ else if (StreamOpFlag.SIZED.isKnown(flags))
+ return new SizedIntSortingSink(sink);
+ else
+ return new IntSortingSink(sink);
+ }
+
+ @Override
+ public <P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<Integer[]> generator) {
+ if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
+ return helper.evaluate(spliterator, false, generator);
+ }
+ else {
+ Node.OfInt n = (Node.OfInt) helper.evaluate(spliterator, true, generator);
+
+ int[] content = n.asPrimitiveArray();
+ Arrays.parallelSort(content);
+
+ return Nodes.node(content);
+ }
+ }
+ }
+
+ /**
+ * Specialized subtype for sorting long streams.
+ */
+ private static final class OfLong extends LongPipeline.StatefulOp<Long> {
+ OfLong(AbstractPipeline<?, Long, ?> upstream) {
+ super(upstream, StreamShape.LONG_VALUE,
+ StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
+ }
+
+ @Override
+ public Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
+ Objects.requireNonNull(sink);
+
+ if (StreamOpFlag.SORTED.isKnown(flags))
+ return sink;
+ else if (StreamOpFlag.SIZED.isKnown(flags))
+ return new SizedLongSortingSink(sink);
+ else
+ return new LongSortingSink(sink);
+ }
+
+ @Override
+ public <P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
+ Spliterator<P_IN> spliterator,
+ IntFunction<Long[]> generator) {
+ if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
+ return helper.evaluate(spliterator, false, generator);
+ }
+ else {
+ Node.OfLong n = (Node.OfLong) helper.evaluate(spliterator, true, generator);
+
+ long[] content = n.asPrimitiveArray();
+ Arrays.parallelSort(content);
+
+ return Nodes.node(content);
+ }
+ }
+ }
+
+    /**
+     * Specialized subtype for sorting double streams.
+     */
+    private static final class OfDouble extends DoublePipeline.StatefulOp<Double> {
+        OfDouble(AbstractPipeline<?, Double, ?> upstream) {
+            super(upstream, StreamShape.DOUBLE_VALUE,
+                  StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
+        }
+
+        @Override
+        public Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
+            Objects.requireNonNull(sink);
+
+            // Already sorted: pass elements straight through.
+            if (StreamOpFlag.SORTED.isKnown(flags))
+                return sink;
+            else if (StreamOpFlag.SIZED.isKnown(flags))
+                return new SizedDoubleSortingSink(sink);
+            else
+                return new DoubleSortingSink(sink);
+        }
+
+        @Override
+        public <P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
+                                                      Spliterator<P_IN> spliterator,
+                                                      IntFunction<Double[]> generator) {
+            // Elide the sort entirely when the upstream is already sorted.
+            if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
+                return helper.evaluate(spliterator, false, generator);
+            }
+            else {
+                Node.OfDouble n = (Node.OfDouble) helper.evaluate(spliterator, true, generator);
+
+                double[] content = n.asPrimitiveArray();
+                Arrays.parallelSort(content);
+
+                return Nodes.node(content);
+            }
+        }
+    }
+
+    /**
+     * Abstract {@link Sink} for implementing sort on reference streams.
+     *
+     * <p>
+     * Note: documentation below applies to reference and all primitive sinks.
+     * <p>
+     * Sorting sinks first accept all elements, buffering them into an array
+     * or a re-sizable data structure, if the size of the pipeline is known or
+     * unknown respectively. At the end of the sink protocol those elements are
+     * sorted and then pushed downstream.
+     * This class records if {@link #cancellationRequested} is called. If so it
+     * can be inferred that the source pushing elements into the pipeline
+     * knows that the pipeline is short-circuiting. In such cases sub-classes
+     * pushing elements downstream will preserve the short-circuiting protocol
+     * by calling {@code downstream.cancellationRequested()} and checking the
+     * result is {@code false} before an element is pushed.
+     * <p>
+     * Note that the above behaviour is an optimization for sorting with
+     * sequential streams. It is not an error that more elements, than strictly
+     * required to produce a result, may flow through the pipeline. This can
+     * occur, in general (not restricted to just sorting), for short-circuiting
+     * parallel pipelines.
+     */
+    private static abstract class AbstractRefSortingSink<T> extends Sink.ChainedReference<T, T> {
+        protected final Comparator<? super T> comparator;
+        // @@@ could be a lazy final value, if/when support is added
+        // Set when downstream ever reports cancellation; read by subclasses
+        // in end() to preserve the short-circuiting protocol.
+        protected boolean cancellationWasRequested;
+
+        AbstractRefSortingSink(Sink<? super T> downstream, Comparator<? super T> comparator) {
+            super(downstream);
+            this.comparator = comparator;
+        }
+
+        /**
+         * Records that cancellation is requested so short-circuiting behaviour
+         * can be preserved when the sorted elements are pushed downstream.
+         *
+         * @return false, as this sink never short-circuits.
+         */
+        @Override
+        public final boolean cancellationRequested() {
+            cancellationWasRequested = true;
+            return false;
+        }
+    }
+
+    /**
+     * {@link Sink} for implementing sort on SIZED reference streams.
+     */
+    private static final class SizedRefSortingSink<T> extends AbstractRefSortingSink<T> {
+        private T[] array;
+        private int offset;
+
+        SizedRefSortingSink(Sink<? super T> sink, Comparator<? super T> comparator) {
+            super(sink, comparator);
+        }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public void begin(long size) {
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            // Exact size is known up front, so buffer directly into an array.
+            array = (T[]) new Object[(int) size];
+        }
+
+        @Override
+        public void end() {
+            Arrays.sort(array, 0, offset, comparator);
+            downstream.begin(offset);
+            if (!cancellationWasRequested) {
+                for (int i = 0; i < offset; i++)
+                    downstream.accept(array[i]);
+            }
+            else {
+                // Downstream short-circuits; stop pushing once it cancels.
+                for (int i = 0; i < offset && !downstream.cancellationRequested(); i++)
+                    downstream.accept(array[i]);
+            }
+            downstream.end();
+            array = null;  // release buffer for GC
+        }
+
+        @Override
+        public void accept(T t) {
+            array[offset++] = t;
+        }
+    }
+
+    /**
+     * {@link Sink} for implementing sort on reference streams.
+     */
+    private static final class RefSortingSink<T> extends AbstractRefSortingSink<T> {
+        private ArrayList<T> list;
+
+        RefSortingSink(Sink<? super T> sink, Comparator<? super T> comparator) {
+            super(sink, comparator);
+        }
+
+        @Override
+        public void begin(long size) {
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            // size < 0 means the size is unknown; start with a default-capacity list.
+            list = (size >= 0) ? new ArrayList<T>((int) size) : new ArrayList<T>();
+        }
+
+        @Override
+        public void end() {
+            list.sort(comparator);
+            downstream.begin(list.size());
+            if (!cancellationWasRequested) {
+                list.forEach(downstream::accept);
+            }
+            else {
+                // Downstream short-circuits; stop pushing once it cancels.
+                for (T t : list) {
+                    if (downstream.cancellationRequested()) break;
+                    downstream.accept(t);
+                }
+            }
+            downstream.end();
+            list = null;  // release buffer for GC
+        }
+
+        @Override
+        public void accept(T t) {
+            list.add(t);
+        }
+    }
+
+    /**
+     * Abstract {@link Sink} for implementing sort on int streams.
+     */
+    private static abstract class AbstractIntSortingSink extends Sink.ChainedInt<Integer> {
+        // Set when downstream ever reports cancellation; read by subclasses
+        // in end() to preserve the short-circuiting protocol.
+        protected boolean cancellationWasRequested;
+
+        AbstractIntSortingSink(Sink<? super Integer> downstream) {
+            super(downstream);
+        }
+
+        @Override
+        public final boolean cancellationRequested() {
+            cancellationWasRequested = true;
+            return false;
+        }
+    }
+
+    /**
+     * {@link Sink} for implementing sort on SIZED int streams.
+     */
+    private static final class SizedIntSortingSink extends AbstractIntSortingSink {
+        private int[] array;
+        private int offset;
+
+        SizedIntSortingSink(Sink<? super Integer> downstream) {
+            super(downstream);
+        }
+
+        @Override
+        public void begin(long size) {
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            // Exact size is known up front, so buffer directly into an array.
+            array = new int[(int) size];
+        }
+
+        @Override
+        public void end() {
+            Arrays.sort(array, 0, offset);
+            downstream.begin(offset);
+            if (!cancellationWasRequested) {
+                for (int i = 0; i < offset; i++)
+                    downstream.accept(array[i]);
+            }
+            else {
+                // Downstream short-circuits; stop pushing once it cancels.
+                for (int i = 0; i < offset && !downstream.cancellationRequested(); i++)
+                    downstream.accept(array[i]);
+            }
+            downstream.end();
+            array = null;  // release buffer for GC
+        }
+
+        @Override
+        public void accept(int t) {
+            array[offset++] = t;
+        }
+    }
+
+    /**
+     * {@link Sink} for implementing sort on int streams.
+     */
+    private static final class IntSortingSink extends AbstractIntSortingSink {
+        private SpinedBuffer.OfInt b;
+
+        IntSortingSink(Sink<? super Integer> sink) {
+            super(sink);
+        }
+
+        @Override
+        public void begin(long size) {
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            // Unknown or zero size: fall back to a default-capacity buffer.
+            b = (size > 0) ? new SpinedBuffer.OfInt((int) size) : new SpinedBuffer.OfInt();
+        }
+
+        @Override
+        public void end() {
+            int[] ints = b.asPrimitiveArray();
+            Arrays.sort(ints);
+            downstream.begin(ints.length);
+            if (!cancellationWasRequested) {
+                for (int anInt : ints)
+                    downstream.accept(anInt);
+            }
+            else {
+                // Downstream short-circuits; stop pushing once it cancels.
+                for (int anInt : ints) {
+                    if (downstream.cancellationRequested()) break;
+                    downstream.accept(anInt);
+                }
+            }
+            downstream.end();
+        }
+
+        @Override
+        public void accept(int t) {
+            b.accept(t);
+        }
+    }
+
+    /**
+     * Abstract {@link Sink} for implementing sort on long streams.
+     */
+    private static abstract class AbstractLongSortingSink extends Sink.ChainedLong<Long> {
+        // Set when downstream ever reports cancellation; read by subclasses
+        // in end() to preserve the short-circuiting protocol.
+        protected boolean cancellationWasRequested;
+
+        AbstractLongSortingSink(Sink<? super Long> downstream) {
+            super(downstream);
+        }
+
+        @Override
+        public final boolean cancellationRequested() {
+            cancellationWasRequested = true;
+            return false;
+        }
+    }
+
+    /**
+     * {@link Sink} for implementing sort on SIZED long streams.
+     */
+    private static final class SizedLongSortingSink extends AbstractLongSortingSink {
+        private long[] array;
+        private int offset;
+
+        SizedLongSortingSink(Sink<? super Long> downstream) {
+            super(downstream);
+        }
+
+        @Override
+        public void begin(long size) {
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            // Exact size is known up front, so buffer directly into an array.
+            array = new long[(int) size];
+        }
+
+        @Override
+        public void end() {
+            Arrays.sort(array, 0, offset);
+            downstream.begin(offset);
+            if (!cancellationWasRequested) {
+                for (int i = 0; i < offset; i++)
+                    downstream.accept(array[i]);
+            }
+            else {
+                // Downstream short-circuits; stop pushing once it cancels.
+                for (int i = 0; i < offset && !downstream.cancellationRequested(); i++)
+                    downstream.accept(array[i]);
+            }
+            downstream.end();
+            array = null;  // release buffer for GC
+        }
+
+        @Override
+        public void accept(long t) {
+            array[offset++] = t;
+        }
+    }
+
+    /**
+     * {@link Sink} for implementing sort on long streams.
+     */
+    private static final class LongSortingSink extends AbstractLongSortingSink {
+        private SpinedBuffer.OfLong b;
+
+        LongSortingSink(Sink<? super Long> sink) {
+            super(sink);
+        }
+
+        @Override
+        public void begin(long size) {
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            // Unknown or zero size: fall back to a default-capacity buffer.
+            b = (size > 0) ? new SpinedBuffer.OfLong((int) size) : new SpinedBuffer.OfLong();
+        }
+
+        @Override
+        public void end() {
+            long[] longs = b.asPrimitiveArray();
+            Arrays.sort(longs);
+            downstream.begin(longs.length);
+            if (!cancellationWasRequested) {
+                for (long aLong : longs)
+                    downstream.accept(aLong);
+            }
+            else {
+                // Downstream short-circuits; stop pushing once it cancels.
+                for (long aLong : longs) {
+                    if (downstream.cancellationRequested()) break;
+                    downstream.accept(aLong);
+                }
+            }
+            downstream.end();
+        }
+
+        @Override
+        public void accept(long t) {
+            b.accept(t);
+        }
+    }
+
+    /**
+     * Abstract {@link Sink} for implementing sort on double streams.
+     */
+    private static abstract class AbstractDoubleSortingSink extends Sink.ChainedDouble<Double> {
+        // Set when downstream ever reports cancellation; read by subclasses
+        // in end() to preserve the short-circuiting protocol.
+        protected boolean cancellationWasRequested;
+
+        AbstractDoubleSortingSink(Sink<? super Double> downstream) {
+            super(downstream);
+        }
+
+        @Override
+        public final boolean cancellationRequested() {
+            cancellationWasRequested = true;
+            return false;
+        }
+    }
+
+    /**
+     * {@link Sink} for implementing sort on SIZED double streams.
+     */
+    private static final class SizedDoubleSortingSink extends AbstractDoubleSortingSink {
+        private double[] array;
+        private int offset;
+
+        SizedDoubleSortingSink(Sink<? super Double> downstream) {
+            super(downstream);
+        }
+
+        @Override
+        public void begin(long size) {
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            // Exact size is known up front, so buffer directly into an array.
+            array = new double[(int) size];
+        }
+
+        @Override
+        public void end() {
+            Arrays.sort(array, 0, offset);
+            downstream.begin(offset);
+            if (!cancellationWasRequested) {
+                for (int i = 0; i < offset; i++)
+                    downstream.accept(array[i]);
+            }
+            else {
+                // Downstream short-circuits; stop pushing once it cancels.
+                for (int i = 0; i < offset && !downstream.cancellationRequested(); i++)
+                    downstream.accept(array[i]);
+            }
+            downstream.end();
+            array = null;  // release buffer for GC
+        }
+
+        @Override
+        public void accept(double t) {
+            array[offset++] = t;
+        }
+    }
+
+    /**
+     * {@link Sink} for implementing sort on double streams.
+     */
+    private static final class DoubleSortingSink extends AbstractDoubleSortingSink {
+        private SpinedBuffer.OfDouble b;
+
+        DoubleSortingSink(Sink<? super Double> sink) {
+            super(sink);
+        }
+
+        @Override
+        public void begin(long size) {
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            // Unknown or zero size: fall back to a default-capacity buffer.
+            b = (size > 0) ? new SpinedBuffer.OfDouble((int) size) : new SpinedBuffer.OfDouble();
+        }
+
+        @Override
+        public void end() {
+            double[] doubles = b.asPrimitiveArray();
+            Arrays.sort(doubles);
+            downstream.begin(doubles.length);
+            if (!cancellationWasRequested) {
+                for (double aDouble : doubles)
+                    downstream.accept(aDouble);
+            }
+            else {
+                // Downstream short-circuits; stop pushing once it cancels.
+                for (double aDouble : doubles) {
+                    if (downstream.cancellationRequested()) break;
+                    downstream.accept(aDouble);
+                }
+            }
+            downstream.end();
+        }
+
+        @Override
+        public void accept(double t) {
+            b.accept(t);
+        }
+    }
+}
diff --git a/ojluni/src/main/java/java/util/stream/SpinedBuffer.java b/ojluni/src/main/java/java/util/stream/SpinedBuffer.java
new file mode 100644
index 0000000..163692c
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/SpinedBuffer.java
@@ -0,0 +1,1061 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.LongConsumer;
+
+/**
+ * An ordered collection of elements. Elements can be added, but not removed.
+ * Goes through a building phase, during which elements can be added, and a
+ * traversal phase, during which elements can be traversed in order but no
+ * further modifications are possible.
+ *
+ * <p> One or more arrays are used to store elements. The use of multiple
+ * arrays has better performance characteristics than a single array used by
+ * {@link ArrayList}, as when the capacity of the list needs to be increased
+ * no copying of elements is required. This is usually beneficial in the case
+ * where the results will be traversed a small number of times.
+ *
+ * @param <E> the type of elements in this list
+ * @since 1.8
+ */
+class SpinedBuffer<E>
+ extends AbstractSpinedBuffer
+ implements Consumer<E>, Iterable<E> {
+
+ /*
+ * We optimistically hope that all the data will fit into the first chunk,
+ * so we try to avoid inflating the spine[] and priorElementCount[] arrays
+ * prematurely. So methods must be prepared to deal with these arrays being
+ * null. If spine is non-null, then spineIndex points to the current chunk
+ * within the spine, otherwise it is zero. The spine and priorElementCount
+ * arrays are always the same size, and for any i <= spineIndex,
+ * priorElementCount[i] is the sum of the sizes of all the prior chunks.
+ *
+ * The curChunk pointer is always valid. The elementIndex is the index of
+ * the next element to be written in curChunk; this may be past the end of
+ * curChunk so we have to check before writing. When we inflate the spine
+ * array, curChunk becomes the first element in it. When we clear the
+ * buffer, we discard all chunks except the first one, which we clear,
+ * restoring it to the initial single-chunk state.
+ */
+
+ /**
+ * Chunk that we're currently writing into; may or may not be aliased with
+ * the first element of the spine.
+ */
+ protected E[] curChunk;
+
+ /**
+ * All chunks, or null if there is only one chunk.
+ */
+ protected E[][] spine;
+
+    /**
+     * Constructs an empty list with the specified initial capacity.
+     *
+     * @param initialCapacity the initial capacity of the list
+     * @throws IllegalArgumentException if the specified initial capacity
+     * is negative
+     */
+    @SuppressWarnings("unchecked")
+    SpinedBuffer(int initialCapacity) {
+        super(initialCapacity);
+        // First chunk size is 2^initialChunkPower (computed by the superclass).
+        curChunk = (E[]) new Object[1 << initialChunkPower];
+    }
+
+    /**
+     * Constructs an empty list with an initial capacity of sixteen.
+     */
+    @SuppressWarnings("unchecked")
+    SpinedBuffer() {
+        super();
+        // First chunk size is 2^initialChunkPower (computed by the superclass).
+        curChunk = (E[]) new Object[1 << initialChunkPower];
+    }
+
+    /**
+     * Returns the current capacity of the buffer
+     */
+    protected long capacity() {
+        // Before inflation there is a single chunk; afterwards, capacity is
+        // the element count prior to the last chunk plus that chunk's length.
+        return (spineIndex == 0)
+               ? curChunk.length
+               : priorElementCount[spineIndex] + spine[spineIndex].length;
+    }
+
+    @SuppressWarnings("unchecked")
+    private void inflateSpine() {
+        // Lazily create the spine, aliasing the current chunk as its first slot.
+        if (spine == null) {
+            spine = (E[][]) new Object[MIN_SPINE_SIZE][];
+            priorElementCount = new long[MIN_SPINE_SIZE];
+            spine[0] = curChunk;
+        }
+    }
+
+    /**
+     * Ensure that the buffer has at least capacity to hold the target size
+     */
+    @SuppressWarnings("unchecked")
+    protected final void ensureCapacity(long targetSize) {
+        long capacity = capacity();
+        if (targetSize > capacity) {
+            inflateSpine();
+            // Append chunks (doubling the spine as needed) until capacity suffices.
+            for (int i=spineIndex+1; targetSize > capacity; i++) {
+                if (i >= spine.length) {
+                    int newSpineSize = spine.length * 2;
+                    spine = Arrays.copyOf(spine, newSpineSize);
+                    priorElementCount = Arrays.copyOf(priorElementCount, newSpineSize);
+                }
+                int nextChunkSize = chunkSize(i);
+                spine[i] = (E[]) new Object[nextChunkSize];
+                priorElementCount[i] = priorElementCount[i-1] + spine[i-1].length;
+                capacity += nextChunkSize;
+            }
+        }
+    }
+
+    /**
+     * Force the buffer to increase its capacity.
+     */
+    protected void increaseCapacity() {
+        // Requesting one more element than current capacity adds one chunk.
+        ensureCapacity(capacity() + 1);
+    }
+
+    /**
+     * Retrieve the element at the specified index.
+     */
+    public E get(long index) {
+        // @@@ can further optimize by caching last seen spineIndex,
+        // which is going to be right most of the time
+
+        // Casts to int are safe since the spine array index is the index minus
+        // the prior element count from the current spine
+        if (spineIndex == 0) {
+            if (index < elementIndex)
+                return curChunk[((int) index)];
+            else
+                throw new IndexOutOfBoundsException(Long.toString(index));
+        }
+
+        if (index >= count())
+            throw new IndexOutOfBoundsException(Long.toString(index));
+
+        // Linear scan over at most spineIndex+1 chunks to locate the index.
+        for (int j=0; j <= spineIndex; j++)
+            if (index < priorElementCount[j] + spine[j].length)
+                return spine[j][((int) (index - priorElementCount[j]))];
+
+        throw new IndexOutOfBoundsException(Long.toString(index));
+    }
+
+    /**
+     * Copy the elements, starting at the specified offset, into the specified
+     * array.
+     */
+    public void copyInto(E[] array, int offset) {
+        long finalOffset = offset + count();
+        // The second test also catches int overflow of offset + count().
+        if (finalOffset > array.length || finalOffset < offset) {
+            throw new IndexOutOfBoundsException("does not fit");
+        }
+
+        if (spineIndex == 0)
+            System.arraycopy(curChunk, 0, array, offset, elementIndex);
+        else {
+            // full chunks
+            for (int i=0; i < spineIndex; i++) {
+                System.arraycopy(spine[i], 0, array, offset, spine[i].length);
+                offset += spine[i].length;
+            }
+            if (elementIndex > 0)
+                System.arraycopy(curChunk, 0, array, offset, elementIndex);
+        }
+    }
+
+    /**
+     * Create a new array using the specified array factory, and copy the
+     * elements into it.
+     */
+    public E[] asArray(IntFunction<E[]> arrayFactory) {
+        long size = count();
+        if (size >= Nodes.MAX_ARRAY_SIZE)
+            throw new IllegalArgumentException(Nodes.BAD_SIZE);
+        E[] result = arrayFactory.apply((int) size);
+        copyInto(result, 0);
+        return result;
+    }
+
+    @Override
+    public void clear() {
+        if (spine != null) {
+            // Keep only the first chunk, nulling its slots so elements can be GC'd.
+            curChunk = spine[0];
+            for (int i=0; i<curChunk.length; i++)
+                curChunk[i] = null;
+            spine = null;
+            priorElementCount = null;
+        }
+        else {
+            // Single-chunk case: only the written prefix needs nulling.
+            for (int i=0; i<elementIndex; i++)
+                curChunk[i] = null;
+        }
+        elementIndex = 0;
+        spineIndex = 0;
+    }
+
+    @Override
+    public Iterator<E> iterator() {
+        // Adapt the buffer's spliterator to the Iterator interface.
+        return Spliterators.iterator(spliterator());
+    }
+
+    @Override
+    public void forEach(Consumer<? super E> consumer) {
+        // completed chunks, if any
+        for (int j = 0; j < spineIndex; j++)
+            for (E t : spine[j])
+                consumer.accept(t);
+
+        // current chunk (only the written prefix)
+        for (int i=0; i<elementIndex; i++)
+            consumer.accept(curChunk[i]);
+    }
+
+    @Override
+    public void accept(E e) {
+        // Advance to the next chunk when the current one is full.
+        if (elementIndex == curChunk.length) {
+            inflateSpine();
+            if (spineIndex+1 >= spine.length || spine[spineIndex+1] == null)
+                increaseCapacity();
+            elementIndex = 0;
+            ++spineIndex;
+            curChunk = spine[spineIndex];
+        }
+        curChunk[elementIndex++] = e;
+    }
+
+    @Override
+    public String toString() {
+        // Snapshot the contents into a list for a readable representation.
+        List<E> list = new ArrayList<>();
+        forEach(list::add);
+        return "SpinedBuffer:" + list.toString();
+    }
+
+    private static final int SPLITERATOR_CHARACTERISTICS
+            = Spliterator.SIZED | Spliterator.ORDERED | Spliterator.SUBSIZED;
+
+    /**
+     * Return a {@link Spliterator} describing the contents of the buffer.
+     */
+    public Spliterator<E> spliterator() {
+        class Splitr implements Spliterator<E> {
+            // The current spine index
+            int splSpineIndex;
+
+            // Last spine index
+            final int lastSpineIndex;
+
+            // The current element index into the current spine
+            int splElementIndex;
+
+            // Last spine's last element index + 1
+            final int lastSpineElementFence;
+
+            // When splSpineIndex >= lastSpineIndex and
+            // splElementIndex >= lastSpineElementFence then
+            // this spliterator is fully traversed
+            // tryAdvance can set splSpineIndex > spineIndex if the last spine is full
+
+            // The current spine array
+            E[] splChunk;
+
+            Splitr(int firstSpineIndex, int lastSpineIndex,
+                   int firstSpineElementIndex, int lastSpineElementFence) {
+                this.splSpineIndex = firstSpineIndex;
+                this.lastSpineIndex = lastSpineIndex;
+                this.splElementIndex = firstSpineElementIndex;
+                this.lastSpineElementFence = lastSpineElementFence;
+                assert spine != null || firstSpineIndex == 0 && lastSpineIndex == 0;
+                splChunk = (spine == null) ? curChunk : spine[firstSpineIndex];
+            }
+
+            @Override
+            public long estimateSize() {
+                return (splSpineIndex == lastSpineIndex)
+                       ? (long) lastSpineElementFence - splElementIndex
+                       : // # of elements prior to end -
+                       priorElementCount[lastSpineIndex] + lastSpineElementFence -
+                       // # of elements prior to current
+                       priorElementCount[splSpineIndex] - splElementIndex;
+            }
+
+            @Override
+            public int characteristics() {
+                return SPLITERATOR_CHARACTERISTICS;
+            }
+
+            @Override
+            public boolean tryAdvance(Consumer<? super E> consumer) {
+                Objects.requireNonNull(consumer);
+
+                if (splSpineIndex < lastSpineIndex
+                    || (splSpineIndex == lastSpineIndex && splElementIndex < lastSpineElementFence)) {
+                    consumer.accept(splChunk[splElementIndex++]);
+
+                    // Roll over to the next chunk when the current one is exhausted.
+                    if (splElementIndex == splChunk.length) {
+                        splElementIndex = 0;
+                        ++splSpineIndex;
+                        if (spine != null && splSpineIndex <= lastSpineIndex)
+                            splChunk = spine[splSpineIndex];
+                    }
+                    return true;
+                }
+                return false;
+            }
+
+            @Override
+            public void forEachRemaining(Consumer<? super E> consumer) {
+                Objects.requireNonNull(consumer);
+
+                if (splSpineIndex < lastSpineIndex
+                    || (splSpineIndex == lastSpineIndex && splElementIndex < lastSpineElementFence)) {
+                    int i = splElementIndex;
+                    // completed chunks, if any
+                    for (int sp = splSpineIndex; sp < lastSpineIndex; sp++) {
+                        E[] chunk = spine[sp];
+                        for (; i < chunk.length; i++) {
+                            consumer.accept(chunk[i]);
+                        }
+                        i = 0;
+                    }
+                    // last (or current uncompleted) chunk
+                    E[] chunk = (splSpineIndex == lastSpineIndex) ? splChunk : spine[lastSpineIndex];
+                    int hElementIndex = lastSpineElementFence;
+                    for (; i < hElementIndex; i++) {
+                        consumer.accept(chunk[i]);
+                    }
+                    // mark consumed
+                    splSpineIndex = lastSpineIndex;
+                    splElementIndex = lastSpineElementFence;
+                }
+            }
+
+            @Override
+            public Spliterator<E> trySplit() {
+                if (splSpineIndex < lastSpineIndex) {
+                    // split just before last chunk (if it is full this means 50:50 split)
+                    Spliterator<E> ret = new Splitr(splSpineIndex, lastSpineIndex - 1,
+                                                    splElementIndex, spine[lastSpineIndex-1].length);
+                    // position to start of last chunk
+                    splSpineIndex = lastSpineIndex;
+                    splElementIndex = 0;
+                    splChunk = spine[splSpineIndex];
+                    return ret;
+                }
+                else if (splSpineIndex == lastSpineIndex) {
+                    // Within a single chunk: split the remaining range in half.
+                    int t = (lastSpineElementFence - splElementIndex) / 2;
+                    if (t == 0)
+                        return null;
+                    else {
+                        Spliterator<E> ret = Arrays.spliterator(splChunk, splElementIndex, splElementIndex + t);
+                        splElementIndex += t;
+                        return ret;
+                    }
+                }
+                else {
+                    return null;
+                }
+            }
+        }
+        // Cover the entire buffer contents at the time of this call.
+        return new Splitr(0, spineIndex, 0, elementIndex);
+    }
+
+    /**
+     * An ordered collection of primitive values.  Elements can be added, but
+     * not removed. Goes through a building phase, during which elements can be
+     * added, and a traversal phase, during which elements can be traversed in
+     * order but no further modifications are possible.
+     *
+     * <p> One or more arrays are used to store elements. The use of multiple
+     * arrays has better performance characteristics than a single array used by
+     * {@link ArrayList}, as when the capacity of the list needs to be increased
+     * no copying of elements is required. This is usually beneficial in the case
+     * where the results will be traversed a small number of times.
+     *
+     * @param <E> the wrapper type for this primitive type
+     * @param <T_ARR> the array type for this primitive type
+     * @param <T_CONS> the Consumer type for this primitive type
+     */
+    abstract static class OfPrimitive<E, T_ARR, T_CONS>
+            extends AbstractSpinedBuffer implements Iterable<E> {
+
+        /*
+         * We optimistically hope that all the data will fit into the first chunk,
+         * so we try to avoid inflating the spine[] and priorElementCount[] arrays
+         * prematurely. So methods must be prepared to deal with these arrays being
+         * null. If spine is non-null, then spineIndex points to the current chunk
+         * within the spine, otherwise it is zero. The spine and priorElementCount
+         * arrays are always the same size, and for any i <= spineIndex,
+         * priorElementCount[i] is the sum of the sizes of all the prior chunks.
+         *
+         * The curChunk pointer is always valid. The elementIndex is the index of
+         * the next element to be written in curChunk; this may be past the end of
+         * curChunk so we have to check before writing. When we inflate the spine
+         * array, curChunk becomes the first element in it. When we clear the
+         * buffer, we discard all chunks except the first one, which we clear,
+         * restoring it to the initial single-chunk state.
+         */
+
+        // The chunk we're currently writing into
+        T_ARR curChunk;
+
+        // All chunks, or null if there is only one chunk
+        T_ARR[] spine;
+
+        /**
+         * Constructs an empty list with the specified initial capacity.
+         *
+         * @param initialCapacity the initial capacity of the list
+         * @throws IllegalArgumentException if the specified initial capacity
+         *         is negative
+         */
+        OfPrimitive(int initialCapacity) {
+            super(initialCapacity);
+            curChunk = newArray(1 << initialChunkPower);
+        }
+
+        /**
+         * Constructs an empty list with an initial capacity of sixteen.
+         */
+        OfPrimitive() {
+            super();
+            curChunk = newArray(1 << initialChunkPower);
+        }
+
+        @Override
+        public abstract Iterator<E> iterator();
+
+        @Override
+        public abstract void forEach(Consumer<? super E> consumer);
+
+        /** Create a new array-of-array of the proper type and size */
+        protected abstract T_ARR[] newArrayArray(int size);
+
+        /** Create a new array of the proper type and size */
+        public abstract T_ARR newArray(int size);
+
+        /** Get the length of an array */
+        protected abstract int arrayLength(T_ARR array);
+
+        /** Iterate an array with the provided consumer */
+        protected abstract void arrayForEach(T_ARR array, int from, int to,
+                                             T_CONS consumer);
+
+        protected long capacity() {
+            // Mirrors SpinedBuffer.capacity(), via the arrayLength abstraction.
+            return (spineIndex == 0)
+                   ? arrayLength(curChunk)
+                   : priorElementCount[spineIndex] + arrayLength(spine[spineIndex]);
+        }
+
+        private void inflateSpine() {
+            // Lazily create the spine, aliasing the current chunk as its first slot.
+            if (spine == null) {
+                spine = newArrayArray(MIN_SPINE_SIZE);
+                priorElementCount = new long[MIN_SPINE_SIZE];
+                spine[0] = curChunk;
+            }
+        }
+
+        protected final void ensureCapacity(long targetSize) {
+            long capacity = capacity();
+            if (targetSize > capacity) {
+                inflateSpine();
+                // Append chunks (doubling the spine as needed) until capacity suffices.
+                for (int i=spineIndex+1; targetSize > capacity; i++) {
+                    if (i >= spine.length) {
+                        int newSpineSize = spine.length * 2;
+                        spine = Arrays.copyOf(spine, newSpineSize);
+                        priorElementCount = Arrays.copyOf(priorElementCount, newSpineSize);
+                    }
+                    int nextChunkSize = chunkSize(i);
+                    spine[i] = newArray(nextChunkSize);
+                    priorElementCount[i] = priorElementCount[i-1] + arrayLength(spine[i - 1]);
+                    capacity += nextChunkSize;
+                }
+            }
+        }
+
+        protected void increaseCapacity() {
+            ensureCapacity(capacity() + 1);
+        }
+
+        protected int chunkFor(long index) {
+            if (spineIndex == 0) {
+                if (index < elementIndex)
+                    return 0;
+                else
+                    throw new IndexOutOfBoundsException(Long.toString(index));
+            }
+
+            if (index >= count())
+                throw new IndexOutOfBoundsException(Long.toString(index));
+
+            // Linear scan over at most spineIndex+1 chunks to locate the index.
+            for (int j=0; j <= spineIndex; j++)
+                if (index < priorElementCount[j] + arrayLength(spine[j]))
+                    return j;
+
+            throw new IndexOutOfBoundsException(Long.toString(index));
+        }
+
+        public void copyInto(T_ARR array, int offset) {
+            long finalOffset = offset + count();
+            // The second test also catches int overflow of offset + count().
+            if (finalOffset > arrayLength(array) || finalOffset < offset) {
+                throw new IndexOutOfBoundsException("does not fit");
+            }
+
+            if (spineIndex == 0)
+                System.arraycopy(curChunk, 0, array, offset, elementIndex);
+            else {
+                // full chunks
+                for (int i=0; i < spineIndex; i++) {
+                    System.arraycopy(spine[i], 0, array, offset, arrayLength(spine[i]));
+                    offset += arrayLength(spine[i]);
+                }
+                if (elementIndex > 0)
+                    System.arraycopy(curChunk, 0, array, offset, elementIndex);
+            }
+        }
+
+        public T_ARR asPrimitiveArray() {
+            long size = count();
+            if (size >= Nodes.MAX_ARRAY_SIZE)
+                throw new IllegalArgumentException(Nodes.BAD_SIZE);
+            T_ARR result = newArray((int) size);
+            copyInto(result, 0);
+            return result;
+        }
+
+        protected void preAccept() {
+            // Advance to the next chunk when the current one is full.
+            if (elementIndex == arrayLength(curChunk)) {
+                inflateSpine();
+                if (spineIndex+1 >= spine.length || spine[spineIndex+1] == null)
+                    increaseCapacity();
+                elementIndex = 0;
+                ++spineIndex;
+                curChunk = spine[spineIndex];
+            }
+        }
+
+        public void clear() {
+            if (spine != null) {
+                // Unlike the reference buffer, chunk contents need not be nulled:
+                // primitive arrays hold no object references.
+                curChunk = spine[0];
+                spine = null;
+                priorElementCount = null;
+            }
+            elementIndex = 0;
+            spineIndex = 0;
+        }
+
+        @SuppressWarnings("overloads")
+        public void forEach(T_CONS consumer) {
+            // completed chunks, if any
+            for (int j = 0; j < spineIndex; j++)
+                arrayForEach(spine[j], 0, arrayLength(spine[j]), consumer);
+
+            // current chunk
+            arrayForEach(curChunk, 0, elementIndex, consumer);
+        }
+
+        abstract class BaseSpliterator<T_SPLITR extends Spliterator.OfPrimitive<E, T_CONS, T_SPLITR>>
+                implements Spliterator.OfPrimitive<E, T_CONS, T_SPLITR> {
+            // The current spine index
+            int splSpineIndex;
+
+            // Last spine index
+            final int lastSpineIndex;
+
+            // The current element index into the current spine
+            int splElementIndex;
+
+            // Last spine's last element index + 1
+            final int lastSpineElementFence;
+
+            // When splSpineIndex >= lastSpineIndex and
+            // splElementIndex >= lastSpineElementFence then
+            // this spliterator is fully traversed
+            // tryAdvance can set splSpineIndex > spineIndex if the last spine is full
+
+            // The current spine array
+            T_ARR splChunk;
+
+            BaseSpliterator(int firstSpineIndex, int lastSpineIndex,
+                            int firstSpineElementIndex, int lastSpineElementFence) {
+                this.splSpineIndex = firstSpineIndex;
+                this.lastSpineIndex = lastSpineIndex;
+                this.splElementIndex = firstSpineElementIndex;
+                this.lastSpineElementFence = lastSpineElementFence;
+                assert spine != null || firstSpineIndex == 0 && lastSpineIndex == 0;
+                splChunk = (spine == null) ? curChunk : spine[firstSpineIndex];
+            }
+
+            abstract T_SPLITR newSpliterator(int firstSpineIndex, int lastSpineIndex,
+                                             int firstSpineElementIndex, int lastSpineElementFence);
+
+            abstract void arrayForOne(T_ARR array, int index, T_CONS consumer);
+
+            abstract T_SPLITR arraySpliterator(T_ARR array, int offset, int len);
+
+            @Override
+            public long estimateSize() {
+                return (splSpineIndex == lastSpineIndex)
+                       ? (long) lastSpineElementFence - splElementIndex
+                       : // # of elements prior to end -
+                       priorElementCount[lastSpineIndex] + lastSpineElementFence -
+                       // # of elements prior to current
+                       priorElementCount[splSpineIndex] - splElementIndex;
+            }
+
+            @Override
+            public int characteristics() {
+                return SPLITERATOR_CHARACTERISTICS;
+            }
+
+            @Override
+            public boolean tryAdvance(T_CONS consumer) {
+                Objects.requireNonNull(consumer);
+
+                if (splSpineIndex < lastSpineIndex
+                    || (splSpineIndex == lastSpineIndex && splElementIndex < lastSpineElementFence)) {
+                    arrayForOne(splChunk, splElementIndex++, consumer);
+
+                    // Roll over to the next chunk when the current one is exhausted.
+                    if (splElementIndex == arrayLength(splChunk)) {
+                        splElementIndex = 0;
+                        ++splSpineIndex;
+                        if (spine != null && splSpineIndex <= lastSpineIndex)
+                            splChunk = spine[splSpineIndex];
+                    }
+                    return true;
+                }
+                return false;
+            }
+
+            @Override
+            public void forEachRemaining(T_CONS consumer) {
+                Objects.requireNonNull(consumer);
+
+                if (splSpineIndex < lastSpineIndex
+                    || (splSpineIndex == lastSpineIndex && splElementIndex < lastSpineElementFence)) {
+                    int i = splElementIndex;
+                    // completed chunks, if any
+                    for (int sp = splSpineIndex; sp < lastSpineIndex; sp++) {
+                        T_ARR chunk = spine[sp];
+                        arrayForEach(chunk, i, arrayLength(chunk), consumer);
+                        i = 0;
+                    }
+                    // last (or current uncompleted) chunk
+                    T_ARR chunk = (splSpineIndex == lastSpineIndex) ? splChunk : spine[lastSpineIndex];
+                    arrayForEach(chunk, i, lastSpineElementFence, consumer);
+                    // mark consumed
+                    splSpineIndex = lastSpineIndex;
+                    splElementIndex = lastSpineElementFence;
+                }
+            }
+
+            @Override
+            public T_SPLITR trySplit() {
+                if (splSpineIndex < lastSpineIndex) {
+                    // split just before last chunk (if it is full this means 50:50 split)
+                    T_SPLITR ret = newSpliterator(splSpineIndex, lastSpineIndex - 1,
+                                                  splElementIndex, arrayLength(spine[lastSpineIndex - 1]));
+                    // position us to start of last chunk
+                    splSpineIndex = lastSpineIndex;
+                    splElementIndex = 0;
+                    splChunk = spine[splSpineIndex];
+                    return ret;
+                }
+                else if (splSpineIndex == lastSpineIndex) {
+                    // Within a single chunk: split the remaining range in half.
+                    int t = (lastSpineElementFence - splElementIndex) / 2;
+                    if (t == 0)
+                        return null;
+                    else {
+                        T_SPLITR ret = arraySpliterator(splChunk, splElementIndex, t);
+                        splElementIndex += t;
+                        return ret;
+                    }
+                }
+                else {
+                    return null;
+                }
+            }
+        }
+    }
+
+ /**
+ * An ordered collection of {@code int} values.
+ */
+ static class OfInt extends SpinedBuffer.OfPrimitive<Integer, int[], IntConsumer>
+ implements IntConsumer {
+ OfInt() { }
+
+ OfInt(int initialCapacity) {
+ super(initialCapacity);
+ }
+
+ @Override
+ public void forEach(Consumer<? super Integer> consumer) {
+ if (consumer instanceof IntConsumer) {
+ forEach((IntConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling SpinedBuffer.OfInt.forEach(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ @Override
+ protected int[][] newArrayArray(int size) {
+ return new int[size][];
+ }
+
+ @Override
+ public int[] newArray(int size) {
+ return new int[size];
+ }
+
+ @Override
+ protected int arrayLength(int[] array) {
+ return array.length;
+ }
+
+ @Override
+ protected void arrayForEach(int[] array,
+ int from, int to,
+ IntConsumer consumer) {
+ for (int i = from; i < to; i++)
+ consumer.accept(array[i]);
+ }
+
+ @Override
+ public void accept(int i) {
+ preAccept();
+ curChunk[elementIndex++] = i;
+ }
+
+ public int get(long index) {
+ // Casts to int are safe since the spine array index is the index minus
+ // the prior element count from the current spine
+ int ch = chunkFor(index);
+ if (spineIndex == 0 && ch == 0)
+ return curChunk[(int) index];
+ else
+ return spine[ch][(int) (index - priorElementCount[ch])];
+ }
+
+ @Override
+ public PrimitiveIterator.OfInt iterator() {
+ return Spliterators.iterator(spliterator());
+ }
+
+ public Spliterator.OfInt spliterator() {
+ class Splitr extends BaseSpliterator<Spliterator.OfInt>
+ implements Spliterator.OfInt {
+ Splitr(int firstSpineIndex, int lastSpineIndex,
+ int firstSpineElementIndex, int lastSpineElementFence) {
+ super(firstSpineIndex, lastSpineIndex,
+ firstSpineElementIndex, lastSpineElementFence);
+ }
+
+ @Override
+ Splitr newSpliterator(int firstSpineIndex, int lastSpineIndex,
+ int firstSpineElementIndex, int lastSpineElementFence) {
+ return new Splitr(firstSpineIndex, lastSpineIndex,
+ firstSpineElementIndex, lastSpineElementFence);
+ }
+
+ @Override
+ void arrayForOne(int[] array, int index, IntConsumer consumer) {
+ consumer.accept(array[index]);
+ }
+
+ @Override
+ Spliterator.OfInt arraySpliterator(int[] array, int offset, int len) {
+ return Arrays.spliterator(array, offset, offset+len);
+ }
+ }
+ return new Splitr(0, spineIndex, 0, elementIndex);
+ }
+
+ @Override
+ public String toString() {
+ int[] array = asPrimitiveArray();
+ if (array.length < 200) {
+ return String.format("%s[length=%d, chunks=%d]%s",
+ getClass().getSimpleName(), array.length,
+ spineIndex, Arrays.toString(array));
+ }
+ else {
+ int[] array2 = Arrays.copyOf(array, 200);
+ return String.format("%s[length=%d, chunks=%d]%s...",
+ getClass().getSimpleName(), array.length,
+ spineIndex, Arrays.toString(array2));
+ }
+ }
+ }
+
+ /**
+ * An ordered collection of {@code long} values.
+ */
+ static class OfLong extends SpinedBuffer.OfPrimitive<Long, long[], LongConsumer>
+ implements LongConsumer {
+ OfLong() { }
+
+ OfLong(int initialCapacity) {
+ super(initialCapacity);
+ }
+
+ @Override
+ public void forEach(Consumer<? super Long> consumer) {
+ if (consumer instanceof LongConsumer) {
+ forEach((LongConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling SpinedBuffer.OfLong.forEach(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ @Override
+ protected long[][] newArrayArray(int size) {
+ return new long[size][];
+ }
+
+ @Override
+ public long[] newArray(int size) {
+ return new long[size];
+ }
+
+ @Override
+ protected int arrayLength(long[] array) {
+ return array.length;
+ }
+
+ @Override
+ protected void arrayForEach(long[] array,
+ int from, int to,
+ LongConsumer consumer) {
+ for (int i = from; i < to; i++)
+ consumer.accept(array[i]);
+ }
+
+ @Override
+ public void accept(long i) {
+ preAccept();
+ curChunk[elementIndex++] = i;
+ }
+
+ public long get(long index) {
+ // Casts to int are safe since the spine array index is the index minus
+ // the prior element count from the current spine
+ int ch = chunkFor(index);
+ if (spineIndex == 0 && ch == 0)
+ return curChunk[(int) index];
+ else
+ return spine[ch][(int) (index - priorElementCount[ch])];
+ }
+
+ @Override
+ public PrimitiveIterator.OfLong iterator() {
+ return Spliterators.iterator(spliterator());
+ }
+
+
+ public Spliterator.OfLong spliterator() {
+ class Splitr extends BaseSpliterator<Spliterator.OfLong>
+ implements Spliterator.OfLong {
+ Splitr(int firstSpineIndex, int lastSpineIndex,
+ int firstSpineElementIndex, int lastSpineElementFence) {
+ super(firstSpineIndex, lastSpineIndex,
+ firstSpineElementIndex, lastSpineElementFence);
+ }
+
+ @Override
+ Splitr newSpliterator(int firstSpineIndex, int lastSpineIndex,
+ int firstSpineElementIndex, int lastSpineElementFence) {
+ return new Splitr(firstSpineIndex, lastSpineIndex,
+ firstSpineElementIndex, lastSpineElementFence);
+ }
+
+ @Override
+ void arrayForOne(long[] array, int index, LongConsumer consumer) {
+ consumer.accept(array[index]);
+ }
+
+ @Override
+ Spliterator.OfLong arraySpliterator(long[] array, int offset, int len) {
+ return Arrays.spliterator(array, offset, offset+len);
+ }
+ }
+ return new Splitr(0, spineIndex, 0, elementIndex);
+ }
+
+ @Override
+ public String toString() {
+ long[] array = asPrimitiveArray();
+ if (array.length < 200) {
+ return String.format("%s[length=%d, chunks=%d]%s",
+ getClass().getSimpleName(), array.length,
+ spineIndex, Arrays.toString(array));
+ }
+ else {
+ long[] array2 = Arrays.copyOf(array, 200);
+ return String.format("%s[length=%d, chunks=%d]%s...",
+ getClass().getSimpleName(), array.length,
+ spineIndex, Arrays.toString(array2));
+ }
+ }
+ }
+
+ /**
+ * An ordered collection of {@code double} values.
+ */
+ static class OfDouble
+ extends SpinedBuffer.OfPrimitive<Double, double[], DoubleConsumer>
+ implements DoubleConsumer {
+ OfDouble() { }
+
+ OfDouble(int initialCapacity) {
+ super(initialCapacity);
+ }
+
+ @Override
+ public void forEach(Consumer<? super Double> consumer) {
+ if (consumer instanceof DoubleConsumer) {
+ forEach((DoubleConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling SpinedBuffer.OfDouble.forEach(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ @Override
+ protected double[][] newArrayArray(int size) {
+ return new double[size][];
+ }
+
+ @Override
+ public double[] newArray(int size) {
+ return new double[size];
+ }
+
+ @Override
+ protected int arrayLength(double[] array) {
+ return array.length;
+ }
+
+ @Override
+ protected void arrayForEach(double[] array,
+ int from, int to,
+ DoubleConsumer consumer) {
+ for (int i = from; i < to; i++)
+ consumer.accept(array[i]);
+ }
+
+ @Override
+ public void accept(double i) {
+ preAccept();
+ curChunk[elementIndex++] = i;
+ }
+
+ public double get(long index) {
+ // Casts to int are safe since the spine array index is the index minus
+ // the prior element count from the current spine
+ int ch = chunkFor(index);
+ if (spineIndex == 0 && ch == 0)
+ return curChunk[(int) index];
+ else
+ return spine[ch][(int) (index - priorElementCount[ch])];
+ }
+
+ @Override
+ public PrimitiveIterator.OfDouble iterator() {
+ return Spliterators.iterator(spliterator());
+ }
+
+ public Spliterator.OfDouble spliterator() {
+ class Splitr extends BaseSpliterator<Spliterator.OfDouble>
+ implements Spliterator.OfDouble {
+ Splitr(int firstSpineIndex, int lastSpineIndex,
+ int firstSpineElementIndex, int lastSpineElementFence) {
+ super(firstSpineIndex, lastSpineIndex,
+ firstSpineElementIndex, lastSpineElementFence);
+ }
+
+ @Override
+ Splitr newSpliterator(int firstSpineIndex, int lastSpineIndex,
+ int firstSpineElementIndex, int lastSpineElementFence) {
+ return new Splitr(firstSpineIndex, lastSpineIndex,
+ firstSpineElementIndex, lastSpineElementFence);
+ }
+
+ @Override
+ void arrayForOne(double[] array, int index, DoubleConsumer consumer) {
+ consumer.accept(array[index]);
+ }
+
+ @Override
+ Spliterator.OfDouble arraySpliterator(double[] array, int offset, int len) {
+ return Arrays.spliterator(array, offset, offset+len);
+ }
+ }
+ return new Splitr(0, spineIndex, 0, elementIndex);
+ }
+
+ @Override
+ public String toString() {
+ double[] array = asPrimitiveArray();
+ if (array.length < 200) {
+ return String.format("%s[length=%d, chunks=%d]%s",
+ getClass().getSimpleName(), array.length,
+ spineIndex, Arrays.toString(array));
+ }
+ else {
+ double[] array2 = Arrays.copyOf(array, 200);
+ return String.format("%s[length=%d, chunks=%d]%s...",
+ getClass().getSimpleName(), array.length,
+ spineIndex, Arrays.toString(array2));
+ }
+ }
+ }
+}
+
diff --git a/ojluni/src/main/java/java/util/stream/Stream.java b/ojluni/src/main/java/java/util/stream/Stream.java
new file mode 100644
index 0000000..d4cb9ff
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/Stream.java
@@ -0,0 +1,1142 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+import java.util.function.ToDoubleFunction;
+import java.util.function.ToIntFunction;
+import java.util.function.ToLongFunction;
+import java.util.function.UnaryOperator;
+
+/**
+ * A sequence of elements supporting sequential and parallel aggregate
+ * operations. The following example illustrates an aggregate operation using
+ * {@link Stream} and {@link IntStream}:
+ *
+ * <pre>{@code
+ * int sum = widgets.stream()
+ * .filter(w -> w.getColor() == RED)
+ * .mapToInt(w -> w.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * In this example, {@code widgets} is a {@code Collection<Widget>}. We create
+ * a stream of {@code Widget} objects via {@link Collection#stream Collection.stream()},
+ * filter it to produce a stream containing only the red widgets, and then
+ * transform it into a stream of {@code int} values representing the weight of
+ * each red widget. Then this stream is summed to produce a total weight.
+ *
+ * <p>In addition to {@code Stream}, which is a stream of object references,
+ * there are primitive specializations for {@link IntStream}, {@link LongStream},
+ * and {@link DoubleStream}, all of which are referred to as "streams" and
+ * conform to the characteristics and restrictions described here.
+ *
+ * <p>To perform a computation, stream
+ * <a href="package-summary.html#StreamOps">operations</a> are composed into a
+ * <em>stream pipeline</em>. A stream pipeline consists of a source (which
+ * might be an array, a collection, a generator function, an I/O channel,
+ * etc), zero or more <em>intermediate operations</em> (which transform a
+ * stream into another stream, such as {@link Stream#filter(Predicate)}), and a
+ * <em>terminal operation</em> (which produces a result or side-effect, such
+ * as {@link Stream#count()} or {@link Stream#forEach(Consumer)}).
+ * Streams are lazy; computation on the source data is only performed when the
+ * terminal operation is initiated, and source elements are consumed only
+ * as needed.
+ *
+ * <p>Collections and streams, while bearing some superficial similarities,
+ * have different goals. Collections are primarily concerned with the efficient
+ * management of, and access to, their elements. By contrast, streams do not
+ * provide a means to directly access or manipulate their elements, and are
+ * instead concerned with declaratively describing their source and the
+ * computational operations which will be performed in aggregate on that source.
+ * However, if the provided stream operations do not offer the desired
+ * functionality, the {@link #iterator()} and {@link #spliterator()} operations
+ * can be used to perform a controlled traversal.
+ *
+ * <p>A stream pipeline, like the "widgets" example above, can be viewed as
+ * a <em>query</em> on the stream source. Unless the source was explicitly
+ * designed for concurrent modification (such as a {@link ConcurrentHashMap}),
+ * unpredictable or erroneous behavior may result from modifying the stream
+ * source while it is being queried.
+ *
+ * <p>Most stream operations accept parameters that describe user-specified
+ * behavior, such as the lambda expression {@code w -> w.getWeight()} passed to
+ * {@code mapToInt} in the example above. To preserve correct behavior,
+ * these <em>behavioral parameters</em>:
+ * <ul>
+ * <li>must be <a href="package-summary.html#NonInterference">non-interfering</a>
+ * (they do not modify the stream source); and</li>
+ * <li>in most cases must be <a href="package-summary.html#Statelessness">stateless</a>
+ * (their result should not depend on any state that might change during execution
+ * of the stream pipeline).</li>
+ * </ul>
+ *
+ * <p>Such parameters are always instances of a
+ * <a href="../function/package-summary.html">functional interface</a> such
+ * as {@link java.util.function.Function}, and are often lambda expressions or
+ * method references. Unless otherwise specified these parameters must be
+ * <em>non-null</em>.
+ *
+ * <p>A stream should be operated on (invoking an intermediate or terminal stream
+ * operation) only once. This rules out, for example, "forked" streams, where
+ * the same source feeds two or more pipelines, or multiple traversals of the
+ * same stream. A stream implementation may throw {@link IllegalStateException}
+ * if it detects that the stream is being reused. However, since some stream
+ * operations may return their receiver rather than a new stream object, it may
+ * not be possible to detect reuse in all cases.
+ *
+ * <p>Streams have a {@link #close()} method and implement {@link AutoCloseable},
+ * but nearly all stream instances do not actually need to be closed after use.
+ * Generally, only streams whose source is an IO channel will require closing. Most streams
+ * are backed by collections, arrays, or generating functions, which require no
+ * special resource management. (If a stream does require closing, it can be
+ * declared as a resource in a {@code try}-with-resources statement.)
+ *
+ * <p>Stream pipelines may execute either sequentially or in
+ * <a href="package-summary.html#Parallelism">parallel</a>. This
+ * execution mode is a property of the stream. Streams are created
+ * with an initial choice of sequential or parallel execution. (For example,
+ * {@link Collection#stream() Collection.stream()} creates a sequential stream,
+ * and {@link Collection#parallelStream() Collection.parallelStream()} creates
+ * a parallel one.) This choice of execution mode may be modified by the
+ * {@link #sequential()} or {@link #parallel()} methods, and may be queried with
+ * the {@link #isParallel()} method.
+ *
+ * @param <T> the type of the stream elements
+ * @since 1.8
+ * @see IntStream
+ * @see LongStream
+ * @see DoubleStream
+ * @see <a href="package-summary.html">java.util.stream</a>
+ */
+public interface Stream<T> extends BaseStream<T, Stream<T>> {
+
+ /**
+ * Returns a stream consisting of the elements of this stream that match
+ * the given predicate.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to each element to determine if it
+ * should be included
+ * @return the new stream
+ */
+ Stream<T> filter(Predicate<? super T> predicate);
+
+ /**
+ * Returns a stream consisting of the results of applying the given
+ * function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param <R> The element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ <R> Stream<R> map(Function<? super T, ? extends R> mapper);
+
+ /**
+ * Returns an {@code IntStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">
+ * intermediate operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ IntStream mapToInt(ToIntFunction<? super T> mapper);
+
+ /**
+ * Returns a {@code LongStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ LongStream mapToLong(ToLongFunction<? super T> mapper);
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element
+ * @return the new stream
+ */
+ DoubleStream mapToDouble(ToDoubleFunction<? super T> mapper);
+
+ /**
+ * Returns a stream consisting of the results of replacing each element of
+ * this stream with the contents of a mapped stream produced by applying
+ * the provided mapping function to each element. Each mapped stream is
+ * {@link java.util.stream.BaseStream#close() closed} after its contents
+ * have been placed into this stream. (If a mapped stream is {@code null}
+ * an empty stream is used, instead.)
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @apiNote
+ * The {@code flatMap()} operation has the effect of applying a one-to-many
+ * transformation to the elements of the stream, and then flattening the
+ * resulting elements into a new stream.
+ *
+ * <p><b>Examples.</b>
+ *
+ * <p>If {@code orders} is a stream of purchase orders, and each purchase
+ * order contains a collection of line items, then the following produces a
+ * stream containing all the line items in all the orders:
+ * <pre>{@code
+ * orders.flatMap(order -> order.getLineItems().stream())...
+ * }</pre>
+ *
+ * <p>If {@code path} is the path to a file, then the following produces a
+ * stream of the {@code words} contained in that file:
+ * <pre>{@code
+ * Stream<String> lines = Files.lines(path, StandardCharsets.UTF_8);
+ * Stream<String> words = lines.flatMap(line -> Stream.of(line.split(" +")));
+ * }</pre>
+ * The {@code mapper} function passed to {@code flatMap} splits a line,
+ * using a simple regular expression, into an array of words, and then
+ * creates a stream of words from that array.
+ *
+ * @param <R> The element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element which produces a stream
+ * of new values
+ * @return the new stream
+ */
+ <R> Stream<R> flatMap(Function<? super T, ? extends Stream<? extends R>> mapper);
+
+ /**
+ * Returns an {@code IntStream} consisting of the results of replacing each
+ * element of this stream with the contents of a mapped stream produced by
+ * applying the provided mapping function to each element. Each mapped
+ * stream is {@link java.util.stream.BaseStream#close() closed} after its
+ * contents have been placed into this stream. (If a mapped stream is
+ * {@code null} an empty stream is used, instead.)
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element which produces a stream
+ * of new values
+ * @return the new stream
+ * @see #flatMap(Function)
+ */
+ IntStream flatMapToInt(Function<? super T, ? extends IntStream> mapper);
+
+ /**
+ * Returns a {@code LongStream} consisting of the results of replacing each
+ * element of this stream with the contents of a mapped stream produced by
+ * applying the provided mapping function to each element. Each mapped
+ * stream is {@link java.util.stream.BaseStream#close() closed} after its
+ * contents have been placed into this stream. (If a mapped stream is
+ * {@code null} an empty stream is used, instead.)
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element which produces a stream
+ * of new values
+ * @return the new stream
+ * @see #flatMap(Function)
+ */
+ LongStream flatMapToLong(Function<? super T, ? extends LongStream> mapper);
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the results of replacing
+ * each element of this stream with the contents of a mapped stream produced
+ * by applying the provided mapping function to each element. Each mapped
+ * stream is {@link java.util.stream.BaseStream#close() closed} after its
+ * contents have been placed into this stream. (If a mapped stream is
+ * {@code null} an empty stream is used, instead.)
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function to apply to each element which produces a stream
+ * of new values
+ * @return the new stream
+ * @see #flatMap(Function)
+ */
+ DoubleStream flatMapToDouble(Function<? super T, ? extends DoubleStream> mapper);
+
+ /**
+ * Returns a stream consisting of the distinct elements (according to
+ * {@link Object#equals(Object)}) of this stream.
+ *
+ * <p>For ordered streams, the selection of distinct elements is stable
+ * (for duplicated elements, the element appearing first in the encounter
+ * order is preserved.) For unordered streams, no stability guarantees
+ * are made.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @apiNote
+ * Preserving stability for {@code distinct()} in parallel pipelines is
+ * relatively expensive (requires that the operation act as a full barrier,
+ * with substantial buffering overhead), and stability is often not needed.
+ * Using an unordered stream source (such as {@link #generate(Supplier)})
+ * or removing the ordering constraint with {@link #unordered()} may result
+ * in significantly more efficient execution for {@code distinct()} in parallel
+ * pipelines, if the semantics of your situation permit. If consistency
+ * with encounter order is required, and you are experiencing poor performance
+ * or memory utilization with {@code distinct()} in parallel pipelines,
+ * switching to sequential execution with {@link #sequential()} may improve
+ * performance.
+ *
+ * @return the new stream
+ */
+ Stream<T> distinct();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, sorted
+ * according to natural order. If the elements of this stream are not
+ * {@code Comparable}, a {@code java.lang.ClassCastException} may be thrown
+ * when the terminal operation is executed.
+ *
+ * <p>For ordered streams, the sort is stable. For unordered streams, no
+ * stability guarantees are made.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ Stream<T> sorted();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, sorted
+ * according to the provided {@code Comparator}.
+ *
+ * <p>For ordered streams, the sort is stable. For unordered streams, no
+ * stability guarantees are made.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @param comparator a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * {@code Comparator} to be used to compare stream elements
+ * @return the new stream
+ */
+ Stream<T> sorted(Comparator<? super T> comparator);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, additionally
+ * performing the provided action on each element as elements are consumed
+ * from the resulting stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, the action may be called at
+ * whatever time and in whatever thread the element is made available by the
+ * upstream operation. If the action modifies shared state,
+ * it is responsible for providing the required synchronization.
+ *
+ * @apiNote This method exists mainly to support debugging, where you want
+ * to see the elements as they flow past a certain point in a pipeline:
+ * <pre>{@code
+ * Stream.of("one", "two", "three", "four")
+ * .filter(e -> e.length() > 3)
+ * .peek(e -> System.out.println("Filtered value: " + e))
+ * .map(String::toUpperCase)
+ * .peek(e -> System.out.println("Mapped value: " + e))
+ * .collect(Collectors.toList());
+ * }</pre>
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements as
+ * they are consumed from the stream
+ * @return the new stream
+ */
+ Stream<T> peek(Consumer<? super T> action);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, truncated
+ * to be no longer than {@code maxSize} in length.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @apiNote
+ * While {@code limit()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code maxSize}, since {@code limit(n)}
+ * is constrained to return not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(Supplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code limit()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code limit()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param maxSize the number of elements the stream should be limited to
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code maxSize} is negative
+ */
+ Stream<T> limit(long maxSize);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after discarding the first {@code n} elements of the stream.
+ * If this stream contains fewer than {@code n} elements then an
+ * empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @apiNote
+ * While {@code skip()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code n}, since {@code skip(n)}
+ * is constrained to skip not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(Supplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code skip()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code skip()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param n the number of leading elements to skip
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code n} is negative
+ */
+ Stream<T> skip(long n);
+
+ /**
+ * Performs an action for each element of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic.
+ * For parallel stream pipelines, this operation does <em>not</em>
+ * guarantee to respect the encounter order of the stream, as doing so
+ * would sacrifice the benefit of parallelism. For any given element, the
+ * action may be performed at whatever time and in whatever thread the
+ * library chooses. If the action accesses shared state, it is
+ * responsible for providing the required synchronization.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ */
+ void forEach(Consumer<? super T> action);
+
+ /**
+ * Performs an action for each element of this stream, in the encounter
+ * order of the stream if the stream has a defined encounter order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>This operation processes the elements one at a time, in encounter
+ * order if one exists. Performing the action for one element
+ * <a href="../concurrent/package-summary.html#MemoryVisibility"><i>happens-before</i></a>
+ * performing the action for subsequent elements, but for any given element,
+ * the action may be performed in whatever thread the library chooses.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ * @see #forEach(Consumer)
+ */
+ void forEachOrdered(Consumer<? super T> action);
+
+ /**
+ * Returns an array containing the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an array containing the elements of this stream
+ */
+ Object[] toArray();
+
+ /**
+ * Returns an array containing the elements of this stream, using the
+ * provided {@code generator} function to allocate the returned array, as
+ * well as any additional arrays that might be required for a partitioned
+ * execution or for resizing.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote
+ * The generator function takes an integer, which is the size of the
+ * desired array, and produces an array of the desired size. This can be
+ * concisely expressed with an array constructor reference:
+ * <pre>{@code
+ * Person[] men = people.stream()
+ * .filter(p -> p.getGender() == MALE)
+ * .toArray(Person[]::new);
+ * }</pre>
+ *
+ * @param <A> the element type of the resulting array
+ * @param generator a function which produces a new array of the desired
+ * type and the provided length
+ * @return an array containing the elements in this stream
+ * @throws ArrayStoreException if the runtime type of the array returned
+ * from the array generator is not a supertype of the runtime type of every
+ * element in this stream
+ */
+ <A> A[] toArray(IntFunction<A[]> generator);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity value and an
+ * <a href="package-summary.html#Associativity">associative</a>
+ * accumulation function, and returns the reduced value. This is equivalent
+ * to:
+ * <pre>{@code
+ * T result = identity;
+ * for (T element : this stream)
+ * result = accumulator.apply(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the accumulator
+ * function. This means that for all {@code t},
+ * {@code accumulator.apply(identity, t)} is equal to {@code t}.
+ * The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Sum, min, max, average, and string concatenation are all special
+ * cases of reduction. Summing a stream of numbers can be expressed as:
+ *
+ * <pre>{@code
+ * Integer sum = integers.reduce(0, (a, b) -> a+b);
+ * }</pre>
+ *
+ * or:
+ *
+ * <pre>{@code
+ * Integer sum = integers.reduce(0, Integer::sum);
+ * }</pre>
+ *
+ * <p>While this may seem a more roundabout way to perform an aggregation
+ * compared to simply mutating a running total in a loop, reduction
+ * operations parallelize more gracefully, without needing additional
+ * synchronization and with greatly reduced risk of data races.
+ *
+ * @param identity the identity value for the accumulating function
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values
+ * @return the result of the reduction
+ */
+ T reduce(T identity, BinaryOperator<T> accumulator);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using an
+ * <a href="package-summary.html#Associativity">associative</a> accumulation
+ * function, and returns an {@code Optional} describing the reduced value,
+ * if any. This is equivalent to:
+ * <pre>{@code
+ * boolean foundAny = false;
+ * T result = null;
+ * for (T element : this stream) {
+ * if (!foundAny) {
+ * foundAny = true;
+ * result = element;
+ * }
+ * else
+ * result = accumulator.apply(result, element);
+ * }
+ * return foundAny ? Optional.of(result) : Optional.empty();
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values
+ * @return an {@link Optional} describing the result of the reduction
+ * @throws NullPointerException if the result of the reduction is null
+ * @see #reduce(Object, BinaryOperator)
+ * @see #min(Comparator)
+ * @see #max(Comparator)
+ */
+ Optional<T> reduce(BinaryOperator<T> accumulator);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity, accumulation and
+ * combining functions. This is equivalent to:
+ * <pre>{@code
+ * U result = identity;
+ * for (T element : this stream)
+ * result = accumulator.apply(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the combiner
+ * function. This means that for all {@code u}, {@code combiner(identity, u)}
+ * is equal to {@code u}. Additionally, the {@code combiner} function
+ * must be compatible with the {@code accumulator} function; for all
+ * {@code u} and {@code t}, the following must hold:
+ * <pre>{@code
+ * combiner.apply(u, accumulator.apply(identity, t)) == accumulator.apply(u, t)
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Many reductions using this form can be represented more simply
+ * by an explicit combination of {@code map} and {@code reduce} operations.
+ * The {@code accumulator} function acts as a fused mapper and accumulator,
+ * which can sometimes be more efficient than separate mapping and reduction,
+ * such as when knowing the previously reduced value allows you to avoid
+ * some computation.
+ *
+ * @param <U> The type of the result
+ * @param identity the identity value for the combiner function
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for incorporating an additional element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values, which must be
+ * compatible with the accumulator function
+ * @return the result of the reduction
+ * @see #reduce(BinaryOperator)
+ * @see #reduce(Object, BinaryOperator)
+ */
+ <U> U reduce(U identity,
+ BiFunction<U, ? super T, U> accumulator,
+ BinaryOperator<U> combiner);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream. A mutable
+ * reduction is one in which the reduced value is a mutable result container,
+ * such as an {@code ArrayList}, and elements are incorporated by updating
+ * the state of the result rather than by replacing the result. This
+ * produces a result equivalent to:
+ * <pre>{@code
+ * R result = supplier.get();
+ * for (T element : this stream)
+ * accumulator.accept(result, element);
+ * return result;
+ * }</pre>
+ *
+ * <p>Like {@link #reduce(Object, BinaryOperator)}, {@code collect} operations
+ * can be parallelized without requiring additional synchronization.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote There are many existing classes in the JDK whose signatures are
+ * well-suited for use with method references as arguments to {@code collect()}.
+ * For example, the following will accumulate strings into an {@code ArrayList}:
+ * <pre>{@code
+ * List<String> asList = stringStream.collect(ArrayList::new, ArrayList::add,
+ * ArrayList::addAll);
+ * }</pre>
+ *
+     * <p>The following takes a stream of strings and concatenates them into a
+ * single string:
+ * <pre>{@code
+ * String concat = stringStream.collect(StringBuilder::new, StringBuilder::append,
+ * StringBuilder::append)
+ * .toString();
+ * }</pre>
+ *
+ * @param <R> type of the result
+ * @param supplier a function that creates a new result container. For a
+ * parallel execution, this function may be called
+ * multiple times and must return a fresh value each time.
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for incorporating an additional element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>,
+ * <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * function for combining two values, which must be
+ * compatible with the accumulator function
+ * @return the result of the reduction
+ */
+ <R> R collect(Supplier<R> supplier,
+ BiConsumer<R, ? super T> accumulator,
+ BiConsumer<R, R> combiner);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream using a
+ * {@code Collector}. A {@code Collector}
+ * encapsulates the functions used as arguments to
+ * {@link #collect(Supplier, BiConsumer, BiConsumer)}, allowing for reuse of
+ * collection strategies and composition of collect operations such as
+ * multiple-level grouping or partitioning.
+ *
+ * <p>If the stream is parallel, and the {@code Collector}
+ * is {@link Collector.Characteristics#CONCURRENT concurrent}, and
+ * either the stream is unordered or the collector is
+ * {@link Collector.Characteristics#UNORDERED unordered},
+ * then a concurrent reduction will be performed (see {@link Collector} for
+ * details on concurrent reduction.)
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>When executed in parallel, multiple intermediate results may be
+ * instantiated, populated, and merged so as to maintain isolation of
+ * mutable data structures. Therefore, even when executed in parallel
+ * with non-thread-safe data structures (such as {@code ArrayList}), no
+ * additional synchronization is needed for a parallel reduction.
+ *
+ * @apiNote
+ * The following will accumulate strings into an ArrayList:
+ * <pre>{@code
+ * List<String> asList = stringStream.collect(Collectors.toList());
+ * }</pre>
+ *
+ * <p>The following will classify {@code Person} objects by city:
+ * <pre>{@code
+ * Map<String, List<Person>> peopleByCity
+ * = personStream.collect(Collectors.groupingBy(Person::getCity));
+ * }</pre>
+ *
+ * <p>The following will classify {@code Person} objects by state and city,
+ * cascading two {@code Collector}s together:
+ * <pre>{@code
+ * Map<String, Map<String, List<Person>>> peopleByStateAndCity
+ * = personStream.collect(Collectors.groupingBy(Person::getState,
+ * Collectors.groupingBy(Person::getCity)));
+ * }</pre>
+ *
+ * @param <R> the type of the result
+ * @param <A> the intermediate accumulation type of the {@code Collector}
+ * @param collector the {@code Collector} describing the reduction
+ * @return the result of the reduction
+ * @see #collect(Supplier, BiConsumer, BiConsumer)
+ * @see Collectors
+ */
+ <R, A> R collect(Collector<? super T, A, R> collector);
+
+ /**
+ * Returns the minimum element of this stream according to the provided
+ * {@code Comparator}. This is a special case of a
+ * <a href="package-summary.html#Reduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @param comparator a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * {@code Comparator} to compare elements of this stream
+ * @return an {@code Optional} describing the minimum element of this stream,
+ * or an empty {@code Optional} if the stream is empty
+ * @throws NullPointerException if the minimum element is null
+ */
+ Optional<T> min(Comparator<? super T> comparator);
+
+ /**
+ * Returns the maximum element of this stream according to the provided
+ * {@code Comparator}. This is a special case of a
+ * <a href="package-summary.html#Reduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param comparator a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * {@code Comparator} to compare elements of this stream
+ * @return an {@code Optional} describing the maximum element of this stream,
+ * or an empty {@code Optional} if the stream is empty
+ * @throws NullPointerException if the maximum element is null
+ */
+ Optional<T> max(Comparator<? super T> comparator);
+
+ /**
+ * Returns the count of elements in this stream. This is a special case of
+ * a <a href="package-summary.html#Reduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return mapToLong(e -> 1L).sum();
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return the count of elements in this stream
+ */
+ long count();
+
+ /**
+ * Returns whether any elements of this stream match the provided
+ * predicate. May not evaluate the predicate on all elements if not
+ * necessary for determining the result. If the stream is empty then
+ * {@code false} is returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>existential quantification</em> of the
+ * predicate over the elements of the stream (for some x P(x)).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if any elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean anyMatch(Predicate<? super T> predicate);
+
+ /**
+ * Returns whether all elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result. If the stream is empty then {@code true} is
+ * returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>universal quantification</em> of the
+ * predicate over the elements of the stream (for all x P(x)). If the
+ * stream is empty, the quantification is said to be <em>vacuously
+ * satisfied</em> and is always {@code true} (regardless of P(x)).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if either all elements of the stream match the
+ * provided predicate or the stream is empty, otherwise {@code false}
+ */
+ boolean allMatch(Predicate<? super T> predicate);
+
+ /**
+ * Returns whether no elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result. If the stream is empty then {@code true} is
+ * returned and the predicate is not evaluated.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @apiNote
+ * This method evaluates the <em>universal quantification</em> of the
+ * negated predicate over the elements of the stream (for all x ~P(x)). If
+ * the stream is empty, the quantification is said to be vacuously satisfied
+ * and is always {@code true}, regardless of P(x).
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
+ * <a href="package-summary.html#Statelessness">stateless</a>
+ * predicate to apply to elements of this stream
+ * @return {@code true} if either no elements of the stream match the
+ * provided predicate or the stream is empty, otherwise {@code false}
+ */
+ boolean noneMatch(Predicate<? super T> predicate);
+
+ /**
+ * Returns an {@link Optional} describing the first element of this stream,
+ * or an empty {@code Optional} if the stream is empty. If the stream has
+ * no encounter order, then any element may be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @return an {@code Optional} describing the first element of this stream,
+ * or an empty {@code Optional} if the stream is empty
+ * @throws NullPointerException if the element selected is null
+ */
+ Optional<T> findFirst();
+
+ /**
+ * Returns an {@link Optional} describing some element of the stream, or an
+ * empty {@code Optional} if the stream is empty.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic; it is
+ * free to select any element in the stream. This is to allow for maximal
+ * performance in parallel operations; the cost is that multiple invocations
+ * on the same source may not return the same result. (If a stable result
+ * is desired, use {@link #findFirst()} instead.)
+ *
+ * @return an {@code Optional} describing some element of this stream, or an
+ * empty {@code Optional} if the stream is empty
+ * @throws NullPointerException if the element selected is null
+ * @see #findFirst()
+ */
+ Optional<T> findAny();
+
+ // Static factories
+
+ /**
+ * Returns a builder for a {@code Stream}.
+ *
+ * @param <T> type of elements
+ * @return a stream builder
+ */
+ public static<T> Builder<T> builder() {
+ return new Streams.StreamBuilderImpl<>();
+ }
+
+ /**
+ * Returns an empty sequential {@code Stream}.
+ *
+ * @param <T> the type of stream elements
+ * @return an empty sequential stream
+ */
+ public static<T> Stream<T> empty() {
+ return StreamSupport.stream(Spliterators.<T>emptySpliterator(), false);
+ }
+
+ /**
+ * Returns a sequential {@code Stream} containing a single element.
+ *
+ * @param t the single element
+ * @param <T> the type of stream elements
+ * @return a singleton sequential stream
+ */
+ public static<T> Stream<T> of(T t) {
+ return StreamSupport.stream(new Streams.StreamBuilderImpl<>(t), false);
+ }
+
+ /**
+ * Returns a sequential ordered stream whose elements are the specified values.
+ *
+ * @param <T> the type of stream elements
+ * @param values the elements of the new stream
+ * @return the new stream
+ */
+ @SafeVarargs
+ @SuppressWarnings("varargs") // Creating a stream from an array is safe
+ public static<T> Stream<T> of(T... values) {
+ return Arrays.stream(values);
+ }
+
+ /**
+ * Returns an infinite sequential ordered {@code Stream} produced by iterative
+ * application of a function {@code f} to an initial element {@code seed},
+ * producing a {@code Stream} consisting of {@code seed}, {@code f(seed)},
+ * {@code f(f(seed))}, etc.
+ *
+ * <p>The first element (position {@code 0}) in the {@code Stream} will be
+ * the provided {@code seed}. For {@code n > 0}, the element at position
+ * {@code n}, will be the result of applying the function {@code f} to the
+ * element at position {@code n - 1}.
+ *
+ * @param <T> the type of stream elements
+ * @param seed the initial element
+     * @param f a function to be applied to the previous element to produce
+ * a new element
+ * @return a new sequential {@code Stream}
+ */
+ public static<T> Stream<T> iterate(final T seed, final UnaryOperator<T> f) {
+ Objects.requireNonNull(f);
+ final Iterator<T> iterator = new Iterator<T>() {
+ @SuppressWarnings("unchecked")
+ T t = (T) Streams.NONE;
+
+ @Override
+ public boolean hasNext() {
+ return true;
+ }
+
+ @Override
+ public T next() {
+ return t = (t == Streams.NONE) ? seed : f.apply(t);
+ }
+ };
+ return StreamSupport.stream(Spliterators.spliteratorUnknownSize(
+ iterator,
+ Spliterator.ORDERED | Spliterator.IMMUTABLE), false);
+ }
+
+ /**
+ * Returns an infinite sequential unordered stream where each element is
+ * generated by the provided {@code Supplier}. This is suitable for
+ * generating constant streams, streams of random elements, etc.
+ *
+ * @param <T> the type of stream elements
+ * @param s the {@code Supplier} of generated elements
+ * @return a new infinite sequential unordered {@code Stream}
+ */
+ public static<T> Stream<T> generate(Supplier<T> s) {
+ Objects.requireNonNull(s);
+ return StreamSupport.stream(
+ new StreamSpliterators.InfiniteSupplyingSpliterator.OfRef<>(Long.MAX_VALUE, s), false);
+ }
+
+ /**
+ * Creates a lazily concatenated stream whose elements are all the
+ * elements of the first stream followed by all the elements of the
+ * second stream. The resulting stream is ordered if both
+ * of the input streams are ordered, and parallel if either of the input
+ * streams is parallel. When the resulting stream is closed, the close
+ * handlers for both input streams are invoked.
+ *
+ * @implNote
+ * Use caution when constructing streams from repeated concatenation.
+ * Accessing an element of a deeply concatenated stream can result in deep
+     * call chains, or even {@code StackOverflowError}.
+ *
+ * @param <T> The type of stream elements
+ * @param a the first stream
+ * @param b the second stream
+ * @return the concatenation of the two input streams
+ */
+ public static <T> Stream<T> concat(Stream<? extends T> a, Stream<? extends T> b) {
+ Objects.requireNonNull(a);
+ Objects.requireNonNull(b);
+
+ @SuppressWarnings("unchecked")
+ Spliterator<T> split = new Streams.ConcatSpliterator.OfRef<>(
+ (Spliterator<T>) a.spliterator(), (Spliterator<T>) b.spliterator());
+ Stream<T> stream = StreamSupport.stream(split, a.isParallel() || b.isParallel());
+ return stream.onClose(Streams.composedClose(a, b));
+ }
+
+ /**
+ * A mutable builder for a {@code Stream}. This allows the creation of a
+ * {@code Stream} by generating elements individually and adding them to the
+ * {@code Builder} (without the copying overhead that comes from using
+ * an {@code ArrayList} as a temporary buffer.)
+ *
+ * <p>A stream builder has a lifecycle, which starts in a building
+ * phase, during which elements can be added, and then transitions to a built
+ * phase, after which elements may not be added. The built phase begins
+ * when the {@link #build()} method is called, which creates an ordered
+ * {@code Stream} whose elements are the elements that were added to the stream
+ * builder, in the order they were added.
+ *
+ * @param <T> the type of stream elements
+ * @see Stream#builder()
+ * @since 1.8
+ */
+ public interface Builder<T> extends Consumer<T> {
+
+ /**
+ * Adds an element to the stream being built.
+ *
+ * @throws IllegalStateException if the builder has already transitioned to
+ * the built state
+ */
+ @Override
+ void accept(T t);
+
+ /**
+ * Adds an element to the stream being built.
+ *
+ * @implSpec
+ * The default implementation behaves as if:
+ * <pre>{@code
+ * accept(t)
+ * return this;
+ * }</pre>
+ *
+ * @param t the element to add
+ * @return {@code this} builder
+ * @throws IllegalStateException if the builder has already transitioned to
+ * the built state
+ */
+ default Builder<T> add(T t) {
+ accept(t);
+ return this;
+ }
+
+ /**
+ * Builds the stream, transitioning this builder to the built state.
+ * An {@code IllegalStateException} is thrown if there are further attempts
+ * to operate on the builder after it has entered the built state.
+ *
+ * @return the built stream
+ * @throws IllegalStateException if the builder has already transitioned to
+ * the built state
+ */
+ Stream<T> build();
+
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/StreamOpFlag.java b/ojluni/src/main/java/java/util/stream/StreamOpFlag.java
new file mode 100644
index 0000000..8fecfed
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/StreamOpFlag.java
@@ -0,0 +1,753 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.Spliterator;
+
+/**
+ * Flags corresponding to characteristics of streams and operations. Flags are
+ * utilized by the stream framework to control, specialize or optimize
+ * computation.
+ *
+ * <p>
+ * Stream flags may be used to describe characteristics of several different
+ * entities associated with streams: stream sources, intermediate operations,
+ * and terminal operations. Not all stream flags are meaningful for all
+ * entities; the following table summarizes which flags are meaningful in what
+ * contexts:
+ *
+ * <div>
+ * <table>
+ * <caption>Type Characteristics</caption>
+ * <thead class="tableSubHeadingColor">
+ * <tr>
+ * <th colspan="2"> </th>
+ * <th>{@code DISTINCT}</th>
+ * <th>{@code SORTED}</th>
+ * <th>{@code ORDERED}</th>
+ * <th>{@code SIZED}</th>
+ * <th>{@code SHORT_CIRCUIT}</th>
+ * </tr>
+ * </thead>
+ * <tbody>
+ * <tr>
+ * <th colspan="2" class="tableSubHeadingColor">Stream source</th>
+ * <td>Y</td>
+ * <td>Y</td>
+ * <td>Y</td>
+ * <td>Y</td>
+ * <td>N</td>
+ * </tr>
+ * <tr>
+ * <th colspan="2" class="tableSubHeadingColor">Intermediate operation</th>
+ * <td>PCI</td>
+ * <td>PCI</td>
+ * <td>PCI</td>
+ * <td>PC</td>
+ * <td>PI</td>
+ * </tr>
+ * <tr>
+ * <th colspan="2" class="tableSubHeadingColor">Terminal operation</th>
+ * <td>N</td>
+ * <td>N</td>
+ * <td>PC</td>
+ * <td>N</td>
+ * <td>PI</td>
+ * </tr>
+ * </tbody>
+ * <tfoot>
+ * <tr>
+ * <th class="tableSubHeadingColor" colspan="2">Legend</th>
+ * <th colspan="6" rowspan="7"> </th>
+ * </tr>
+ * <tr>
+ * <th class="tableSubHeadingColor">Flag</th>
+ * <th class="tableSubHeadingColor">Meaning</th>
+ * <th colspan="6"></th>
+ * </tr>
+ * <tr><td>Y</td><td>Allowed</td></tr>
+ * <tr><td>N</td><td>Invalid</td></tr>
+ * <tr><td>P</td><td>Preserves</td></tr>
+ * <tr><td>C</td><td>Clears</td></tr>
+ * <tr><td>I</td><td>Injects</td></tr>
+ * </tfoot>
+ * </table>
+ * </div>
+ *
+ * <p>In the above table, "PCI" means "may preserve, clear, or inject"; "PC"
+ * means "may preserve or clear", "PI" means "may preserve or inject", and "N"
+ * means "not valid".
+ *
+ * <p>Stream flags are represented by unioned bit sets, so that a single word
+ * may describe all the characteristics of a given stream entity, and that, for
+ * example, the flags for a stream source can be efficiently combined with the
+ * flags for later operations on that stream.
+ *
+ * <p>The bit masks {@link #STREAM_MASK}, {@link #OP_MASK}, and
+ * {@link #TERMINAL_OP_MASK} can be ANDed with a bit set of stream flags to
+ * produce a mask containing only the valid flags for that entity type.
+ *
+ * <p>When describing a stream source, one only need describe what
+ * characteristics that stream has; when describing a stream operation, one need
+ * describe whether the operation preserves, injects, or clears that
+ * characteristic. Accordingly, two bits are used for each flag, so as to allow
+ * representing not only the presence of a characteristic, but how an
+ * operation modifies that characteristic. There are two common forms in which
+ * flag bits are combined into an {@code int} bit set. <em>Stream flags</em>
+ * are a unioned bit set constructed by ORing the enum characteristic values of
+ * {@link #set()} (or, more commonly, ORing the corresponding static named
+ * constants prefixed with {@code IS_}). <em>Operation flags</em> are a unioned
+ * bit set constructed by ORing the enum characteristic values of {@link #set()}
+ * or {@link #clear()} (to inject, or clear, respectively, the corresponding
+ * flag), or more commonly ORing the corresponding named constants prefixed with
+ * {@code IS_} or {@code NOT_}. Flags that are not marked with {@code IS_} or
+ * {@code NOT_} are implicitly treated as preserved. Care must be taken when
+ * combining bitsets that the correct combining operations are applied in the
+ * correct order.
+ *
+ * <p>
+ * With the exception of {@link #SHORT_CIRCUIT}, stream characteristics can be
+ * derived from the equivalent {@link java.util.Spliterator} characteristics:
+ * {@link java.util.Spliterator#DISTINCT}, {@link java.util.Spliterator#SORTED},
+ * {@link java.util.Spliterator#ORDERED}, and
+ * {@link java.util.Spliterator#SIZED}. A spliterator characteristics bit set
+ * can be converted to stream flags using the method
+ * {@link #fromCharacteristics(java.util.Spliterator)} and converted back using
+ * {@link #toCharacteristics(int)}. (The bit set
+ * {@link #SPLITERATOR_CHARACTERISTICS_MASK} is used to AND with a bit set to
+ * produce a valid spliterator characteristics bit set that can be converted to
+ * stream flags.)
+ *
+ * <p>
+ * The source of a stream encapsulates a spliterator. The characteristics of
+ * that source spliterator when transformed to stream flags will be a proper
+ * subset of stream flags of that stream.
+ * For example:
+ * <pre> {@code
+ * Spliterator s = ...;
+ * Stream stream = Streams.stream(s);
+ * flagsFromSplitr = fromCharacteristics(s.characteristics());
+ * assert((flagsFromSplitr & stream.getStreamFlags()) == flagsFromSplitr);
+ * }</pre>
+ *
+ * <p>
+ * An intermediate operation, performed on an input stream to create a new
+ * output stream, may preserve, clear or inject stream or operation
+ * characteristics. Similarly, a terminal operation, performed on an input
+ * stream to produce an output result may preserve, clear or inject stream or
+ * operation characteristics. Preservation means that if that characteristic
+ * is present on the input, then it is also present on the output. Clearing
+ * means that the characteristic is not present on the output regardless of the
+ * input. Injection means that the characteristic is present on the output
+ * regardless of the input. If a characteristic is not cleared or injected then
+ * it is implicitly preserved.
+ *
+ * <p>
+ * A pipeline consists of a stream source encapsulating a spliterator, one or
+ * more intermediate operations, and finally a terminal operation that produces
+ * a result. At each stage of the pipeline, a combined stream and operation
+ * flags can be calculated, using {@link #combineOpFlags(int, int)}. Such flags
+ * ensure that preservation, clearing and injecting information is retained at
+ * each stage.
+ *
+ * The combined stream and operation flags for the source stage of the pipeline
+ * is calculated as follows:
+ * <pre> {@code
+ * int flagsForSourceStage = combineOpFlags(sourceFlags, INITIAL_OPS_VALUE);
+ * }</pre>
+ *
+ * The combined stream and operation flags of each subsequent intermediate
+ * operation stage in the pipeline is calculated as follows:
+ * <pre> {@code
+ * int flagsForThisStage = combineOpFlags(flagsForPreviousStage, thisOpFlags);
+ * }</pre>
+ *
+ * Finally the flags output from the last intermediate operation of the pipeline
+ * are combined with the operation flags of the terminal operation to produce
+ * the flags output from the pipeline.
+ *
+ * <p>Those flags can then be used to apply optimizations. For example, if
+ * {@code SIZED.isKnown(flags)} returns true then the stream size remains
+ * constant throughout the pipeline, this information can be utilized to
+ * pre-allocate data structures and combined with
+ * {@link java.util.Spliterator#SUBSIZED} that information can be utilized to
+ * perform concurrent in-place updates into a shared array.
+ *
+ * For specific details see the {@link AbstractPipeline} constructors.
+ *
+ * @since 1.8
+ */
+enum StreamOpFlag {
+
+ /*
+ * Each characteristic takes up 2 bits in a bit set to accommodate
+ * preserving, clearing and setting/injecting information.
+ *
+ * This applies to stream flags, intermediate/terminal operation flags, and
+ * combined stream and operation flags. Even though the former only requires
+ * 1 bit of information per characteristic, it is more efficient when
+ * combining flags to align set and inject bits.
+ *
+ * Characteristics belong to certain types, see the Type enum. Bit masks for
+ * the types are constructed as per the following table:
+ *
+ * DISTINCT SORTED ORDERED SIZED SHORT_CIRCUIT
+ * SPLITERATOR 01 01 01 01 00
+ * STREAM 01 01 01 01 00
+ * OP 11 11 11 10 01
+ * TERMINAL_OP 00 00 10 00 01
+ * UPSTREAM_TERMINAL_OP 00 00 10 00 00
+ *
+ * 01 = set/inject
+ * 10 = clear
+ * 11 = preserve
+ *
+ * Construction of the columns is performed using a simple builder for
+ * non-zero values.
+ */
+
+
+ // The following flags correspond to characteristics on Spliterator
+ // and the values MUST be equal.
+ //
+
+ /**
+ * Characteristic value signifying that, for each pair of
+ * encountered elements in a stream {@code x, y}, {@code !x.equals(y)}.
+ * <p>
+ * A stream may have this value or an intermediate operation can preserve,
+ * clear or inject this value.
+ */
+ // 0, 0x00000001
+ // Matches Spliterator.DISTINCT
+ DISTINCT(0,
+ set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP)),
+
+ /**
+ * Characteristic value signifying that encounter order follows a natural
+ * sort order of comparable elements.
+ * <p>
+ * A stream can have this value or an intermediate operation can preserve,
+ * clear or inject this value.
+ * <p>
+ * Note: The {@link java.util.Spliterator#SORTED} characteristic can define
+ * a sort order with an associated non-null comparator. Augmenting flag
+ * state with additional properties such that those properties can be passed
+ * to operations requires some disruptive changes for a singular use-case.
+ * Furthermore, comparing comparators for equality beyond that of identity
+ * is likely to be unreliable. Therefore the {@code SORTED} characteristic
+ * for a defined non-natural sort order is not mapped internally to the
+ * {@code SORTED} flag.
+ */
+ // 1, 0x00000004
+ // Matches Spliterator.SORTED
+ SORTED(1,
+ set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP)),
+
+ /**
+ * Characteristic value signifying that an encounter order is
+ * defined for stream elements.
+ * <p>
+ * A stream can have this value, an intermediate operation can preserve,
+ * clear or inject this value, or a terminal operation can preserve or clear
+ * this value.
+ */
+ // 2, 0x00000010
+ // Matches Spliterator.ORDERED
+ ORDERED(2,
+ set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP).clear(Type.TERMINAL_OP)
+ .clear(Type.UPSTREAM_TERMINAL_OP)),
+
+ /**
+ * Characteristic value signifying that size of the stream
+ * is of a known finite size that is equal to the known finite
+ * size of the source spliterator input to the first stream
+ * in the pipeline.
+ * <p>
+ * A stream can have this value or an intermediate operation can preserve or
+ * clear this value.
+ */
+ // 3, 0x00000040
+ // Matches Spliterator.SIZED
+ SIZED(3,
+ set(Type.SPLITERATOR).set(Type.STREAM).clear(Type.OP)),
+
+ // The following Spliterator characteristics are not currently used but a
+ // gap in the bit set is deliberately retained to enable corresponding
+ // stream flags if/when required without modification to other flag values.
+ //
+ // 4, 0x00000100 NONNULL(4, ...
+ // 5, 0x00000400 IMMUTABLE(5, ...
+ // 6, 0x00001000 CONCURRENT(6, ...
+ // 7, 0x00004000 SUBSIZED(7, ...
+
+ // The following 4 flags are currently undefined and free for any further
+ // spliterator characteristics.
+ //
+ // 8, 0x00010000
+ // 9, 0x00040000
+ // 10, 0x00100000
+ // 11, 0x00400000
+
+ // The following flags are specific to streams and operations
+ //
+
+ /**
+ * Characteristic value signifying that an operation may short-circuit the
+ * stream.
+ * <p>
+ * An intermediate operation can preserve or inject this value,
+ * or a terminal operation can preserve or inject this value.
+ */
+ // 12, 0x01000000
+ SHORT_CIRCUIT(12,
+ set(Type.OP).set(Type.TERMINAL_OP));
+
+ // The following 3 flags are currently undefined and free for any further
+ // stream flags if/when required
+ //
+ // 13, 0x04000000
+ // 14, 0x10000000
+ // 15, 0x40000000
+
+ /**
+ * Type of a flag
+ */
+ enum Type {
+ /**
+ * The flag is associated with spliterator characteristics.
+ */
+ SPLITERATOR,
+
+ /**
+ * The flag is associated with stream flags.
+ */
+ STREAM,
+
+ /**
+ * The flag is associated with intermediate operation flags.
+ */
+ OP,
+
+ /**
+ * The flag is associated with terminal operation flags.
+ */
+ TERMINAL_OP,
+
+ /**
+ * The flag is associated with terminal operation flags that are
+ * propagated upstream across the last stateful operation boundary
+ */
+ UPSTREAM_TERMINAL_OP
+ }
+
+ /**
+ * The bit pattern for setting/injecting a flag.
+ */
+ private static final int SET_BITS = 0b01;
+
+ /**
+ * The bit pattern for clearing a flag.
+ */
+ private static final int CLEAR_BITS = 0b10;
+
+ /**
+ * The bit pattern for preserving a flag.
+ */
+ private static final int PRESERVE_BITS = 0b11;
+
+ private static MaskBuilder set(Type t) {
+ return new MaskBuilder(new EnumMap<>(Type.class)).set(t);
+ }
+
+ /**
+ * Accumulates the per-Type 2-bit patterns (set/clear/preserve) for one
+ * flag; build() fills any unspecified type with 0b00 (not applicable).
+ */
+ private static class MaskBuilder {
+ final Map<Type, Integer> map;
+
+ MaskBuilder(Map<Type, Integer> map) {
+ this.map = map;
+ }
+
+ MaskBuilder mask(Type t, Integer i) {
+ map.put(t, i);
+ return this;
+ }
+
+ MaskBuilder set(Type t) {
+ return mask(t, SET_BITS);
+ }
+
+ MaskBuilder clear(Type t) {
+ return mask(t, CLEAR_BITS);
+ }
+
+ MaskBuilder setAndClear(Type t) {
+ return mask(t, PRESERVE_BITS);
+ }
+
+ Map<Type, Integer> build() {
+ for (Type t : Type.values()) {
+ map.putIfAbsent(t, 0b00);
+ }
+ return map;
+ }
+ }
+
+ /**
+ * The mask table for a flag, this is used to determine if a flag
+ * corresponds to a certain flag type and for creating mask constants.
+ */
+ private final Map<Type, Integer> maskTable;
+
+ /**
+ * The bit position in the bit mask.
+ */
+ private final int bitPosition;
+
+ /**
+ * The set 2 bit set offset at the bit position.
+ */
+ private final int set;
+
+ /**
+ * The clear 2 bit set offset at the bit position.
+ */
+ private final int clear;
+
+ /**
+ * The preserve 2 bit set offset at the bit position.
+ */
+ private final int preserve;
+
+ private StreamOpFlag(int position, MaskBuilder maskBuilder) {
+ this.maskTable = maskBuilder.build();
+ // Two bits per flag
+ position *= 2;
+ this.bitPosition = position;
+ this.set = SET_BITS << position;
+ this.clear = CLEAR_BITS << position;
+ this.preserve = PRESERVE_BITS << position;
+ }
+
+ /**
+ * Gets the bitmap associated with setting this characteristic.
+ *
+ * @return the bitmap for setting this characteristic
+ */
+ int set() {
+ return set;
+ }
+
+ /**
+ * Gets the bitmap associated with clearing this characteristic.
+ *
+ * @return the bitmap for clearing this characteristic
+ */
+ int clear() {
+ return clear;
+ }
+
+ /**
+ * Determines if this flag is a stream-based flag.
+ *
+ * @return true if a stream-based flag, otherwise false.
+ */
+ boolean isStreamFlag() {
+ return maskTable.get(Type.STREAM) > 0;
+ }
+
+ /**
+ * Checks if this flag is set on stream flags, injected on operation flags,
+ * and injected on combined stream and operation flags.
+ *
+ * @param flags the stream flags, operation flags, or combined stream and
+ * operation flags
+ * @return true if this flag is known, otherwise false.
+ */
+ boolean isKnown(int flags) {
+ return (flags & preserve) == set;
+ }
+
+ /**
+ * Checks if this flag is cleared on operation flags or combined stream and
+ * operation flags.
+ *
+ * @param flags the operation flags or combined stream and operations flags.
+ * @return true if this flag is cleared, otherwise false.
+ */
+ boolean isCleared(int flags) {
+ return (flags & preserve) == clear;
+ }
+
+ /**
+ * Checks if this flag is preserved on combined stream and operation flags.
+ *
+ * @param flags the combined stream and operations flags.
+ * @return true if this flag is preserved, otherwise false.
+ */
+ boolean isPreserved(int flags) {
+ return (flags & preserve) == preserve;
+ }
+
+ /**
+ * Determines if this flag can be set for a flag type.
+ *
+ * @param t the flag type.
+ * @return true if this flag can be set for the flag type, otherwise false.
+ */
+ boolean canSet(Type t) {
+ return (maskTable.get(t) & SET_BITS) > 0;
+ }
+
+ /**
+ * The bit mask for spliterator characteristics
+ */
+ static final int SPLITERATOR_CHARACTERISTICS_MASK = createMask(Type.SPLITERATOR);
+
+ /**
+ * The bit mask for source stream flags.
+ */
+ static final int STREAM_MASK = createMask(Type.STREAM);
+
+ /**
+ * The bit mask for intermediate operation flags.
+ */
+ static final int OP_MASK = createMask(Type.OP);
+
+ /**
+ * The bit mask for terminal operation flags.
+ */
+ static final int TERMINAL_OP_MASK = createMask(Type.TERMINAL_OP);
+
+ /**
+ * The bit mask for upstream terminal operation flags.
+ */
+ static final int UPSTREAM_TERMINAL_OP_MASK = createMask(Type.UPSTREAM_TERMINAL_OP);
+
+ private static int createMask(Type t) {
+ int mask = 0;
+ for (StreamOpFlag flag : StreamOpFlag.values()) {
+ mask |= flag.maskTable.get(t) << flag.bitPosition;
+ }
+ return mask;
+ }
+
+ /**
+ * Complete flag mask.
+ */
+ private static final int FLAG_MASK = createFlagMask();
+
+ private static int createFlagMask() {
+ int mask = 0;
+ for (StreamOpFlag flag : StreamOpFlag.values()) {
+ mask |= flag.preserve;
+ }
+ return mask;
+ }
+
+ /**
+ * Flag mask for stream flags that are set.
+ */
+ private static final int FLAG_MASK_IS = STREAM_MASK;
+
+ /**
+ * Flag mask for stream flags that are cleared.
+ * (The clear bit of each flag sits one position to the left of its set
+ * bit — CLEAR_BITS is 0b10 vs SET_BITS 0b01 — so shifting the set mask
+ * left by one yields the clear mask.)
+ */
+ private static final int FLAG_MASK_NOT = STREAM_MASK << 1;
+
+ /**
+ * The initial value to be combined with the stream flags of the first
+ * stream in the pipeline.
+ */
+ static final int INITIAL_OPS_VALUE = FLAG_MASK_IS | FLAG_MASK_NOT;
+
+ /**
+ * The bit value to set or inject {@link #DISTINCT}.
+ */
+ static final int IS_DISTINCT = DISTINCT.set;
+
+ /**
+ * The bit value to clear {@link #DISTINCT}.
+ */
+ static final int NOT_DISTINCT = DISTINCT.clear;
+
+ /**
+ * The bit value to set or inject {@link #SORTED}.
+ */
+ static final int IS_SORTED = SORTED.set;
+
+ /**
+ * The bit value to clear {@link #SORTED}.
+ */
+ static final int NOT_SORTED = SORTED.clear;
+
+ /**
+ * The bit value to set or inject {@link #ORDERED}.
+ */
+ static final int IS_ORDERED = ORDERED.set;
+
+ /**
+ * The bit value to clear {@link #ORDERED}.
+ */
+ static final int NOT_ORDERED = ORDERED.clear;
+
+ /**
+ * The bit value to set {@link #SIZED}.
+ */
+ static final int IS_SIZED = SIZED.set;
+
+ /**
+ * The bit value to clear {@link #SIZED}.
+ */
+ static final int NOT_SIZED = SIZED.clear;
+
+ /**
+ * The bit value to inject {@link #SHORT_CIRCUIT}.
+ */
+ static final int IS_SHORT_CIRCUIT = SHORT_CIRCUIT.set;
+
+ /**
+ * Produces the mask that, when ANDed with previously combined flags in
+ * {@link #combineOpFlags(int, int)}, zeroes the 2-bit field of every flag
+ * that is set or cleared in {@code flags}, so the new values can be ORed
+ * in; an input of 0 keeps all fields (returns the complete flag mask).
+ */
+ private static int getMask(int flags) {
+ return (flags == 0)
+ ? FLAG_MASK
+ : ~(flags | ((FLAG_MASK_IS & flags) << 1) | ((FLAG_MASK_NOT & flags) >> 1));
+ }
+
+ /**
+ * Combines stream or operation flags with previously combined stream and
+ * operation flags to produce updated combined stream and operation flags.
+ * <p>
+ * A flag set on stream flags or injected on operation flags,
+ * and injected combined stream and operation flags,
+ * will be injected on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag set on stream flags or injected on operation flags,
+ * and cleared on the combined stream and operation flags,
+ * will be cleared on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag set on the stream flags or injected on operation flags,
+ * and preserved on the combined stream and operation flags,
+ * will be injected on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag not set on the stream flags or cleared/preserved on operation
+ * flags, and injected on the combined stream and operation flags,
+ * will be injected on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag not set on the stream flags or cleared/preserved on operation
+ * flags, and cleared on the combined stream and operation flags,
+ * will be cleared on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag not set on the stream flags,
+ * and preserved on the combined stream and operation flags
+ * will be preserved on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag cleared on operation flags,
+ * and preserved on the combined stream and operation flags
+ * will be cleared on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag preserved on operation flags,
+ * and preserved on the combined stream and operation flags
+ * will be preserved on the updated combined stream and operation flags.
+ *
+ * @param newStreamOrOpFlags the stream or operation flags.
+ * @param prevCombOpFlags previously combined stream and operation flags.
+ * The value {@link #INITIAL_OPS_VALUE} must be used as the seed value.
+ * @return the updated combined stream and operation flags.
+ */
+ static int combineOpFlags(int newStreamOrOpFlags, int prevCombOpFlags) {
+ // 0x01 or 0x10 nibbles are transformed to 0x11
+ // 0x00 nibbles remain unchanged
+ // Then all the bits are flipped
+ // Then the result is logically or'ed with the operation flags.
+ return (prevCombOpFlags & StreamOpFlag.getMask(newStreamOrOpFlags)) | newStreamOrOpFlags;
+ }
+
+ /**
+ * Converts combined stream and operation flags to stream flags.
+ *
+ * <p>Each flag injected on the combined stream and operation flags will be
+ * set on the stream flags.
+ *
+ * @param combOpFlags the combined stream and operation flags.
+ * @return the stream flags.
+ */
+ static int toStreamFlags(int combOpFlags) {
+ // By flipping the nibbles 0x11 become 0x00 and 0x01 become 0x10
+ // Shift left 1 to restore set flags and mask off anything other than the set flags
+ return ((~combOpFlags) >> 1) & FLAG_MASK_IS & combOpFlags;
+ }
+
+ /**
+ * Converts stream flags to a spliterator characteristic bit set.
+ *
+ * @param streamFlags the stream flags.
+ * @return the spliterator characteristic bit set.
+ */
+ static int toCharacteristics(int streamFlags) {
+ return streamFlags & SPLITERATOR_CHARACTERISTICS_MASK;
+ }
+
+ /**
+ * Converts a spliterator characteristic bit set to stream flags.
+ *
+ * @implSpec
+ * If the spliterator is naturally {@code SORTED} (the associated
+ * {@code Comparator} is {@code null}) then the characteristic is converted
+ * to the {@link #SORTED} flag, otherwise the characteristic is not
+ * converted.
+ *
+ * @param spliterator the spliterator from which to obtain characteristic
+ * bit set.
+ * @return the stream flags.
+ */
+ static int fromCharacteristics(Spliterator<?> spliterator) {
+ int characteristics = spliterator.characteristics();
+ if ((characteristics & Spliterator.SORTED) != 0 && spliterator.getComparator() != null) {
+ // Do not propagate the SORTED characteristic if it does not correspond
+ // to a natural sort order
+ return characteristics & SPLITERATOR_CHARACTERISTICS_MASK & ~Spliterator.SORTED;
+ }
+ else {
+ return characteristics & SPLITERATOR_CHARACTERISTICS_MASK;
+ }
+ }
+
+ /**
+ * Converts a spliterator characteristic bit set to stream flags.
+ *
+ * @param characteristics the spliterator characteristic bit set.
+ * @return the stream flags.
+ */
+ static int fromCharacteristics(int characteristics) {
+ return characteristics & SPLITERATOR_CHARACTERISTICS_MASK;
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/StreamShape.java b/ojluni/src/main/java/java/util/stream/StreamShape.java
new file mode 100644
index 0000000..9051be2
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/StreamShape.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+/**
+ * An enum describing the known shape specializations for stream abstractions.
+ * Each will correspond to a specific subinterface of {@link BaseStream}
+ * (e.g., {@code REFERENCE} corresponds to {@code Stream}, {@code INT_VALUE}
+ * corresponds to {@code IntStream}). Each may also correspond to
+ * specializations of value-handling abstractions such as {@code Spliterator},
+ * {@code Consumer}, etc.
+ *
+ * @apiNote
+ * This enum is used by implementations to determine compatibility between
+ * streams and operations (i.e., if the output shape of a stream is compatible
+ * with the input shape of the next operation).
+ *
+ * <p>Some APIs require you to specify both a generic type and a stream shape
+ * for input or output elements, such as {@link TerminalOp} which has both
+ * generic type parameters for its input types, and a getter for the
+ * input shape. When representing primitive streams in this way, the
+ * generic type parameter should correspond to the wrapper type for that
+ * primitive type.
+ *
+ * @since 1.8
+ */
+enum StreamShape {
+ // One constant per BaseStream specialization (Stream, IntStream,
+ // LongStream, DoubleStream); see the class javadoc.
+ /**
+ * The shape specialization corresponding to {@code Stream} and elements
+ * that are object references.
+ */
+ REFERENCE,
+ /**
+ * The shape specialization corresponding to {@code IntStream} and elements
+ * that are {@code int} values.
+ */
+ INT_VALUE,
+ /**
+ * The shape specialization corresponding to {@code LongStream} and elements
+ * that are {@code long} values.
+ */
+ LONG_VALUE,
+ /**
+ * The shape specialization corresponding to {@code DoubleStream} and
+ * elements that are {@code double} values.
+ */
+ DOUBLE_VALUE
+}
diff --git a/ojluni/src/main/java/java/util/stream/StreamSpliterators.java b/ojluni/src/main/java/java/util/stream/StreamSpliterators.java
new file mode 100644
index 0000000..6768342
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/StreamSpliterators.java
@@ -0,0 +1,1548 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Comparator;
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.BooleanSupplier;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.DoubleSupplier;
+import java.util.function.IntConsumer;
+import java.util.function.IntSupplier;
+import java.util.function.LongConsumer;
+import java.util.function.LongSupplier;
+import java.util.function.Supplier;
+
+/**
+ * Spliterator implementations for wrapping and delegating spliterators, used
+ * in the implementation of the {@link Stream#spliterator()} method.
+ *
+ * @since 1.8
+ */
+class StreamSpliterators {
+
    /**
     * Abstract wrapping spliterator that binds to the spliterator of a
     * pipeline helper on first operation.
     *
     * <p>This spliterator is not late-binding and will bind to the source
     * spliterator when first operated on.
     *
     * <p>A wrapping spliterator produced from a sequential stream
     * cannot be split if there are stateful operations present.
     *
     * @param <P_IN> the type of elements consumed from the source spliterator
     * @param <P_OUT> the type of elements produced by the pipeline
     * @param <T_BUFFER> the shape-specific buffer type used during partial
     *        traversal
     */
    private static abstract class AbstractWrappingSpliterator<P_IN, P_OUT,
                                                              T_BUFFER extends AbstractSpinedBuffer>
            implements Spliterator<P_OUT> {

        // @@@ Detect if stateful operations are present or not
        //     If not then can split otherwise cannot

        /**
         * True if this spliterator supports splitting
         */
        final boolean isParallel;

        final PipelineHelper<P_OUT> ph;

        /**
         * Supplier for the source spliterator.  Client provides either a
         * spliterator or a supplier.
         */
        private Supplier<Spliterator<P_IN>> spliteratorSupplier;

        /**
         * Source spliterator.  Either provided from client or obtained from
         * supplier.
         */
        Spliterator<P_IN> spliterator;

        /**
         * Sink chain for the downstream stages of the pipeline, ultimately
         * leading to the buffer. Used during partial traversal.
         */
        Sink<P_IN> bufferSink;

        /**
         * A function that advances one element of the spliterator, pushing
         * it to bufferSink.  Returns whether any elements were processed.
         * Used during partial traversal.
         */
        BooleanSupplier pusher;

        /** Next element to consume from the buffer, used during partial traversal */
        long nextToConsume;

        /** Buffer into which elements are pushed.  Used during partial traversal. */
        T_BUFFER buffer;

        /**
         * True if full traversal has occurred (with possible cancelation).
         * If doing a partial traversal, there may be still elements in buffer.
         */
        boolean finished;

        /**
         * Construct an AbstractWrappingSpliterator from a
         * {@code Supplier<Spliterator>}.
         */
        AbstractWrappingSpliterator(PipelineHelper<P_OUT> ph,
                                    Supplier<Spliterator<P_IN>> spliteratorSupplier,
                                    boolean parallel) {
            this.ph = ph;
            this.spliteratorSupplier = spliteratorSupplier;
            this.spliterator = null;
            this.isParallel = parallel;
        }

        /**
         * Construct an AbstractWrappingSpliterator from a
         * {@code Spliterator}.
         */
        AbstractWrappingSpliterator(PipelineHelper<P_OUT> ph,
                                    Spliterator<P_IN> spliterator,
                                    boolean parallel) {
            this.ph = ph;
            this.spliteratorSupplier = null;
            this.spliterator = spliterator;
            this.isParallel = parallel;
        }

        /**
         * Called before advancing to set up spliterator, if needed.
         */
        final void init() {
            if (spliterator == null) {
                // Bind to the source exactly once; drop the supplier so it
                // cannot be invoked again.
                spliterator = spliteratorSupplier.get();
                spliteratorSupplier = null;
            }
        }

        /**
         * Get an element from the source, pushing it into the sink chain,
         * setting up the buffer if needed
         * @return whether there are elements to consume from the buffer
         */
        final boolean doAdvance() {
            if (buffer == null) {
                // First advance: bind, build the buffer/sink/pusher state,
                // then fill the buffer with the first chunk of output.
                if (finished)
                    return false;

                init();
                initPartialTraversalState();
                nextToConsume = 0;
                bufferSink.begin(spliterator.getExactSizeIfKnown());
                return fillBuffer();
            }
            else {
                // Subsequent advances consume from the buffer, refilling it
                // from the source when it is exhausted.
                ++nextToConsume;
                boolean hasNext = nextToConsume < buffer.count();
                if (!hasNext) {
                    nextToConsume = 0;
                    buffer.clear();
                    hasNext = fillBuffer();
                }
                return hasNext;
            }
        }

        /**
         * Invokes the shape-specific constructor with the provided arguments
         * and returns the result.
         */
        abstract AbstractWrappingSpliterator<P_IN, P_OUT, ?> wrap(Spliterator<P_IN> s);

        /**
         * Initializes buffer, sink chain, and pusher for a shape-specific
         * implementation.
         */
        abstract void initPartialTraversalState();

        @Override
        public Spliterator<P_OUT> trySplit() {
            if (isParallel && !finished) {
                init();

                Spliterator<P_IN> split = spliterator.trySplit();
                return (split == null) ? null : wrap(split);
            }
            else
                return null;
        }

        /**
         * If the buffer is empty, push elements into the sink chain until
         * the source is empty or cancellation is requested.
         * @return whether there are elements to consume from the buffer
         */
        private boolean fillBuffer() {
            while (buffer.count() == 0) {
                if (bufferSink.cancellationRequested() || !pusher.getAsBoolean()) {
                    if (finished)
                        return false;
                    else {
                        // end() may flush buffered elements from short-circuit
                        // or stateful stages, so loop once more before giving up.
                        bufferSink.end(); // might trigger more elements
                        finished = true;
                    }
                }
            }
            return true;
        }

        @Override
        public final long estimateSize() {
            init();
            // Use the estimate of the wrapped spliterator
            // Note this may not be accurate if there are filter/flatMap
            // operations filtering or adding elements to the stream
            return spliterator.estimateSize();
        }

        @Override
        public final long getExactSizeIfKnown() {
            init();
            // Exact only if the pipeline preserves size; otherwise unknown.
            return StreamOpFlag.SIZED.isKnown(ph.getStreamAndOpFlags())
                   ? spliterator.getExactSizeIfKnown()
                   : -1;
        }

        @Override
        public final int characteristics() {
            init();

            // Get the characteristics from the pipeline
            int c = StreamOpFlag.toCharacteristics(StreamOpFlag.toStreamFlags(ph.getStreamAndOpFlags()));

            // Mask off the size and uniform characteristics and replace with
            // those of the spliterator
            // Note that a non-uniform spliterator can change from something
            // with an exact size to an estimate for a sub-split, for example
            // with HashSet where the size is known at the top level spliterator
            // but for sub-splits only an estimate is known
            if ((c & Spliterator.SIZED) != 0) {
                c &= ~(Spliterator.SIZED | Spliterator.SUBSIZED);
                c |= (spliterator.characteristics() & (Spliterator.SIZED | Spliterator.SUBSIZED));
            }

            return c;
        }

        @Override
        public Comparator<? super P_OUT> getComparator() {
            // SORTED is the constant inherited from Spliterator; per the
            // Spliterator contract, a null comparator means natural ordering.
            if (!hasCharacteristics(SORTED))
                throw new IllegalStateException();
            return null;
        }

        @Override
        public final String toString() {
            return String.format("%s[%s]", getClass().getName(), spliterator);
        }
    }
+
    /**
     * Wrapping spliterator for reference-valued pipelines; buffers elements
     * in a {@link SpinedBuffer} during partial traversal.
     */
    static final class WrappingSpliterator<P_IN, P_OUT>
            extends AbstractWrappingSpliterator<P_IN, P_OUT, SpinedBuffer<P_OUT>> {

        WrappingSpliterator(PipelineHelper<P_OUT> ph,
                            Supplier<Spliterator<P_IN>> supplier,
                            boolean parallel) {
            super(ph, supplier, parallel);
        }

        WrappingSpliterator(PipelineHelper<P_OUT> ph,
                            Spliterator<P_IN> spliterator,
                            boolean parallel) {
            super(ph, spliterator, parallel);
        }

        @Override
        WrappingSpliterator<P_IN, P_OUT> wrap(Spliterator<P_IN> s) {
            return new WrappingSpliterator<>(ph, s, isParallel);
        }

        @Override
        void initPartialTraversalState() {
            // Buffer collects pipeline output; pusher feeds one source
            // element at a time through the wrapped sink chain.
            SpinedBuffer<P_OUT> b = new SpinedBuffer<>();
            buffer = b;
            bufferSink = ph.wrapSink(b::accept);
            pusher = () -> spliterator.tryAdvance(bufferSink);
        }

        @Override
        public boolean tryAdvance(Consumer<? super P_OUT> consumer) {
            Objects.requireNonNull(consumer);
            boolean hasNext = doAdvance();
            if (hasNext)
                consumer.accept(buffer.get(nextToConsume));
            return hasNext;
        }

        @Override
        public void forEachRemaining(Consumer<? super P_OUT> consumer) {
            if (buffer == null && !finished) {
                // Nothing consumed yet: skip buffering and push the whole
                // source through the pipeline directly into the consumer.
                Objects.requireNonNull(consumer);
                init();

                ph.wrapAndCopyInto((Sink<P_OUT>) consumer::accept, spliterator);
                finished = true;
            }
            else {
                // Partial traversal already in progress; drain via tryAdvance.
                do { } while (tryAdvance(consumer));
            }
        }
    }
+
    /**
     * Wrapping spliterator for {@code int}-valued pipelines; buffers elements
     * in a {@link SpinedBuffer.OfInt} during partial traversal.
     */
    static final class IntWrappingSpliterator<P_IN>
            extends AbstractWrappingSpliterator<P_IN, Integer, SpinedBuffer.OfInt>
            implements Spliterator.OfInt {

        IntWrappingSpliterator(PipelineHelper<Integer> ph,
                               Supplier<Spliterator<P_IN>> supplier,
                               boolean parallel) {
            super(ph, supplier, parallel);
        }

        IntWrappingSpliterator(PipelineHelper<Integer> ph,
                               Spliterator<P_IN> spliterator,
                               boolean parallel) {
            super(ph, spliterator, parallel);
        }

        @Override
        AbstractWrappingSpliterator<P_IN, Integer, ?> wrap(Spliterator<P_IN> s) {
            return new IntWrappingSpliterator<>(ph, s, isParallel);
        }

        @Override
        void initPartialTraversalState() {
            // Primitive buffer avoids boxing; pusher feeds one source element
            // at a time through the wrapped sink chain.
            SpinedBuffer.OfInt b = new SpinedBuffer.OfInt();
            buffer = b;
            bufferSink = ph.wrapSink((Sink.OfInt) b::accept);
            pusher = () -> spliterator.tryAdvance(bufferSink);
        }

        @Override
        public Spliterator.OfInt trySplit() {
            return (Spliterator.OfInt) super.trySplit();
        }

        @Override
        public boolean tryAdvance(IntConsumer consumer) {
            Objects.requireNonNull(consumer);
            boolean hasNext = doAdvance();
            if (hasNext)
                consumer.accept(buffer.get(nextToConsume));
            return hasNext;
        }

        @Override
        public void forEachRemaining(IntConsumer consumer) {
            if (buffer == null && !finished) {
                // Nothing consumed yet: bypass the buffer and copy the whole
                // source through the pipeline into the consumer.
                Objects.requireNonNull(consumer);
                init();

                ph.wrapAndCopyInto((Sink.OfInt) consumer::accept, spliterator);
                finished = true;
            }
            else {
                // Partial traversal already in progress; drain via tryAdvance.
                do { } while (tryAdvance(consumer));
            }
        }
    }
+
    /**
     * Wrapping spliterator for {@code long}-valued pipelines; buffers elements
     * in a {@link SpinedBuffer.OfLong} during partial traversal.
     */
    static final class LongWrappingSpliterator<P_IN>
            extends AbstractWrappingSpliterator<P_IN, Long, SpinedBuffer.OfLong>
            implements Spliterator.OfLong {

        LongWrappingSpliterator(PipelineHelper<Long> ph,
                                Supplier<Spliterator<P_IN>> supplier,
                                boolean parallel) {
            super(ph, supplier, parallel);
        }

        LongWrappingSpliterator(PipelineHelper<Long> ph,
                                Spliterator<P_IN> spliterator,
                                boolean parallel) {
            super(ph, spliterator, parallel);
        }

        @Override
        AbstractWrappingSpliterator<P_IN, Long, ?> wrap(Spliterator<P_IN> s) {
            return new LongWrappingSpliterator<>(ph, s, isParallel);
        }

        @Override
        void initPartialTraversalState() {
            // Primitive buffer avoids boxing; pusher feeds one source element
            // at a time through the wrapped sink chain.
            SpinedBuffer.OfLong b = new SpinedBuffer.OfLong();
            buffer = b;
            bufferSink = ph.wrapSink((Sink.OfLong) b::accept);
            pusher = () -> spliterator.tryAdvance(bufferSink);
        }

        @Override
        public Spliterator.OfLong trySplit() {
            return (Spliterator.OfLong) super.trySplit();
        }

        @Override
        public boolean tryAdvance(LongConsumer consumer) {
            Objects.requireNonNull(consumer);
            boolean hasNext = doAdvance();
            if (hasNext)
                consumer.accept(buffer.get(nextToConsume));
            return hasNext;
        }

        @Override
        public void forEachRemaining(LongConsumer consumer) {
            if (buffer == null && !finished) {
                // Nothing consumed yet: bypass the buffer and copy the whole
                // source through the pipeline into the consumer.
                Objects.requireNonNull(consumer);
                init();

                ph.wrapAndCopyInto((Sink.OfLong) consumer::accept, spliterator);
                finished = true;
            }
            else {
                // Partial traversal already in progress; drain via tryAdvance.
                do { } while (tryAdvance(consumer));
            }
        }
    }
+
    /**
     * Wrapping spliterator for {@code double}-valued pipelines; buffers
     * elements in a {@link SpinedBuffer.OfDouble} during partial traversal.
     */
    static final class DoubleWrappingSpliterator<P_IN>
            extends AbstractWrappingSpliterator<P_IN, Double, SpinedBuffer.OfDouble>
            implements Spliterator.OfDouble {

        DoubleWrappingSpliterator(PipelineHelper<Double> ph,
                                  Supplier<Spliterator<P_IN>> supplier,
                                  boolean parallel) {
            super(ph, supplier, parallel);
        }

        DoubleWrappingSpliterator(PipelineHelper<Double> ph,
                                  Spliterator<P_IN> spliterator,
                                  boolean parallel) {
            super(ph, spliterator, parallel);
        }

        @Override
        AbstractWrappingSpliterator<P_IN, Double, ?> wrap(Spliterator<P_IN> s) {
            return new DoubleWrappingSpliterator<>(ph, s, isParallel);
        }

        @Override
        void initPartialTraversalState() {
            // Primitive buffer avoids boxing; pusher feeds one source element
            // at a time through the wrapped sink chain.
            SpinedBuffer.OfDouble b = new SpinedBuffer.OfDouble();
            buffer = b;
            bufferSink = ph.wrapSink((Sink.OfDouble) b::accept);
            pusher = () -> spliterator.tryAdvance(bufferSink);
        }

        @Override
        public Spliterator.OfDouble trySplit() {
            return (Spliterator.OfDouble) super.trySplit();
        }

        @Override
        public boolean tryAdvance(DoubleConsumer consumer) {
            Objects.requireNonNull(consumer);
            boolean hasNext = doAdvance();
            if (hasNext)
                consumer.accept(buffer.get(nextToConsume));
            return hasNext;
        }

        @Override
        public void forEachRemaining(DoubleConsumer consumer) {
            if (buffer == null && !finished) {
                // Nothing consumed yet: bypass the buffer and copy the whole
                // source through the pipeline into the consumer.
                Objects.requireNonNull(consumer);
                init();

                ph.wrapAndCopyInto((Sink.OfDouble) consumer::accept, spliterator);
                finished = true;
            }
            else {
                // Partial traversal already in progress; drain via tryAdvance.
                do { } while (tryAdvance(consumer));
            }
        }
    }
+
    /**
     * Spliterator implementation that delegates to an underlying spliterator,
     * acquiring the spliterator from a {@code Supplier<Spliterator>} on the
     * first call to any spliterator method.
     *
     * <p>This spliterator is late-binding: the supplier is consulted only
     * once, on first use, and the result is cached for all later calls.
     *
     * @param <T> the type of elements returned by this spliterator
     * @param <T_SPLITR> the type of the underlying spliterator
     */
    static class DelegatingSpliterator<T, T_SPLITR extends Spliterator<T>>
            implements Spliterator<T> {
        private final Supplier<? extends T_SPLITR> supplier;

        // Lazily-acquired delegate; null until first use.
        private T_SPLITR s;

        DelegatingSpliterator(Supplier<? extends T_SPLITR> supplier) {
            this.supplier = supplier;
        }

        T_SPLITR get() {
            if (s == null) {
                s = supplier.get();
            }
            return s;
        }

        @Override
        @SuppressWarnings("unchecked")
        public T_SPLITR trySplit() {
            // Cast is safe by the Spliterator.OfPrimitive/trySplit covariant
            // return convention -- NOTE(review): relies on the delegate
            // returning its own type from trySplit; confirm for custom T_SPLITR.
            return (T_SPLITR) get().trySplit();
        }

        @Override
        public boolean tryAdvance(Consumer<? super T> consumer) {
            return get().tryAdvance(consumer);
        }

        @Override
        public void forEachRemaining(Consumer<? super T> consumer) {
            get().forEachRemaining(consumer);
        }

        @Override
        public long estimateSize() {
            return get().estimateSize();
        }

        @Override
        public int characteristics() {
            return get().characteristics();
        }

        @Override
        public Comparator<? super T> getComparator() {
            return get().getComparator();
        }

        @Override
        public long getExactSizeIfKnown() {
            return get().getExactSizeIfKnown();
        }

        @Override
        public String toString() {
            return getClass().getName() + "[" + get() + "]";
        }

        /**
         * Delegating spliterator for primitive specializations; forwards the
         * primitive-consumer overloads to the delegate.
         */
        static class OfPrimitive<T, T_CONS, T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>>
            extends DelegatingSpliterator<T, T_SPLITR>
            implements Spliterator.OfPrimitive<T, T_CONS, T_SPLITR> {
            OfPrimitive(Supplier<? extends T_SPLITR> supplier) {
                super(supplier);
            }

            @Override
            public boolean tryAdvance(T_CONS consumer) {
                return get().tryAdvance(consumer);
            }

            @Override
            public void forEachRemaining(T_CONS consumer) {
                get().forEachRemaining(consumer);
            }
        }

        /** Delegating spliterator for {@code int} elements. */
        static final class OfInt
                extends OfPrimitive<Integer, IntConsumer, Spliterator.OfInt>
                implements Spliterator.OfInt {

            OfInt(Supplier<Spliterator.OfInt> supplier) {
                super(supplier);
            }
        }

        /** Delegating spliterator for {@code long} elements. */
        static final class OfLong
                extends OfPrimitive<Long, LongConsumer, Spliterator.OfLong>
                implements Spliterator.OfLong {

            OfLong(Supplier<Spliterator.OfLong> supplier) {
                super(supplier);
            }
        }

        /** Delegating spliterator for {@code double} elements. */
        static final class OfDouble
                extends OfPrimitive<Double, DoubleConsumer, Spliterator.OfDouble>
                implements Spliterator.OfDouble {

            OfDouble(Supplier<Spliterator.OfDouble> supplier) {
                super(supplier);
            }
        }
    }
+
    /**
     * A slice Spliterator from a source Spliterator that reports
     * {@code SUBSIZED}.
     *
     * <p>The slice is described by two absolute element indices,
     * {@code [sliceOrigin, sliceFence)}; only elements of the source that fall
     * in that window are reported.
     *
     * @param <T> the type of elements returned by this spliterator
     * @param <T_SPLITR> the type of the sliced spliterator
     */
    static abstract class SliceSpliterator<T, T_SPLITR extends Spliterator<T>> {
        // The start index of the slice
        final long sliceOrigin;
        // One past the last index of the slice
        final long sliceFence;

        // The spliterator to slice
        T_SPLITR s;
        // current (absolute) index, modified on advance/split
        long index;
        // one past last (absolute) index or sliceFence, which ever is smaller
        long fence;

        SliceSpliterator(T_SPLITR s, long sliceOrigin, long sliceFence, long origin, long fence) {
            // SUBSIZED is required so that estimateSize() of splits is exact,
            // which the index arithmetic in trySplit depends on.
            assert s.hasCharacteristics(Spliterator.SUBSIZED);
            this.s = s;
            this.sliceOrigin = sliceOrigin;
            this.sliceFence = sliceFence;
            this.index = origin;
            this.fence = fence;
        }

        protected abstract T_SPLITR makeSpliterator(T_SPLITR s, long sliceOrigin, long sliceFence, long origin, long fence);

        public T_SPLITR trySplit() {
            if (sliceOrigin >= fence)
                return null;

            if (index >= fence)
                return null;

            // Keep splitting until the left and right splits intersect with the slice
            // thereby ensuring the size estimate decreases.
            // This also avoids creating empty spliterators which can result in
            // existing and additionally created F/J tasks that perform
            // redundant work on no elements.
            while (true) {
                @SuppressWarnings("unchecked")
                T_SPLITR leftSplit = (T_SPLITR) s.trySplit();
                if (leftSplit == null)
                    return null;

                long leftSplitFenceUnbounded = index + leftSplit.estimateSize();
                long leftSplitFence = Math.min(leftSplitFenceUnbounded, sliceFence);
                if (sliceOrigin >= leftSplitFence) {
                    // The left split does not intersect with, and is to the left of, the slice
                    // The right split does intersect
                    // Discard the left split and split further with the right split
                    index = leftSplitFence;
                }
                else if (leftSplitFence >= sliceFence) {
                    // The right split does not intersect with, and is to the right of, the slice
                    // The left split does intersect
                    // Discard the right split and split further with the left split
                    s = leftSplit;
                    fence = leftSplitFence;
                }
                else if (index >= sliceOrigin && leftSplitFenceUnbounded <= sliceFence) {
                    // The left split is contained within the slice, return the underlying left split
                    // Right split is contained within or intersects with the slice
                    index = leftSplitFence;
                    return leftSplit;
                } else {
                    // The left split intersects with the slice
                    // Right split is contained within or intersects with the slice
                    return makeSpliterator(leftSplit, sliceOrigin, sliceFence, index, index = leftSplitFence);
                }
            }
        }

        public long estimateSize() {
            // Number of slice elements not yet reported by this spliterator.
            return (sliceOrigin < fence)
                   ? fence - Math.max(sliceOrigin, index) : 0;
        }

        public int characteristics() {
            return s.characteristics();
        }

        /** Reference-valued slice spliterator. */
        static final class OfRef<T>
                extends SliceSpliterator<T, Spliterator<T>>
                implements Spliterator<T> {

            OfRef(Spliterator<T> s, long sliceOrigin, long sliceFence) {
                this(s, sliceOrigin, sliceFence, 0, Math.min(s.estimateSize(), sliceFence));
            }

            private OfRef(Spliterator<T> s,
                          long sliceOrigin, long sliceFence, long origin, long fence) {
                super(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            protected Spliterator<T> makeSpliterator(Spliterator<T> s,
                                                     long sliceOrigin, long sliceFence,
                                                     long origin, long fence) {
                return new OfRef<>(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            public boolean tryAdvance(Consumer<? super T> action) {
                Objects.requireNonNull(action);

                if (sliceOrigin >= fence)
                    return false;

                // Skip elements that precede the slice.
                while (sliceOrigin > index) {
                    s.tryAdvance(e -> {});
                    index++;
                }

                if (index >= fence)
                    return false;

                index++;
                return s.tryAdvance(action);
            }

            @Override
            public void forEachRemaining(Consumer<? super T> action) {
                Objects.requireNonNull(action);

                if (sliceOrigin >= fence)
                    return;

                if (index >= fence)
                    return;

                if (index >= sliceOrigin && (index + s.estimateSize()) <= sliceFence) {
                    // The spliterator is contained within the slice
                    s.forEachRemaining(action);
                    index = fence;
                } else {
                    // The spliterator intersects with the slice
                    while (sliceOrigin > index) {
                        s.tryAdvance(e -> {});
                        index++;
                    }
                    // Traverse elements up to the fence
                    for (;index < fence; index++) {
                        s.tryAdvance(action);
                    }
                }
            }
        }

        /**
         * Primitive-valued slice spliterator base class; concrete subclasses
         * supply a shape-specific no-op consumer for skipping elements.
         */
        static abstract class OfPrimitive<T,
                T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>,
                T_CONS>
                extends SliceSpliterator<T, T_SPLITR>
                implements Spliterator.OfPrimitive<T, T_CONS, T_SPLITR> {

            OfPrimitive(T_SPLITR s, long sliceOrigin, long sliceFence) {
                this(s, sliceOrigin, sliceFence, 0, Math.min(s.estimateSize(), sliceFence));
            }

            private OfPrimitive(T_SPLITR s,
                                long sliceOrigin, long sliceFence, long origin, long fence) {
                super(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            public boolean tryAdvance(T_CONS action) {
                Objects.requireNonNull(action);

                if (sliceOrigin >= fence)
                    return false;

                // Skip elements that precede the slice.
                while (sliceOrigin > index) {
                    s.tryAdvance(emptyConsumer());
                    index++;
                }

                if (index >= fence)
                    return false;

                index++;
                return s.tryAdvance(action);
            }

            @Override
            public void forEachRemaining(T_CONS action) {
                Objects.requireNonNull(action);

                if (sliceOrigin >= fence)
                    return;

                if (index >= fence)
                    return;

                if (index >= sliceOrigin && (index + s.estimateSize()) <= sliceFence) {
                    // The spliterator is contained within the slice
                    s.forEachRemaining(action);
                    index = fence;
                } else {
                    // The spliterator intersects with the slice
                    while (sliceOrigin > index) {
                        s.tryAdvance(emptyConsumer());
                        index++;
                    }
                    // Traverse elements up to the fence
                    for (;index < fence; index++) {
                        s.tryAdvance(action);
                    }
                }
            }

            /** Returns a consumer that discards its argument, used for skipping. */
            protected abstract T_CONS emptyConsumer();
        }

        /** {@code int}-valued slice spliterator. */
        static final class OfInt extends OfPrimitive<Integer, Spliterator.OfInt, IntConsumer>
                implements Spliterator.OfInt {
            OfInt(Spliterator.OfInt s, long sliceOrigin, long sliceFence) {
                super(s, sliceOrigin, sliceFence);
            }

            OfInt(Spliterator.OfInt s,
                  long sliceOrigin, long sliceFence, long origin, long fence) {
                super(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            protected Spliterator.OfInt makeSpliterator(Spliterator.OfInt s,
                                                        long sliceOrigin, long sliceFence,
                                                        long origin, long fence) {
                return new SliceSpliterator.OfInt(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            protected IntConsumer emptyConsumer() {
                return e -> {};
            }
        }

        /** {@code long}-valued slice spliterator. */
        static final class OfLong extends OfPrimitive<Long, Spliterator.OfLong, LongConsumer>
                implements Spliterator.OfLong {
            OfLong(Spliterator.OfLong s, long sliceOrigin, long sliceFence) {
                super(s, sliceOrigin, sliceFence);
            }

            OfLong(Spliterator.OfLong s,
                   long sliceOrigin, long sliceFence, long origin, long fence) {
                super(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            protected Spliterator.OfLong makeSpliterator(Spliterator.OfLong s,
                                                         long sliceOrigin, long sliceFence,
                                                         long origin, long fence) {
                return new SliceSpliterator.OfLong(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            protected LongConsumer emptyConsumer() {
                return e -> {};
            }
        }

        /** {@code double}-valued slice spliterator. */
        static final class OfDouble extends OfPrimitive<Double, Spliterator.OfDouble, DoubleConsumer>
                implements Spliterator.OfDouble {
            OfDouble(Spliterator.OfDouble s, long sliceOrigin, long sliceFence) {
                super(s, sliceOrigin, sliceFence);
            }

            OfDouble(Spliterator.OfDouble s,
                     long sliceOrigin, long sliceFence, long origin, long fence) {
                super(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            protected Spliterator.OfDouble makeSpliterator(Spliterator.OfDouble s,
                                                           long sliceOrigin, long sliceFence,
                                                           long origin, long fence) {
                return new SliceSpliterator.OfDouble(s, sliceOrigin, sliceFence, origin, fence);
            }

            @Override
            protected DoubleConsumer emptyConsumer() {
                return e -> {};
            }
        }
    }
+
+ /**
+ * A slice Spliterator that does not preserve order, if any, of a source
+ * Spliterator.
+ *
+ * Note: The source spliterator may report {@code ORDERED} since that
+ * spliterator be the result of a previous pipeline stage that was
+ * collected to a {@code Node}. It is the order of the pipeline stage
+ * that governs whether the this slice spliterator is to be used or not.
+ */
+ static abstract class UnorderedSliceSpliterator<T, T_SPLITR extends Spliterator<T>> {
+ static final int CHUNK_SIZE = 1 << 7;
+
+ // The spliterator to slice
+ protected final T_SPLITR s;
+ protected final boolean unlimited;
+ private final long skipThreshold;
+ private final AtomicLong permits;
+
+ UnorderedSliceSpliterator(T_SPLITR s, long skip, long limit) {
+ this.s = s;
+ this.unlimited = limit < 0;
+ this.skipThreshold = limit >= 0 ? limit : 0;
+ this.permits = new AtomicLong(limit >= 0 ? skip + limit : skip);
+ }
+
+ UnorderedSliceSpliterator(T_SPLITR s,
+ UnorderedSliceSpliterator<T, T_SPLITR> parent) {
+ this.s = s;
+ this.unlimited = parent.unlimited;
+ this.permits = parent.permits;
+ this.skipThreshold = parent.skipThreshold;
+ }
+
+ /**
+ * Acquire permission to skip or process elements. The caller must
+ * first acquire the elements, then consult this method for guidance
+ * as to what to do with the data.
+ *
+ * <p>We use an {@code AtomicLong} to atomically maintain a counter,
+ * which is initialized as skip+limit if we are limiting, or skip only
+ * if we are not limiting. The user should consult the method
+ * {@code checkPermits()} before acquiring data elements.
+ *
+ * @param numElements the number of elements the caller has in hand
+ * @return the number of elements that should be processed; any
+ * remaining elements should be discarded.
+ */
+ protected final long acquirePermits(long numElements) {
+ long remainingPermits;
+ long grabbing;
+ // permits never increase, and don't decrease below zero
+ assert numElements > 0;
+ do {
+ remainingPermits = permits.get();
+ if (remainingPermits == 0)
+ return unlimited ? numElements : 0;
+ grabbing = Math.min(remainingPermits, numElements);
+ } while (grabbing > 0 &&
+ !permits.compareAndSet(remainingPermits, remainingPermits - grabbing));
+
+ if (unlimited)
+ return Math.max(numElements - grabbing, 0);
+ else if (remainingPermits > skipThreshold)
+ return Math.max(grabbing - (remainingPermits - skipThreshold), 0);
+ else
+ return grabbing;
+ }
+
+ enum PermitStatus { NO_MORE, MAYBE_MORE, UNLIMITED }
+
+ /** Call to check if permits might be available before acquiring data */
+ protected final PermitStatus permitStatus() {
+ if (permits.get() > 0)
+ return PermitStatus.MAYBE_MORE;
+ else
+ return unlimited ? PermitStatus.UNLIMITED : PermitStatus.NO_MORE;
+ }
+
+ public final T_SPLITR trySplit() {
+ // Stop splitting when there are no more limit permits
+ if (permits.get() == 0)
+ return null;
+ @SuppressWarnings("unchecked")
+ T_SPLITR split = (T_SPLITR) s.trySplit();
+ return split == null ? null : makeSpliterator(split);
+ }
+
+ protected abstract T_SPLITR makeSpliterator(T_SPLITR s);
+
+ public final long estimateSize() {
+ return s.estimateSize();
+ }
+
+ public final int characteristics() {
+ return s.characteristics() &
+ ~(Spliterator.SIZED | Spliterator.SUBSIZED | Spliterator.ORDERED);
+ }
+
+ static final class OfRef<T> extends UnorderedSliceSpliterator<T, Spliterator<T>>
+ implements Spliterator<T>, Consumer<T> {
+ T tmpSlot;
+
+ OfRef(Spliterator<T> s, long skip, long limit) {
+ super(s, skip, limit);
+ }
+
+ OfRef(Spliterator<T> s, OfRef<T> parent) {
+ super(s, parent);
+ }
+
+ @Override
+ public final void accept(T t) {
+ tmpSlot = t;
+ }
+
+ @Override
+ public boolean tryAdvance(Consumer<? super T> action) {
+ Objects.requireNonNull(action);
+
+ while (permitStatus() != PermitStatus.NO_MORE) {
+ if (!s.tryAdvance(this))
+ return false;
+ else if (acquirePermits(1) == 1) {
+ action.accept(tmpSlot);
+ tmpSlot = null;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public void forEachRemaining(Consumer<? super T> action) {
+ Objects.requireNonNull(action);
+
+ ArrayBuffer.OfRef<T> sb = null;
+ PermitStatus permitStatus;
+ while ((permitStatus = permitStatus()) != PermitStatus.NO_MORE) {
+ if (permitStatus == PermitStatus.MAYBE_MORE) {
+ // Optimistically traverse elements up to a threshold of CHUNK_SIZE
+ if (sb == null)
+ sb = new ArrayBuffer.OfRef<>(CHUNK_SIZE);
+ else
+ sb.reset();
+ long permitsRequested = 0;
+ do { } while (s.tryAdvance(sb) && ++permitsRequested < CHUNK_SIZE);
+ if (permitsRequested == 0)
+ return;
+ sb.forEach(action, acquirePermits(permitsRequested));
+ }
+ else {
+ // Must be UNLIMITED; let 'er rip
+ s.forEachRemaining(action);
+ return;
+ }
+ }
+ }
+
+ @Override
+ protected Spliterator<T> makeSpliterator(Spliterator<T> s) {
+ return new UnorderedSliceSpliterator.OfRef<>(s, this);
+ }
+ }
+
+ /**
+ * Concrete sub-types must also be an instance of type {@code T_CONS}.
+ *
+ * @param <T_BUFF> the type of the spined buffer. Must also be a type of
+ * {@code T_CONS}.
+ */
+ static abstract class OfPrimitive<
+ T,
+ T_CONS,
+ T_BUFF extends ArrayBuffer.OfPrimitive<T_CONS>,
+ T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>>
+ extends UnorderedSliceSpliterator<T, T_SPLITR>
+ implements Spliterator.OfPrimitive<T, T_CONS, T_SPLITR> {
+ OfPrimitive(T_SPLITR s, long skip, long limit) {
+ super(s, skip, limit);
+ }
+
+ OfPrimitive(T_SPLITR s, UnorderedSliceSpliterator.OfPrimitive<T, T_CONS, T_BUFF, T_SPLITR> parent) {
+ super(s, parent);
+ }
+
+ @Override
+ public boolean tryAdvance(T_CONS action) {
+ Objects.requireNonNull(action);
+ @SuppressWarnings("unchecked")
+ T_CONS consumer = (T_CONS) this;
+
+ while (permitStatus() != PermitStatus.NO_MORE) {
+ if (!s.tryAdvance(consumer))
+ return false;
+ else if (acquirePermits(1) == 1) {
+ acceptConsumed(action);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ protected abstract void acceptConsumed(T_CONS action);
+
+ @Override
+ public void forEachRemaining(T_CONS action) {
+ Objects.requireNonNull(action);
+
+ T_BUFF sb = null;
+ PermitStatus permitStatus;
+ while ((permitStatus = permitStatus()) != PermitStatus.NO_MORE) {
+ if (permitStatus == PermitStatus.MAYBE_MORE) {
+ // Optimistically traverse elements up to a threshold of CHUNK_SIZE
+ if (sb == null)
+ sb = bufferCreate(CHUNK_SIZE);
+ else
+ sb.reset();
+ @SuppressWarnings("unchecked")
+ T_CONS sbc = (T_CONS) sb;
+ long permitsRequested = 0;
+ do { } while (s.tryAdvance(sbc) && ++permitsRequested < CHUNK_SIZE);
+ if (permitsRequested == 0)
+ return;
+ sb.forEach(action, acquirePermits(permitsRequested));
+ }
+ else {
+ // Must be UNLIMITED; let 'er rip
+ s.forEachRemaining(action);
+ return;
+ }
+ }
+ }
+
+ protected abstract T_BUFF bufferCreate(int initialCapacity);
+ }
+
+ static final class OfInt
+ extends OfPrimitive<Integer, IntConsumer, ArrayBuffer.OfInt, Spliterator.OfInt>
+ implements Spliterator.OfInt, IntConsumer {
+
+ int tmpValue;
+
+ OfInt(Spliterator.OfInt s, long skip, long limit) {
+ super(s, skip, limit);
+ }
+
+ OfInt(Spliterator.OfInt s, UnorderedSliceSpliterator.OfInt parent) {
+ super(s, parent);
+ }
+
+ @Override
+ public void accept(int value) {
+ tmpValue = value;
+ }
+
+ @Override
+ protected void acceptConsumed(IntConsumer action) {
+ action.accept(tmpValue);
+ }
+
+ @Override
+ protected ArrayBuffer.OfInt bufferCreate(int initialCapacity) {
+ return new ArrayBuffer.OfInt(initialCapacity);
+ }
+
+ @Override
+ protected Spliterator.OfInt makeSpliterator(Spliterator.OfInt s) {
+ return new UnorderedSliceSpliterator.OfInt(s, this);
+ }
+ }
+
+        /** Long specialization of the unordered skip/limit slice spliterator. */
+        static final class OfLong
+                extends OfPrimitive<Long, LongConsumer, ArrayBuffer.OfLong, Spliterator.OfLong>
+                implements Spliterator.OfLong, LongConsumer {
+
+            // Element captured by the most recent tryAdvance probe.
+            long tmpValue;
+
+            OfLong(Spliterator.OfLong spliterator, long skip, long limit) {
+                super(spliterator, skip, limit);
+            }
+
+            OfLong(Spliterator.OfLong spliterator, UnorderedSliceSpliterator.OfLong parent) {
+                super(spliterator, parent);
+            }
+
+            @Override
+            protected Spliterator.OfLong makeSpliterator(Spliterator.OfLong spliterator) {
+                return new UnorderedSliceSpliterator.OfLong(spliterator, this);
+            }
+
+            @Override
+            protected ArrayBuffer.OfLong bufferCreate(int initialCapacity) {
+                return new ArrayBuffer.OfLong(initialCapacity);
+            }
+
+            @Override
+            public void accept(long value) {
+                tmpValue = value;
+            }
+
+            @Override
+            protected void acceptConsumed(LongConsumer action) {
+                action.accept(tmpValue);
+            }
+        }
+
+        /** Double specialization of the unordered skip/limit slice spliterator. */
+        static final class OfDouble
+                extends OfPrimitive<Double, DoubleConsumer, ArrayBuffer.OfDouble, Spliterator.OfDouble>
+                implements Spliterator.OfDouble, DoubleConsumer {
+
+            // Element captured by the most recent tryAdvance probe.
+            double tmpValue;
+
+            OfDouble(Spliterator.OfDouble spliterator, long skip, long limit) {
+                super(spliterator, skip, limit);
+            }
+
+            OfDouble(Spliterator.OfDouble spliterator, UnorderedSliceSpliterator.OfDouble parent) {
+                super(spliterator, parent);
+            }
+
+            @Override
+            protected Spliterator.OfDouble makeSpliterator(Spliterator.OfDouble spliterator) {
+                return new UnorderedSliceSpliterator.OfDouble(spliterator, this);
+            }
+
+            @Override
+            protected ArrayBuffer.OfDouble bufferCreate(int initialCapacity) {
+                return new ArrayBuffer.OfDouble(initialCapacity);
+            }
+
+            @Override
+            public void accept(double value) {
+                tmpValue = value;
+            }
+
+            @Override
+            protected void acceptConsumed(DoubleConsumer action) {
+                action.accept(tmpValue);
+            }
+        }
+ }
+
+ /**
+ * A wrapping spliterator that only reports distinct elements of the
+ * underlying spliterator. Does not preserve size or encounter order.
+ */
+ static final class DistinctSpliterator<T> implements Spliterator<T>, Consumer<T> {
+
+ // The value to represent null in the ConcurrentHashMap
+ private static final Object NULL_VALUE = new Object();
+
+ // The underlying spliterator
+ private final Spliterator<T> s;
+
+ // ConcurrentHashMap holding distinct elements as keys
+ private final ConcurrentHashMap<T, Boolean> seen;
+
+ // Temporary element, only used with tryAdvance
+ private T tmpSlot;
+
+ DistinctSpliterator(Spliterator<T> s) {
+ this(s, new ConcurrentHashMap<>());
+ }
+
+ private DistinctSpliterator(Spliterator<T> s, ConcurrentHashMap<T, Boolean> seen) {
+ this.s = s;
+ this.seen = seen;
+ }
+
+ @Override
+ public void accept(T t) {
+ this.tmpSlot = t;
+ }
+
+ @SuppressWarnings("unchecked")
+ private T mapNull(T t) {
+ return t != null ? t : (T) NULL_VALUE;
+ }
+
+ @Override
+ public boolean tryAdvance(Consumer<? super T> action) {
+ while (s.tryAdvance(this)) {
+ if (seen.putIfAbsent(mapNull(tmpSlot), Boolean.TRUE) == null) {
+ action.accept(tmpSlot);
+ tmpSlot = null;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public void forEachRemaining(Consumer<? super T> action) {
+ s.forEachRemaining(t -> {
+ if (seen.putIfAbsent(mapNull(t), Boolean.TRUE) == null) {
+ action.accept(t);
+ }
+ });
+ }
+
+ @Override
+ public Spliterator<T> trySplit() {
+ Spliterator<T> split = s.trySplit();
+ return (split != null) ? new DistinctSpliterator<>(split, seen) : null;
+ }
+
+ @Override
+ public long estimateSize() {
+ return s.estimateSize();
+ }
+
+ @Override
+ public int characteristics() {
+ return (s.characteristics() & ~(Spliterator.SIZED | Spliterator.SUBSIZED |
+ Spliterator.SORTED | Spliterator.ORDERED))
+ | Spliterator.DISTINCT;
+ }
+
+ @Override
+ public Comparator<? super T> getComparator() {
+ return s.getComparator();
+ }
+ }
+
+ /**
+ * A Spliterator that infinitely supplies elements in no particular order.
+ *
+ * <p>Splitting divides the estimated size in two and stops when the
+ * estimated size is 0.
+ *
+ * <p>The {@code forEachRemaining} method, if invoked, will never terminate.
+ * The {@code tryAdvance} method always returns true.
+ *
+ */
+    static abstract class InfiniteSupplyingSpliterator<T> implements Spliterator<T> {
+        // Advisory size estimate; halved on each split until it reaches 0.
+        long estimate;
+
+        protected InfiniteSupplyingSpliterator(long estimate) {
+            this.estimate = estimate;
+        }
+
+        @Override
+        public long estimateSize() {
+            return estimate;
+        }
+
+        @Override
+        public int characteristics() {
+            return IMMUTABLE;
+        }
+
+        // Note: each trySplit halves 'estimate' with "estimate >>>= 1" (the
+        // four subclasses previously mixed ">>>=" and "= ... >>> 1" forms;
+        // they are equivalent and have been made uniform).
+
+        /** Infinitely supplies reference elements from a {@code Supplier}. */
+        static final class OfRef<T> extends InfiniteSupplyingSpliterator<T> {
+            final Supplier<T> s;
+
+            OfRef(long size, Supplier<T> s) {
+                super(size);
+                this.s = s;
+            }
+
+            @Override
+            public boolean tryAdvance(Consumer<? super T> action) {
+                Objects.requireNonNull(action);
+
+                // The supply is infinite, so advancing always succeeds.
+                action.accept(s.get());
+                return true;
+            }
+
+            @Override
+            public Spliterator<T> trySplit() {
+                if (estimate == 0)
+                    return null;
+                return new InfiniteSupplyingSpliterator.OfRef<>(estimate >>>= 1, s);
+            }
+        }
+
+        /** Infinitely supplies {@code int} values from an {@code IntSupplier}. */
+        static final class OfInt extends InfiniteSupplyingSpliterator<Integer>
+                implements Spliterator.OfInt {
+            final IntSupplier s;
+
+            OfInt(long size, IntSupplier s) {
+                super(size);
+                this.s = s;
+            }
+
+            @Override
+            public boolean tryAdvance(IntConsumer action) {
+                Objects.requireNonNull(action);
+
+                action.accept(s.getAsInt());
+                return true;
+            }
+
+            @Override
+            public Spliterator.OfInt trySplit() {
+                if (estimate == 0)
+                    return null;
+                return new InfiniteSupplyingSpliterator.OfInt(estimate >>>= 1, s);
+            }
+        }
+
+        /** Infinitely supplies {@code long} values from a {@code LongSupplier}. */
+        static final class OfLong extends InfiniteSupplyingSpliterator<Long>
+                implements Spliterator.OfLong {
+            final LongSupplier s;
+
+            OfLong(long size, LongSupplier s) {
+                super(size);
+                this.s = s;
+            }
+
+            @Override
+            public boolean tryAdvance(LongConsumer action) {
+                Objects.requireNonNull(action);
+
+                action.accept(s.getAsLong());
+                return true;
+            }
+
+            @Override
+            public Spliterator.OfLong trySplit() {
+                if (estimate == 0)
+                    return null;
+                return new InfiniteSupplyingSpliterator.OfLong(estimate >>>= 1, s);
+            }
+        }
+
+        /** Infinitely supplies {@code double} values from a {@code DoubleSupplier}. */
+        static final class OfDouble extends InfiniteSupplyingSpliterator<Double>
+                implements Spliterator.OfDouble {
+            final DoubleSupplier s;
+
+            OfDouble(long size, DoubleSupplier s) {
+                super(size);
+                this.s = s;
+            }
+
+            @Override
+            public boolean tryAdvance(DoubleConsumer action) {
+                Objects.requireNonNull(action);
+
+                action.accept(s.getAsDouble());
+                return true;
+            }
+
+            @Override
+            public Spliterator.OfDouble trySplit() {
+                if (estimate == 0)
+                    return null;
+                return new InfiniteSupplyingSpliterator.OfDouble(estimate >>>= 1, s);
+            }
+        }
+    }
+
+ // @@@ Consolidate with Node.Builder
+    /**
+     * Fixed-capacity staging buffer used by the slice spliterators to hold
+     * optimistically-read chunks of elements before replaying them.
+     */
+    static abstract class ArrayBuffer {
+        // Next free slot; doubles as the count of buffered elements.
+        int index;
+
+        /** Logically empties the buffer so it can be refilled. */
+        void reset() {
+            index = 0;
+        }
+
+        /** Buffer of reference elements. */
+        static final class OfRef<T> extends ArrayBuffer implements Consumer<T> {
+            final Object[] array;
+
+            OfRef(int size) {
+                this.array = new Object[size];
+            }
+
+            @Override
+            public void accept(T t) {
+                array[index++] = t;
+            }
+
+            /** Replays the first {@code fence} buffered elements to {@code action}. */
+            public void forEach(Consumer<? super T> action, long fence) {
+                for (int i = 0; i < fence; i++) {
+                    @SuppressWarnings("unchecked")
+                    T t = (T) array[i];
+                    action.accept(t);
+                }
+            }
+        }
+
+        /**
+         * Base for the primitive buffers. Uses the inherited {@code index}
+         * and {@code reset()}; an earlier version redeclared {@code index}
+         * here, uselessly shadowing {@link ArrayBuffer#index}, and
+         * re-implemented {@code reset()} identically — both removed.
+         */
+        static abstract class OfPrimitive<T_CONS> extends ArrayBuffer {
+            /** Replays the first {@code fence} buffered values to {@code action}. */
+            abstract void forEach(T_CONS action, long fence);
+        }
+
+        /** Buffer of {@code int} values. */
+        static final class OfInt extends OfPrimitive<IntConsumer>
+                implements IntConsumer {
+            final int[] array;
+
+            OfInt(int size) {
+                this.array = new int[size];
+            }
+
+            @Override
+            public void accept(int t) {
+                array[index++] = t;
+            }
+
+            @Override
+            public void forEach(IntConsumer action, long fence) {
+                for (int i = 0; i < fence; i++) {
+                    action.accept(array[i]);
+                }
+            }
+        }
+
+        /** Buffer of {@code long} values. */
+        static final class OfLong extends OfPrimitive<LongConsumer>
+                implements LongConsumer {
+            final long[] array;
+
+            OfLong(int size) {
+                this.array = new long[size];
+            }
+
+            @Override
+            public void accept(long t) {
+                array[index++] = t;
+            }
+
+            @Override
+            public void forEach(LongConsumer action, long fence) {
+                for (int i = 0; i < fence; i++) {
+                    action.accept(array[i]);
+                }
+            }
+        }
+
+        /** Buffer of {@code double} values. */
+        static final class OfDouble extends OfPrimitive<DoubleConsumer>
+                implements DoubleConsumer {
+            final double[] array;
+
+            OfDouble(int size) {
+                this.array = new double[size];
+            }
+
+            @Override
+            public void accept(double t) {
+                array[index++] = t;
+            }
+
+            // 'public' added for consistency with OfInt/OfLong (was package-private).
+            @Override
+            public void forEach(DoubleConsumer action, long fence) {
+                for (int i = 0; i < fence; i++) {
+                    action.accept(array[i]);
+                }
+            }
+        }
+    }
+}
+
diff --git a/ojluni/src/main/java/java/util/stream/StreamSupport.java b/ojluni/src/main/java/java/util/stream/StreamSupport.java
new file mode 100644
index 0000000..9a1820c
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/StreamSupport.java
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.function.Supplier;
+
+/**
+ * Low-level utility methods for creating and manipulating streams.
+ *
+ * <p>This class is mostly for library writers presenting stream views
+ * of data structures; most static stream methods intended for end users are in
+ * the various {@code Stream} classes.
+ *
+ * @since 1.8
+ */
+/**
+ * Low-level utility methods for creating and manipulating streams.
+ *
+ * <p>This class is mostly for library writers presenting stream views
+ * of data structures; most static stream methods intended for end users are in
+ * the various {@code Stream} classes.
+ *
+ * <p>All factory methods fail fast with {@code NullPointerException} on a
+ * {@code null} spliterator or supplier (previously only the reference-stream
+ * factories checked; the primitive factories deferred the NPE to pipeline
+ * construction).
+ *
+ * @since 1.8
+ */
+public final class StreamSupport {
+
+    // Suppresses default constructor, ensuring non-instantiability.
+    private StreamSupport() {}
+
+    /**
+     * Creates a new sequential or parallel {@code Stream} from a
+     * {@code Spliterator}.
+     *
+     * <p>The spliterator is only traversed, split, or queried for estimated
+     * size after the terminal operation of the stream pipeline commences.
+     * Unless the spliterator reports {@code IMMUTABLE} or {@code CONCURRENT},
+     * or is late-binding, prefer
+     * {@link #stream(java.util.function.Supplier, int, boolean)} to reduce the
+     * scope of potential interference with the source.
+     *
+     * @param <T> the type of stream elements
+     * @param spliterator a {@code Spliterator} describing the stream elements
+     * @param parallel if {@code true} the returned stream is parallel,
+     *        otherwise sequential
+     * @return a new sequential or parallel {@code Stream}
+     */
+    public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) {
+        Objects.requireNonNull(spliterator);
+        return new ReferencePipeline.Head<>(spliterator,
+                                            StreamOpFlag.fromCharacteristics(spliterator),
+                                            parallel);
+    }
+
+    /**
+     * Creates a new sequential or parallel {@code Stream} from a
+     * {@code Supplier} of {@code Spliterator}.
+     *
+     * <p>{@link Supplier#get()} is invoked at most once, and only after the
+     * terminal operation commences, so modifications to the source up to that
+     * point are reflected in the result. This indirection reduces the scope of
+     * potential interference for spliterators that are not {@code IMMUTABLE},
+     * {@code CONCURRENT}, or late-binding.
+     *
+     * @param <T> the type of stream elements
+     * @param supplier a {@code Supplier} of a {@code Spliterator}
+     * @param characteristics characteristics of the supplied
+     *        {@code Spliterator}; must equal
+     *        {@code supplier.get().characteristics()}, otherwise behavior is
+     *        undefined when the terminal operation commences
+     * @param parallel if {@code true} the returned stream is parallel,
+     *        otherwise sequential
+     * @return a new sequential or parallel {@code Stream}
+     * @see #stream(java.util.Spliterator, boolean)
+     */
+    public static <T> Stream<T> stream(Supplier<? extends Spliterator<T>> supplier,
+                                       int characteristics,
+                                       boolean parallel) {
+        Objects.requireNonNull(supplier);
+        return new ReferencePipeline.Head<>(supplier,
+                                            StreamOpFlag.fromCharacteristics(characteristics),
+                                            parallel);
+    }
+
+    /**
+     * Creates a new sequential or parallel {@code IntStream} from a
+     * {@code Spliterator.OfInt}; see {@link #stream(java.util.Spliterator, boolean)}
+     * for traversal and interference notes.
+     *
+     * @param spliterator a {@code Spliterator.OfInt} describing the stream elements
+     * @param parallel if {@code true} the returned stream is parallel,
+     *        otherwise sequential
+     * @return a new sequential or parallel {@code IntStream}
+     */
+    public static IntStream intStream(Spliterator.OfInt spliterator, boolean parallel) {
+        Objects.requireNonNull(spliterator);
+        return new IntPipeline.Head<>(spliterator,
+                                      StreamOpFlag.fromCharacteristics(spliterator),
+                                      parallel);
+    }
+
+    /**
+     * Creates a new sequential or parallel {@code IntStream} from a
+     * {@code Supplier} of {@code Spliterator.OfInt}; see
+     * {@link #stream(java.util.function.Supplier, int, boolean)} for supplier
+     * invocation and interference notes.
+     *
+     * @param supplier a {@code Supplier} of a {@code Spliterator.OfInt}
+     * @param characteristics characteristics of the supplied spliterator; must
+     *        equal {@code supplier.get().characteristics()}
+     * @param parallel if {@code true} the returned stream is parallel,
+     *        otherwise sequential
+     * @return a new sequential or parallel {@code IntStream}
+     * @see #intStream(java.util.Spliterator.OfInt, boolean)
+     */
+    public static IntStream intStream(Supplier<? extends Spliterator.OfInt> supplier,
+                                      int characteristics,
+                                      boolean parallel) {
+        Objects.requireNonNull(supplier);
+        return new IntPipeline.Head<>(supplier,
+                                      StreamOpFlag.fromCharacteristics(characteristics),
+                                      parallel);
+    }
+
+    /**
+     * Creates a new sequential or parallel {@code LongStream} from a
+     * {@code Spliterator.OfLong}; see {@link #stream(java.util.Spliterator, boolean)}
+     * for traversal and interference notes.
+     *
+     * @param spliterator a {@code Spliterator.OfLong} describing the stream elements
+     * @param parallel if {@code true} the returned stream is parallel,
+     *        otherwise sequential
+     * @return a new sequential or parallel {@code LongStream}
+     */
+    public static LongStream longStream(Spliterator.OfLong spliterator,
+                                        boolean parallel) {
+        Objects.requireNonNull(spliterator);
+        return new LongPipeline.Head<>(spliterator,
+                                       StreamOpFlag.fromCharacteristics(spliterator),
+                                       parallel);
+    }
+
+    /**
+     * Creates a new sequential or parallel {@code LongStream} from a
+     * {@code Supplier} of {@code Spliterator.OfLong}; see
+     * {@link #stream(java.util.function.Supplier, int, boolean)} for supplier
+     * invocation and interference notes.
+     *
+     * @param supplier a {@code Supplier} of a {@code Spliterator.OfLong}
+     * @param characteristics characteristics of the supplied spliterator; must
+     *        equal {@code supplier.get().characteristics()}
+     * @param parallel if {@code true} the returned stream is parallel,
+     *        otherwise sequential
+     * @return a new sequential or parallel {@code LongStream}
+     * @see #longStream(java.util.Spliterator.OfLong, boolean)
+     */
+    public static LongStream longStream(Supplier<? extends Spliterator.OfLong> supplier,
+                                        int characteristics,
+                                        boolean parallel) {
+        Objects.requireNonNull(supplier);
+        return new LongPipeline.Head<>(supplier,
+                                       StreamOpFlag.fromCharacteristics(characteristics),
+                                       parallel);
+    }
+
+    /**
+     * Creates a new sequential or parallel {@code DoubleStream} from a
+     * {@code Spliterator.OfDouble}; see {@link #stream(java.util.Spliterator, boolean)}
+     * for traversal and interference notes.
+     *
+     * @param spliterator a {@code Spliterator.OfDouble} describing the stream elements
+     * @param parallel if {@code true} the returned stream is parallel,
+     *        otherwise sequential
+     * @return a new sequential or parallel {@code DoubleStream}
+     */
+    public static DoubleStream doubleStream(Spliterator.OfDouble spliterator,
+                                            boolean parallel) {
+        Objects.requireNonNull(spliterator);
+        return new DoublePipeline.Head<>(spliterator,
+                                         StreamOpFlag.fromCharacteristics(spliterator),
+                                         parallel);
+    }
+
+    /**
+     * Creates a new sequential or parallel {@code DoubleStream} from a
+     * {@code Supplier} of {@code Spliterator.OfDouble}; see
+     * {@link #stream(java.util.function.Supplier, int, boolean)} for supplier
+     * invocation and interference notes.
+     *
+     * @param supplier a {@code Supplier} of a {@code Spliterator.OfDouble}
+     * @param characteristics characteristics of the supplied spliterator; must
+     *        equal {@code supplier.get().characteristics()}
+     * @param parallel if {@code true} the returned stream is parallel,
+     *        otherwise sequential
+     * @return a new sequential or parallel {@code DoubleStream}
+     * @see #doubleStream(java.util.Spliterator.OfDouble, boolean)
+     */
+    public static DoubleStream doubleStream(Supplier<? extends Spliterator.OfDouble> supplier,
+                                            int characteristics,
+                                            boolean parallel) {
+        Objects.requireNonNull(supplier);
+        return new DoublePipeline.Head<>(supplier,
+                                         StreamOpFlag.fromCharacteristics(characteristics),
+                                         parallel);
+    }
+}
diff --git a/ojluni/src/main/java/java/util/stream/Streams.java b/ojluni/src/main/java/java/util/stream/Streams.java
new file mode 100644
index 0000000..072691a
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/Streams.java
@@ -0,0 +1,896 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Comparator;
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.LongConsumer;
+
+/**
+ * Utility methods for operating on and creating streams.
+ *
+ * <p>Unless otherwise stated, streams are created as sequential streams. A
+ * sequential stream can be transformed into a parallel stream by calling the
+ * {@code parallel()} method on the created stream.
+ *
+ * @since 1.8
+ */
+final class Streams {
+
+    private Streams() {
+        // Static utility holder; instantiation is a programming error.
+        throw new Error("no instances");
+    }
+
+    /**
+     * An object instance representing no value, that cannot be an actual
+     * data element of a stream. Used when processing streams that can contain
+     * {@code null} elements to distinguish between a {@code null} value and no
+     * value.
+     */
+    static final Object NONE = new Object();
+
+ /**
+ * An {@code int} range spliterator.
+ */
+    static final class RangeIntSpliterator implements Spliterator.OfInt {
+        // Can never be greater than upTo; this avoids overflow if upper bound
+        // is Integer.MAX_VALUE
+        // All elements are traversed if from == upTo & last == 0
+        private int from;
+        private final int upTo;
+        // 1 if the range is closed and the last element has not been traversed
+        // Otherwise, 0 if the range is open, or is a closed range and all
+        // elements have been traversed
+        private int last;
+
+        RangeIntSpliterator(int from, int upTo, boolean closed) {
+            this(from, upTo, closed ? 1 : 0);
+        }
+
+        private RangeIntSpliterator(int from, int upTo, int last) {
+            this.from = from;
+            this.upTo = upTo;
+            this.last = last;
+        }
+
+        @Override
+        public boolean tryAdvance(IntConsumer consumer) {
+            Objects.requireNonNull(consumer);
+
+            final int i = from;
+            if (i < upTo) {
+                from++;
+                consumer.accept(i);
+                return true;
+            }
+            else if (last > 0) {
+                // Emit the final element of a closed range exactly once.
+                last = 0;
+                consumer.accept(i);
+                return true;
+            }
+            return false;
+        }
+
+        @Override
+        public void forEachRemaining(IntConsumer consumer) {
+            Objects.requireNonNull(consumer);
+
+            // Snapshot the bounds, then mark this spliterator exhausted
+            // before traversing.
+            int i = from;
+            final int hUpTo = upTo;
+            int hLast = last;
+            from = upTo;
+            last = 0;
+            while (i < hUpTo) {
+                consumer.accept(i++);
+            }
+            if (hLast > 0) {
+                // Last element of closed range
+                consumer.accept(i);
+            }
+        }
+
+        @Override
+        public long estimateSize() {
+            // Ensure ranges of size > Integer.MAX_VALUE report the correct size
+            return ((long) upTo) - from + last;
+        }
+
+        @Override
+        public int characteristics() {
+            return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED |
+                   Spliterator.IMMUTABLE | Spliterator.NONNULL |
+                   Spliterator.DISTINCT | Spliterator.SORTED;
+        }
+
+        @Override
+        public Comparator<? super Integer> getComparator() {
+            // SORTED by natural order, so no explicit comparator.
+            return null;
+        }
+
+        @Override
+        public Spliterator.OfInt trySplit() {
+            long size = estimateSize();
+            return size <= 1
+                   ? null
+                   // Left split always has a half-open range
+                   // Note: 'from' is advanced as a side effect of evaluating
+                   // the second constructor argument.
+                   : new RangeIntSpliterator(from, from = from + splitPoint(size), 0);
+        }
+
+        /**
+         * The spliterator size below which the spliterator will be split
+         * at the mid-point to produce balanced splits. Above this size the
+         * spliterator will be split at a ratio of
+         * 1:(RIGHT_BALANCED_SPLIT_RATIO - 1)
+         * to produce right-balanced splits.
+         *
+         * <p>Such splitting ensures that for very large ranges that the left
+         * side of the range will more likely be processed at a lower-depth
+         * than a balanced tree at the expense of a higher-depth for the right
+         * side of the range.
+         *
+         * <p>This is optimized for cases such as IntStream.ints() that is
+         * implemented as range of 0 to Integer.MAX_VALUE but is likely to be
+         * augmented with a limit operation that limits the number of elements
+         * to a count lower than this threshold.
+         */
+        private static final int BALANCED_SPLIT_THRESHOLD = 1 << 24;
+
+        /**
+         * The split ratio of the left and right split when the spliterator
+         * size is above BALANCED_SPLIT_THRESHOLD.
+         */
+        private static final int RIGHT_BALANCED_SPLIT_RATIO = 1 << 3;
+
+        private int splitPoint(long size) {
+            int d = (size < BALANCED_SPLIT_THRESHOLD) ? 2 : RIGHT_BALANCED_SPLIT_RATIO;
+            // Cast to int is safe since:
+            // 2 <= size < 2^32
+            // 2 <= d <= 8
+            return (int) (size / d);
+        }
+    }
+
+ /**
+ * A {@code long} range spliterator.
+ *
+ * This implementation cannot be used for ranges whose size is greater
+ * than Long.MAX_VALUE
+ */
+    static final class RangeLongSpliterator implements Spliterator.OfLong {
+        // Can never be greater than upTo; this avoids overflow if upper bound
+        // is Long.MAX_VALUE
+        // All elements are traversed if from == upTo & last == 0
+        private long from;
+        private final long upTo;
+        // 1 if the range is closed and the last element has not been traversed
+        // Otherwise, 0 if the range is open, or is a closed range and all
+        // elements have been traversed
+        private int last;
+
+        RangeLongSpliterator(long from, long upTo, boolean closed) {
+            this(from, upTo, closed ? 1 : 0);
+        }
+
+        private RangeLongSpliterator(long from, long upTo, int last) {
+            assert upTo - from + last > 0;
+            this.from = from;
+            this.upTo = upTo;
+            this.last = last;
+        }
+
+        @Override
+        public boolean tryAdvance(LongConsumer consumer) {
+            Objects.requireNonNull(consumer);
+
+            final long i = from;
+            if (i < upTo) {
+                from++;
+                consumer.accept(i);
+                return true;
+            }
+            else if (last > 0) {
+                // Emit the final element of a closed range exactly once.
+                last = 0;
+                consumer.accept(i);
+                return true;
+            }
+            return false;
+        }
+
+        @Override
+        public void forEachRemaining(LongConsumer consumer) {
+            Objects.requireNonNull(consumer);
+
+            // Snapshot the bounds, then mark this spliterator exhausted
+            // before traversing.
+            long i = from;
+            final long hUpTo = upTo;
+            int hLast = last;
+            from = upTo;
+            last = 0;
+            while (i < hUpTo) {
+                consumer.accept(i++);
+            }
+            if (hLast > 0) {
+                // Last element of closed range
+                consumer.accept(i);
+            }
+        }
+
+        @Override
+        public long estimateSize() {
+            return upTo - from + last;
+        }
+
+        @Override
+        public int characteristics() {
+            return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED |
+                   Spliterator.IMMUTABLE | Spliterator.NONNULL |
+                   Spliterator.DISTINCT | Spliterator.SORTED;
+        }
+
+        @Override
+        public Comparator<? super Long> getComparator() {
+            // SORTED by natural order, so no explicit comparator.
+            return null;
+        }
+
+        @Override
+        public Spliterator.OfLong trySplit() {
+            long size = estimateSize();
+            return size <= 1
+                   ? null
+                   // Left split always has a half-open range
+                   // Note: 'from' is advanced as a side effect of evaluating
+                   // the second constructor argument.
+                   : new RangeLongSpliterator(from, from = from + splitPoint(size), 0);
+        }
+
+        /**
+         * The spliterator size below which the spliterator will be split
+         * at the mid-point to produce balanced splits. Above this size the
+         * spliterator will be split at a ratio of
+         * 1:(RIGHT_BALANCED_SPLIT_RATIO - 1)
+         * to produce right-balanced splits.
+         *
+         * <p>Such splitting ensures that for very large ranges that the left
+         * side of the range will more likely be processed at a lower-depth
+         * than a balanced tree at the expense of a higher-depth for the right
+         * side of the range.
+         *
+         * <p>This is optimized for cases such as LongStream.longs() that is
+         * implemented as range of 0 to Long.MAX_VALUE but is likely to be
+         * augmented with a limit operation that limits the number of elements
+         * to a count lower than this threshold.
+         */
+        private static final long BALANCED_SPLIT_THRESHOLD = 1 << 24;
+
+        /**
+         * The split ratio of the left and right split when the spliterator
+         * size is above BALANCED_SPLIT_THRESHOLD.
+         */
+        private static final long RIGHT_BALANCED_SPLIT_RATIO = 1 << 3;
+
+        private long splitPoint(long size) {
+            long d = (size < BALANCED_SPLIT_THRESHOLD) ? 2 : RIGHT_BALANCED_SPLIT_RATIO;
+            // 2 <= size <= Long.MAX_VALUE
+            return size / d;
+        }
+    }
+
+    /**
+     * Shared base for the stream builders: holds the build-state encoding and
+     * doubles as a Spliterator over the built stream when it contains 0 or 1
+     * elements.
+     */
+    private static abstract class AbstractStreamBuilderImpl<T, S extends Spliterator<T>> implements Spliterator<T> {
+        // >= 0 when building, < 0 when built
+        // -1 == no elements
+        // -2 == one element, held by first
+        // -3 == two or more elements, held by buffer
+        int count;
+
+        // Spliterator implementation for 0 or 1 element
+        // count == -1 for no elements
+        // count == -2 for one element held by first
+
+        @Override
+        public S trySplit() {
+            // At most one element remains; nothing to split.
+            return null;
+        }
+
+        @Override
+        public long estimateSize() {
+            // Decodes the negative 'built' encoding: -1 -> 0 elements, -2 -> 1.
+            return -count - 1;
+        }
+
+        @Override
+        public int characteristics() {
+            return Spliterator.SIZED | Spliterator.SUBSIZED |
+                   Spliterator.ORDERED | Spliterator.IMMUTABLE;
+        }
+    }
+
+    static final class StreamBuilderImpl<T>
+            extends AbstractStreamBuilderImpl<T, Spliterator<T>>
+            implements Stream.Builder<T> {
+        // The first element in the stream
+        // valid if count == 1
+        T first;
+
+        // The first and subsequent elements in the stream
+        // non-null if count == 2
+        SpinedBuffer<T> buffer;
+
+        /**
+         * Constructor for building a stream of 0 or more elements.
+         */
+        StreamBuilderImpl() { }
+
+        /**
+         * Constructor for a singleton stream.
+         *
+         * @param t the single element
+         */
+        StreamBuilderImpl(T t) {
+            first = t;
+            count = -2;
+        }
+
+        // StreamBuilder implementation
+
+        /**
+         * Adds an element to the stream being built.
+         *
+         * @throws IllegalStateException if {@link #build()} has already been
+         *         called
+         */
+        @Override
+        public void accept(T t) {
+            if (count == 0) {
+                first = t;
+                count++;
+            }
+            else if (count > 0) {
+                if (buffer == null) {
+                    // Second element: spill the cached first into a buffer.
+                    buffer = new SpinedBuffer<>();
+                    buffer.accept(first);
+                    count++;
+                }
+
+                buffer.accept(t);
+            }
+            else {
+                throw new IllegalStateException();
+            }
+        }
+
+        // Overrides the Stream.Builder default to return this concrete
+        // builder; @Override added for consistency with the other overrides.
+        @Override
+        public Stream.Builder<T> add(T t) {
+            accept(t);
+            return this;
+        }
+
+        /**
+         * Builds the stream, transitioning this builder to the built state.
+         *
+         * @throws IllegalStateException if the builder has already been built
+         */
+        @Override
+        public Stream<T> build() {
+            int c = count;
+            if (c >= 0) {
+                // Switch count to negative value signalling the builder is built
+                count = -count - 1;
+                // Use this spliterator if 0 or 1 elements, otherwise use
+                // the spliterator of the spined buffer
+                return (c < 2) ? StreamSupport.stream(this, false) : StreamSupport.stream(buffer.spliterator(), false);
+            }
+
+            throw new IllegalStateException();
+        }
+
+        // Spliterator implementation for 0 or 1 element
+        // count == -1 for no elements
+        // count == -2 for one element held by first
+
+        @Override
+        public boolean tryAdvance(Consumer<? super T> action) {
+            Objects.requireNonNull(action);
+
+            if (count == -2) {
+                action.accept(first);
+                count = -1;
+                return true;
+            }
+            else {
+                return false;
+            }
+        }
+
+        @Override
+        public void forEachRemaining(Consumer<? super T> action) {
+            Objects.requireNonNull(action);
+
+            if (count == -2) {
+                action.accept(first);
+                count = -1;
+            }
+        }
+    }
+
+ static final class IntStreamBuilderImpl
+ extends AbstractStreamBuilderImpl<Integer, Spliterator.OfInt>
+ implements IntStream.Builder, Spliterator.OfInt {
+ // The first element in the stream
+ // valid if count == 1
+ int first;
+
+ // The first and subsequent elements in the stream
+ // non-null if count == 2
+ SpinedBuffer.OfInt buffer;
+
+ /**
+ * Constructor for building a stream of 0 or more elements.
+ */
+ IntStreamBuilderImpl() { }
+
+ /**
+ * Constructor for a singleton stream.
+ *
+ * @param t the single element
+ */
+ IntStreamBuilderImpl(int t) {
+ first = t;
+ count = -2;
+ }
+
+ // StreamBuilder implementation
+
+ @Override
+ public void accept(int t) {
+ if (count == 0) {
+ first = t;
+ count++;
+ }
+ else if (count > 0) {
+ if (buffer == null) {
+ buffer = new SpinedBuffer.OfInt();
+ buffer.accept(first);
+ count++;
+ }
+
+ buffer.accept(t);
+ }
+ else {
+ throw new IllegalStateException();
+ }
+ }
+
+ @Override
+ public IntStream build() {
+ int c = count;
+ if (c >= 0) {
+ // Switch count to negative value signalling the builder is built
+ count = -count - 1;
+ // Use this spliterator if 0 or 1 elements, otherwise use
+ // the spliterator of the spined buffer
+ return (c < 2) ? StreamSupport.intStream(this, false) : StreamSupport.intStream(buffer.spliterator(), false);
+ }
+
+ throw new IllegalStateException();
+ }
+
+ // Spliterator implementation for 0 or 1 element
+ // count == -1 for no elements
+ // count == -2 for one element held by first
+
+ @Override
+ public boolean tryAdvance(IntConsumer action) {
+ Objects.requireNonNull(action);
+
+ if (count == -2) {
+ action.accept(first);
+ count = -1;
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
+
+ @Override
+ public void forEachRemaining(IntConsumer action) {
+ Objects.requireNonNull(action);
+
+ if (count == -2) {
+ action.accept(first);
+ count = -1;
+ }
+ }
+ }
+
+ static final class LongStreamBuilderImpl
+ extends AbstractStreamBuilderImpl<Long, Spliterator.OfLong>
+ implements LongStream.Builder, Spliterator.OfLong {
+ // The first element in the stream
+ // valid if count == 1
+ long first;
+
+ // The first and subsequent elements in the stream
+ // non-null if count == 2
+ SpinedBuffer.OfLong buffer;
+
+ /**
+ * Constructor for building a stream of 0 or more elements.
+ */
+ LongStreamBuilderImpl() { }
+
+ /**
+ * Constructor for a singleton stream.
+ *
+ * @param t the single element
+ */
+ LongStreamBuilderImpl(long t) {
+ first = t;
+ count = -2;
+ }
+
+ // StreamBuilder implementation
+
+ @Override
+ public void accept(long t) {
+ if (count == 0) {
+ first = t;
+ count++;
+ }
+ else if (count > 0) {
+ if (buffer == null) {
+ buffer = new SpinedBuffer.OfLong();
+ buffer.accept(first);
+ count++;
+ }
+
+ buffer.accept(t);
+ }
+ else {
+ throw new IllegalStateException();
+ }
+ }
+
+ @Override
+ public LongStream build() {
+ int c = count;
+ if (c >= 0) {
+ // Switch count to negative value signalling the builder is built
+ count = -count - 1;
+ // Use this spliterator if 0 or 1 elements, otherwise use
+ // the spliterator of the spined buffer
+ return (c < 2) ? StreamSupport.longStream(this, false) : StreamSupport.longStream(buffer.spliterator(), false);
+ }
+
+ throw new IllegalStateException();
+ }
+
+ // Spliterator implementation for 0 or 1 element
+ // count == -1 for no elements
+ // count == -2 for one element held by first
+
+ @Override
+ public boolean tryAdvance(LongConsumer action) {
+ Objects.requireNonNull(action);
+
+ if (count == -2) {
+ action.accept(first);
+ count = -1;
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
+
+ @Override
+ public void forEachRemaining(LongConsumer action) {
+ Objects.requireNonNull(action);
+
+ if (count == -2) {
+ action.accept(first);
+ count = -1;
+ }
+ }
+ }
+
+ static final class DoubleStreamBuilderImpl
+ extends AbstractStreamBuilderImpl<Double, Spliterator.OfDouble>
+ implements DoubleStream.Builder, Spliterator.OfDouble {
+ // The first element in the stream
+ // valid if count == 1
+ double first;
+
+ // The first and subsequent elements in the stream
+ // non-null if count == 2
+ SpinedBuffer.OfDouble buffer;
+
+ /**
+ * Constructor for building a stream of 0 or more elements.
+ */
+ DoubleStreamBuilderImpl() { }
+
+ /**
+ * Constructor for a singleton stream.
+ *
+ * @param t the single element
+ */
+ DoubleStreamBuilderImpl(double t) {
+ first = t;
+ count = -2;
+ }
+
+ // StreamBuilder implementation
+
+ @Override
+ public void accept(double t) {
+ if (count == 0) {
+ first = t;
+ count++;
+ }
+ else if (count > 0) {
+ if (buffer == null) {
+ buffer = new SpinedBuffer.OfDouble();
+ buffer.accept(first);
+ count++;
+ }
+
+ buffer.accept(t);
+ }
+ else {
+ throw new IllegalStateException();
+ }
+ }
+
+ @Override
+ public DoubleStream build() {
+ int c = count;
+ if (c >= 0) {
+ // Switch count to negative value signalling the builder is built
+ count = -count - 1;
+ // Use this spliterator if 0 or 1 elements, otherwise use
+ // the spliterator of the spined buffer
+ return (c < 2) ? StreamSupport.doubleStream(this, false) : StreamSupport.doubleStream(buffer.spliterator(), false);
+ }
+
+ throw new IllegalStateException();
+ }
+
+ // Spliterator implementation for 0 or 1 element
+ // count == -1 for no elements
+ // count == -2 for one element held by first
+
+ @Override
+ public boolean tryAdvance(DoubleConsumer action) {
+ Objects.requireNonNull(action);
+
+ if (count == -2) {
+ action.accept(first);
+ count = -1;
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
+
+ @Override
+ public void forEachRemaining(DoubleConsumer action) {
+ Objects.requireNonNull(action);
+
+ if (count == -2) {
+ action.accept(first);
+ count = -1;
+ }
+ }
+ }
+
+    /**
+     * A spliterator over the concatenation of two spliterators.  Traversal
+     * first drains {@code aSpliterator}, then {@code bSpliterator}.  The
+     * first call to {@code trySplit} hands off the entire left spliterator.
+     */
+    abstract static class ConcatSpliterator<T, T_SPLITR extends Spliterator<T>>
+            implements Spliterator<T> {
+        protected final T_SPLITR aSpliterator;
+        protected final T_SPLITR bSpliterator;
+        // True when no split has occurred, otherwise false
+        boolean beforeSplit;
+        // Never read after splitting
+        final boolean unsized;
+
+        public ConcatSpliterator(T_SPLITR aSpliterator, T_SPLITR bSpliterator) {
+            this.aSpliterator = aSpliterator;
+            this.bSpliterator = bSpliterator;
+            beforeSplit = true;
+            // The spliterator is known to be unsized before splitting if the
+            // sum of the estimates overflows.
+            unsized = aSpliterator.estimateSize() + bSpliterator.estimateSize() < 0;
+        }
+
+        @Override
+        public T_SPLITR trySplit() {
+            // The very first split returns all of a; this spliterator then
+            // permanently degenerates to b (and further splits come from b).
+            @SuppressWarnings("unchecked")
+            T_SPLITR ret = beforeSplit ? aSpliterator : (T_SPLITR) bSpliterator.trySplit();
+            beforeSplit = false;
+            return ret;
+        }
+
+        @Override
+        public boolean tryAdvance(Consumer<? super T> consumer) {
+            boolean hasNext;
+            if (beforeSplit) {
+                hasNext = aSpliterator.tryAdvance(consumer);
+                if (!hasNext) {
+                    // a is exhausted; fall through to b from now on.
+                    beforeSplit = false;
+                    hasNext = bSpliterator.tryAdvance(consumer);
+                }
+            }
+            else
+                hasNext = bSpliterator.tryAdvance(consumer);
+            return hasNext;
+        }
+
+        @Override
+        public void forEachRemaining(Consumer<? super T> consumer) {
+            if (beforeSplit)
+                aSpliterator.forEachRemaining(consumer);
+            bSpliterator.forEachRemaining(consumer);
+        }
+
+        @Override
+        public long estimateSize() {
+            if (beforeSplit) {
+                // If one or both estimates are Long.MAX_VALUE then the sum
+                // will either be Long.MAX_VALUE or overflow to a negative value
+                long size = aSpliterator.estimateSize() + bSpliterator.estimateSize();
+                return (size >= 0) ? size : Long.MAX_VALUE;
+            }
+            else {
+                return bSpliterator.estimateSize();
+            }
+        }
+
+        @Override
+        public int characteristics() {
+            if (beforeSplit) {
+                // Concatenation loses DISTINCT and SORTED characteristics
+                // (and SIZED/SUBSIZED when the combined size overflowed).
+                return aSpliterator.characteristics() & bSpliterator.characteristics()
+                       & ~(Spliterator.DISTINCT | Spliterator.SORTED
+                           | (unsized ? Spliterator.SIZED | Spliterator.SUBSIZED : 0));
+            }
+            else {
+                return bSpliterator.characteristics();
+            }
+        }
+
+        @Override
+        public Comparator<? super T> getComparator() {
+            // Before splitting SORTED is never reported (masked above), so
+            // asking for a comparator is a contract violation.
+            if (beforeSplit)
+                throw new IllegalStateException();
+            return bSpliterator.getComparator();
+        }
+
+        static class OfRef<T> extends ConcatSpliterator<T, Spliterator<T>> {
+            OfRef(Spliterator<T> aSpliterator, Spliterator<T> bSpliterator) {
+                super(aSpliterator, bSpliterator);
+            }
+        }
+
+        private static abstract class OfPrimitive<T, T_CONS, T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>>
+                extends ConcatSpliterator<T, T_SPLITR>
+                implements Spliterator.OfPrimitive<T, T_CONS, T_SPLITR> {
+            private OfPrimitive(T_SPLITR aSpliterator, T_SPLITR bSpliterator) {
+                super(aSpliterator, bSpliterator);
+            }
+
+            // Primitive-consumer overloads mirroring the boxed versions above,
+            // avoiding boxing during traversal.
+
+            @Override
+            public boolean tryAdvance(T_CONS action) {
+                boolean hasNext;
+                if (beforeSplit) {
+                    hasNext = aSpliterator.tryAdvance(action);
+                    if (!hasNext) {
+                        beforeSplit = false;
+                        hasNext = bSpliterator.tryAdvance(action);
+                    }
+                }
+                else
+                    hasNext = bSpliterator.tryAdvance(action);
+                return hasNext;
+            }
+
+            @Override
+            public void forEachRemaining(T_CONS action) {
+                if (beforeSplit)
+                    aSpliterator.forEachRemaining(action);
+                bSpliterator.forEachRemaining(action);
+            }
+        }
+
+        static class OfInt
+                extends ConcatSpliterator.OfPrimitive<Integer, IntConsumer, Spliterator.OfInt>
+                implements Spliterator.OfInt {
+            OfInt(Spliterator.OfInt aSpliterator, Spliterator.OfInt bSpliterator) {
+                super(aSpliterator, bSpliterator);
+            }
+        }
+
+        static class OfLong
+                extends ConcatSpliterator.OfPrimitive<Long, LongConsumer, Spliterator.OfLong>
+                implements Spliterator.OfLong {
+            OfLong(Spliterator.OfLong aSpliterator, Spliterator.OfLong bSpliterator) {
+                super(aSpliterator, bSpliterator);
+            }
+        }
+
+        static class OfDouble
+                extends ConcatSpliterator.OfPrimitive<Double, DoubleConsumer, Spliterator.OfDouble>
+                implements Spliterator.OfDouble {
+            OfDouble(Spliterator.OfDouble aSpliterator, Spliterator.OfDouble bSpliterator) {
+                super(aSpliterator, bSpliterator);
+            }
+        }
+    }
+
+ /**
+ * Given two Runnables, return a Runnable that executes both in sequence,
+ * even if the first throws an exception, and if both throw exceptions, add
+ * any exceptions thrown by the second as suppressed exceptions of the first.
+ */
+    static Runnable composeWithExceptions(Runnable a, Runnable b) {
+        return () -> {
+            try {
+                a.run();
+            }
+            catch (Throwable e1) {
+                // a failed: still give b its turn, folding any secondary
+                // failure into e1 as a suppressed exception before rethrowing.
+                try {
+                    b.run();
+                }
+                catch (Throwable e2) {
+                    try {
+                        e1.addSuppressed(e2);
+                    } catch (Throwable ignore) {}
+                }
+                throw e1;
+            }
+            // a succeeded: run b normally, letting its exceptions propagate.
+            b.run();
+        };
+    }
+
+ /**
+ * Given two streams, return a Runnable that
+ * executes both of their {@link BaseStream#close} methods in sequence,
+ * even if the first throws an exception, and if both throw exceptions, add
+ * any exceptions thrown by the second as suppressed exceptions of the first.
+ */
+    static Runnable composedClose(BaseStream<?, ?> a, BaseStream<?, ?> b) {
+        return () -> {
+            try {
+                a.close();
+            }
+            catch (Throwable e1) {
+                // Closing a failed: still close b, recording any secondary
+                // failure as a suppressed exception of the first.
+                try {
+                    b.close();
+                }
+                catch (Throwable e2) {
+                    try {
+                        e1.addSuppressed(e2);
+                    } catch (Throwable ignore) {}
+                }
+                throw e1;
+            }
+            // a closed cleanly: close b normally.
+            b.close();
+        };
+    }
+}
diff --git a/ojluni/src/main/java/java/util/stream/TerminalOp.java b/ojluni/src/main/java/java/util/stream/TerminalOp.java
new file mode 100644
index 0000000..a6e8ae1
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/TerminalOp.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+
+/**
+ * An operation in a stream pipeline that takes a stream as input and produces
+ * a result or side-effect. A {@code TerminalOp} has an input type and stream
+ * shape, and a result type. A {@code TerminalOp} also has a set of
+ * <em>operation flags</em> that describes how the operation processes elements
+ * of the stream (such as short-circuiting or respecting encounter order; see
+ * {@link StreamOpFlag}).
+ *
+ * <p>A {@code TerminalOp} must provide a sequential and parallel implementation
+ * of the operation relative to a given stream source and set of intermediate
+ * operations.
+ *
+ * @param <E_IN> the type of input elements
+ * @param <R> the type of the result
+ * @since 1.8
+ */
+interface TerminalOp<E_IN, R> {
+    /**
+     * Gets the shape of the input type of this operation.
+     *
+     * @implSpec The default returns {@code StreamShape.REFERENCE}.
+     *
+     * @return StreamShape of the input type of this operation
+     */
+    default StreamShape inputShape() { return StreamShape.REFERENCE; }
+
+    /**
+     * Gets the stream flags of the operation. Terminal operations may set a
+     * limited subset of the stream flags defined in {@link StreamOpFlag}, and
+     * these flags are combined with the previously combined stream and
+     * intermediate operation flags for the pipeline.
+     *
+     * @implSpec The default implementation returns zero.
+     *
+     * @return the stream flags for this operation
+     * @see StreamOpFlag
+     */
+    default int getOpFlags() { return 0; }
+
+    /**
+     * Performs a parallel evaluation of the operation using the specified
+     * {@code PipelineHelper}, which describes the upstream intermediate
+     * operations.
+     *
+     * @implSpec The default performs a sequential evaluation of the operation
+     * using the specified {@code PipelineHelper}.
+     *
+     * @param <P_IN> the type of elements supplied by the source spliterator
+     * @param helper the pipeline helper
+     * @param spliterator the source spliterator
+     * @return the result of the evaluation
+     */
+    default <P_IN> R evaluateParallel(PipelineHelper<E_IN> helper,
+                                      Spliterator<P_IN> spliterator) {
+        if (Tripwire.ENABLED)
+            Tripwire.trip(getClass(), "{0} triggering TerminalOp.evaluateParallel serial default");
+        return evaluateSequential(helper, spliterator);
+    }
+
+    /**
+     * Performs a sequential evaluation of the operation using the specified
+     * {@code PipelineHelper}, which describes the upstream intermediate
+     * operations.
+     *
+     * @param <P_IN> the type of elements supplied by the source spliterator
+     * @param helper the pipeline helper
+     * @param spliterator the source spliterator
+     * @return the result of the evaluation
+     */
+    <P_IN> R evaluateSequential(PipelineHelper<E_IN> helper,
+                                Spliterator<P_IN> spliterator);
+}
diff --git a/ojluni/src/main/java/java/util/stream/TerminalSink.java b/ojluni/src/main/java/java/util/stream/TerminalSink.java
new file mode 100644
index 0000000..9808d54
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/TerminalSink.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.function.Supplier;
+
+/**
+ * A {@link Sink} which accumulates state as elements are accepted, and allows
+ * a result to be retrieved after the computation is finished.
+ *
+ * <p>The accumulated result is obtained via {@code get()}, inherited from
+ * {@link Supplier}.
+ *
+ * @param <T> the type of elements to be accepted
+ * @param <R> the type of the result
+ *
+ * @since 1.8
+ */
+interface TerminalSink<T, R> extends Sink<T>, Supplier<R> { }
diff --git a/ojluni/src/main/java/java/util/stream/Tripwire.java b/ojluni/src/main/java/java/util/stream/Tripwire.java
new file mode 100644
index 0000000..c6558b9
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/Tripwire.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+import sun.util.logging.PlatformLogger;
+
+/**
+ * Utility class for detecting inadvertent uses of boxing in
+ * {@code java.util.stream} classes. The detection is turned on or off based on
+ * whether the system property {@code org.openjdk.java.util.stream.tripwire} is
+ * considered {@code true} according to {@link Boolean#getBoolean(String)}.
+ * This should normally be turned off for production use.
+ *
+ * @apiNote
+ * Typical usage would be for boxing code to do:
+ * <pre>{@code
+ * if (Tripwire.ENABLED)
+ * Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)");
+ * }</pre>
+ *
+ * @since 1.8
+ */
+final class Tripwire {
+ private static final String TRIPWIRE_PROPERTY = "org.openjdk.java.util.stream.tripwire";
+
+ /** Should debugging checks be enabled? */
+ static final boolean ENABLED = AccessController.doPrivileged(
+ (PrivilegedAction<Boolean>) () -> Boolean.getBoolean(TRIPWIRE_PROPERTY));
+
+ private Tripwire() { }
+
+ /**
+ * Produces a log warning, using {@code PlatformLogger.getLogger(className)},
+ * using the supplied message. The class name of {@code trippingClass} will
+ * be used as the first parameter to the message.
+ *
+ * @param trippingClass Name of the class generating the message
+ * @param msg A message format string of the type expected by
+ * {@link PlatformLogger}
+ */
+ static void trip(Class<?> trippingClass, String msg) {
+ PlatformLogger.getLogger(trippingClass.getName()).warning(msg, trippingClass.getName());
+ }
+}
diff --git a/ojluni/src/main/java/java/util/stream/package-info.java b/ojluni/src/main/java/java/util/stream/package-info.java
new file mode 100644
index 0000000..17b070a
--- /dev/null
+++ b/ojluni/src/main/java/java/util/stream/package-info.java
@@ -0,0 +1,734 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Classes to support functional-style operations on streams of elements, such
+ * as map-reduce transformations on collections. For example:
+ *
+ * <pre>{@code
+ * int sum = widgets.stream()
+ * .filter(b -> b.getColor() == RED)
+ * .mapToInt(b -> b.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * <p>Here we use {@code widgets}, a {@code Collection<Widget>},
+ * as a source for a stream, and then perform a filter-map-reduce on the stream
+ * to obtain the sum of the weights of the red widgets. (Summation is an
+ * example of a <a href="package-summary.html#Reduction">reduction</a>
+ * operation.)
+ *
+ * <p>The key abstraction introduced in this package is <em>stream</em>. The
+ * classes {@link java.util.stream.Stream}, {@link java.util.stream.IntStream},
+ * {@link java.util.stream.LongStream}, and {@link java.util.stream.DoubleStream}
+ * are streams over objects and the primitive {@code int}, {@code long} and
+ * {@code double} types. Streams differ from collections in several ways:
+ *
+ * <ul>
+ * <li>No storage. A stream is not a data structure that stores elements;
+ * instead, it conveys elements from a source such as a data structure,
+ * an array, a generator function, or an I/O channel, through a pipeline of
+ * computational operations.</li>
+ * <li>Functional in nature. An operation on a stream produces a result,
+ * but does not modify its source. For example, filtering a {@code Stream}
+ * obtained from a collection produces a new {@code Stream} without the
+ * filtered elements, rather than removing elements from the source
+ * collection.</li>
+ * <li>Laziness-seeking. Many stream operations, such as filtering, mapping,
+ * or duplicate removal, can be implemented lazily, exposing opportunities
+ * for optimization. For example, "find the first {@code String} with
+ * three consecutive vowels" need not examine all the input strings.
+ * Stream operations are divided into intermediate ({@code Stream}-producing)
+ * operations and terminal (value- or side-effect-producing) operations.
+ * Intermediate operations are always lazy.</li>
+ * <li>Possibly unbounded. While collections have a finite size, streams
+ * need not. Short-circuiting operations such as {@code limit(n)} or
+ * {@code findFirst()} can allow computations on infinite streams to
+ * complete in finite time.</li>
+ * <li>Consumable. The elements of a stream are only visited once during
+ * the life of a stream. Like an {@link java.util.Iterator}, a new stream
+ * must be generated to revisit the same elements of the source.
+ * </li>
+ * </ul>
+ *
+ * Streams can be obtained in a number of ways. Some examples include:
+ * <ul>
+ * <li>From a {@link java.util.Collection} via the {@code stream()} and
+ * {@code parallelStream()} methods;</li>
+ * <li>From an array via {@link java.util.Arrays#stream(Object[])};</li>
+ * <li>From static factory methods on the stream classes, such as
+ * {@link java.util.stream.Stream#of(Object[])},
+ * {@link java.util.stream.IntStream#range(int, int)}
+ * or {@link java.util.stream.Stream#iterate(Object, UnaryOperator)};
+ * </li>
+ * </ul>
+ *
+ * <p>Additional stream sources can be provided by third-party libraries using
+ * <a href="package-summary.html#StreamSources">these techniques</a>.
+ *
+ * <h2><a name="StreamOps">Stream operations and pipelines</a></h2>
+ *
+ * <p>Stream operations are divided into <em>intermediate</em> and
+ * <em>terminal</em> operations, and are combined to form <em>stream
+ * pipelines</em>. A stream pipeline consists of a source (such as a
+ * {@code Collection}, an array, a generator function, or an I/O channel);
+ * followed by zero or more intermediate operations such as
+ * {@code Stream.filter} or {@code Stream.map}; and a terminal operation such
+ * as {@code Stream.forEach} or {@code Stream.reduce}.
+ *
+ * <p>Intermediate operations return a new stream. They are always
+ * <em>lazy</em>; executing an intermediate operation such as
+ * {@code filter()} does not actually perform any filtering, but instead
+ * creates a new stream that, when traversed, contains the elements of
+ * the initial stream that match the given predicate. Traversal
+ * of the pipeline source does not begin until the terminal operation of the
+ * pipeline is executed.
+ *
+ * <p>Terminal operations, such as {@code Stream.forEach} or
+ * {@code IntStream.sum}, may traverse the stream to produce a result or a
+ * side-effect. After the terminal operation is performed, the stream pipeline
+ * is considered consumed, and can no longer be used; if you need to traverse
+ * the same data source again, you must return to the data source to get a new
+ * stream. In almost all cases, terminal operations are <em>eager</em>,
+ * completing their traversal of the data source and processing of the pipeline
+ * before returning. Only the terminal operations {@code iterator()} and
+ * {@code spliterator()} are not; these are provided as an "escape hatch" to enable
+ * arbitrary client-controlled pipeline traversals in the event that the
+ * existing operations are not sufficient to the task.
+ *
+ * <p> Processing streams lazily allows for significant efficiencies; in a
+ * pipeline such as the filter-map-sum example above, filtering, mapping, and
+ * summing can be fused into a single pass on the data, with minimal
+ * intermediate state. Laziness also allows avoiding examining all the data
+ * when it is not necessary; for operations such as "find the first string
+ * longer than 1000 characters", it is only necessary to examine just enough
+ * strings to find one that has the desired characteristics without examining
+ * all of the strings available from the source. (This behavior becomes even
+ * more important when the input stream is infinite and not merely large.)
+ *
+ * <p>Intermediate operations are further divided into <em>stateless</em>
+ * and <em>stateful</em> operations. Stateless operations, such as {@code filter}
+ * and {@code map}, retain no state from previously seen elements when processing
+ * a new element -- each element can be processed
+ * independently of operations on other elements. Stateful operations, such as
+ * {@code distinct} and {@code sorted}, may incorporate state from previously
+ * seen elements when processing new elements.
+ *
+ * <p>Stateful operations may need to process the entire input
+ * before producing a result. For example, one cannot produce any results from
+ * sorting a stream until one has seen all elements of the stream. As a result,
+ * under parallel computation, some pipelines containing stateful intermediate
+ * operations may require multiple passes on the data or may need to buffer
+ * significant data. Pipelines containing exclusively stateless intermediate
+ * operations can be processed in a single pass, whether sequential or parallel,
+ * with minimal data buffering.
+ *
+ * <p>Further, some operations are deemed <em>short-circuiting</em> operations.
+ * An intermediate operation is short-circuiting if, when presented with
+ * infinite input, it may produce a finite stream as a result. A terminal
+ * operation is short-circuiting if, when presented with infinite input, it may
+ * terminate in finite time. Having a short-circuiting operation in the pipeline
+ * is a necessary, but not sufficient, condition for the processing of an infinite
+ * stream to terminate normally in finite time.
+ *
+ * <h3>Parallelism</h3>
+ *
+ * <p>Processing elements with an explicit {@code for-}loop is inherently serial.
+ * Streams facilitate parallel execution by reframing the computation as a pipeline of
+ * aggregate operations, rather than as imperative operations on each individual
+ * element. All stream operations can execute either in serial or in parallel.
+ * The stream implementations in the JDK create serial streams unless parallelism is
+ * explicitly requested. For example, {@code Collection} has methods
+ * {@link java.util.Collection#stream} and {@link java.util.Collection#parallelStream},
+ * which produce sequential and parallel streams respectively; other
+ * stream-bearing methods such as {@link java.util.stream.IntStream#range(int, int)}
+ * produce sequential streams but these streams can be efficiently parallelized by
+ * invoking their {@link java.util.stream.BaseStream#parallel()} method.
+ * To execute the prior "sum of weights of widgets" query in parallel, we would
+ * do:
+ *
+ * <pre>{@code
+ * int sumOfWeights = widgets.}<code><b>parallelStream()</b></code>{@code
+ * .filter(b -> b.getColor() == RED)
+ * .mapToInt(b -> b.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * <p>The only difference between the serial and parallel versions of this
+ * example is the creation of the initial stream, using "{@code parallelStream()}"
+ * instead of "{@code stream()}". When the terminal operation is initiated,
+ * the stream pipeline is executed sequentially or in parallel depending on the
+ * orientation of the stream on which it is invoked. Whether a stream will execute in serial or
+ * parallel can be determined with the {@code isParallel()} method, and the
+ * orientation of a stream can be modified with the
+ * {@link java.util.stream.BaseStream#sequential()} and
+ * {@link java.util.stream.BaseStream#parallel()} operations. When the terminal
+ * operation is initiated, the stream pipeline is executed sequentially or in
+ * parallel depending on the mode of the stream on which it is invoked.
+ *
+ * <p>Except for operations identified as explicitly nondeterministic, such
+ * as {@code findAny()}, whether a stream executes sequentially or in parallel
+ * should not change the result of the computation.
+ *
+ * <p>Most stream operations accept parameters that describe user-specified
+ * behavior, which are often lambda expressions. To preserve correct behavior,
+ * these <em>behavioral parameters</em> must be <em>non-interfering</em>, and in
+ * most cases must be <em>stateless</em>. Such parameters are always instances
+ * of a <a href="../function/package-summary.html">functional interface</a> such
+ * as {@link java.util.function.Function}, and are often lambda expressions or
+ * method references.
+ *
+ * <h3><a name="NonInterference">Non-interference</a></h3>
+ *
+ * Streams enable you to execute possibly-parallel aggregate operations over a
+ * variety of data sources, including even non-thread-safe collections such as
+ * {@code ArrayList}. This is possible only if we can prevent
+ * <em>interference</em> with the data source during the execution of a stream
+ * pipeline. Except for the escape-hatch operations {@code iterator()} and
+ * {@code spliterator()}, execution begins when the terminal operation is
+ * invoked, and ends when the terminal operation completes. For most data
+ * sources, preventing interference means ensuring that the data source is
+ * <em>not modified at all</em> during the execution of the stream pipeline.
+ * The notable exception to this are streams whose sources are concurrent
+ * collections, which are specifically designed to handle concurrent modification.
+ * Concurrent stream sources are those whose {@code Spliterator} reports the
+ * {@code CONCURRENT} characteristic.
+ *
+ * <p>Accordingly, behavioral parameters in stream pipelines whose source might
+ * not be concurrent should never modify the stream's data source.
+ * A behavioral parameter is said to <em>interfere</em> with a non-concurrent
+ * data source if it modifies, or causes to be
+ * modified, the stream's data source. The need for non-interference applies
+ * to all pipelines, not just parallel ones. Unless the stream source is
+ * concurrent, modifying a stream's data source during execution of a stream
+ * pipeline can cause exceptions, incorrect answers, or nonconformant behavior.
+ *
+ * For well-behaved stream sources, the source can be modified before the
+ * terminal operation commences and those modifications will be reflected in
+ * the covered elements. For example, consider the following code:
+ *
+ * <pre>{@code
+ * List<String> l = new ArrayList(Arrays.asList("one", "two"));
+ * Stream<String> sl = l.stream();
+ * l.add("three");
+ * String s = sl.collect(joining(" "));
+ * }</pre>
+ *
+ * First a list is created consisting of two strings: "one" and "two". Then a
+ * stream is created from that list. Next the list is modified by adding a third
+ * string: "three". Finally the elements of the stream are collected and joined
+ * together. Since the list was modified before the terminal {@code collect}
+ * operation commenced the result will be a string of "one two three". All the
+ * streams returned from JDK collections, and most other JDK classes,
+ * are well-behaved in this manner; for streams generated by other libraries, see
+ * <a href="package-summary.html#StreamSources">Low-level stream
+ * construction</a> for requirements for building well-behaved streams.
+ *
+ * <h3><a name="Statelessness">Stateless behaviors</a></h3>
+ *
+ * Stream pipeline results may be nondeterministic or incorrect if the behavioral
+ * parameters to the stream operations are <em>stateful</em>. A stateful lambda
+ * (or other object implementing the appropriate functional interface) is one
+ * whose result depends on any state which might change during the execution
+ * of the stream pipeline. An example of a stateful lambda is the parameter
+ * to {@code map()} in:
+ *
+ * <pre>{@code
+ * Set<Integer> seen = Collections.synchronizedSet(new HashSet<>());
+ * stream.parallel().map(e -> { if (seen.add(e)) return 0; else return e; })...
+ * }</pre>
+ *
+ * Here, if the mapping operation is performed in parallel, the results for the
+ * same input could vary from run to run, due to thread scheduling differences,
+ * whereas, with a stateless lambda expression the results would always be the
+ * same.
+ *
+ * <p>Note also that attempting to access mutable state from behavioral parameters
+ * presents you with a bad choice with respect to safety and performance; if
+ * you do not synchronize access to that state, you have a data race and
+ * therefore your code is broken, but if you do synchronize access to that
+ * state, you risk having contention undermine the parallelism you are seeking
+ * to benefit from. The best approach is to avoid stateful behavioral
+ * parameters to stream operations entirely; there is usually a way to
+ * restructure the stream pipeline to avoid statefulness.
+ *
+ * <h3>Side-effects</h3>
+ *
+ * Side-effects in behavioral parameters to stream operations are, in general,
+ * discouraged, as they can often lead to unwitting violations of the
+ * statelessness requirement, as well as other thread-safety hazards.
+ *
+ * <p>If the behavioral parameters do have side-effects, unless explicitly
+ * stated, there are no guarantees as to the
+ * <a href="../concurrent/package-summary.html#MemoryVisibility"><i>visibility</i></a>
+ * of those side-effects to other threads, nor are there any guarantees that
+ * different operations on the "same" element within the same stream pipeline
+ * are executed in the same thread. Further, the ordering of those effects
+ * may be surprising. Even when a pipeline is constrained to produce a
+ * <em>result</em> that is consistent with the encounter order of the stream
+ * source (for example, {@code IntStream.range(0,5).parallel().map(x -> x*2).toArray()}
+ * must produce {@code [0, 2, 4, 6, 8]}), no guarantees are made as to the order
+ * in which the mapper function is applied to individual elements, or in what
+ * thread any behavioral parameter is executed for a given element.
+ *
+ * <p>Many computations where one might be tempted to use side effects can be more
+ * safely and efficiently expressed without side-effects, such as using
+ * <a href="package-summary.html#Reduction">reduction</a> instead of mutable
+ * accumulators. However, side-effects such as using {@code println()} for debugging
+ * purposes are usually harmless. A small number of stream operations, such as
+ * {@code forEach()} and {@code peek()}, can operate only via side-effects;
+ * these should be used with care.
+ *
+ * <p>As an example of how to transform a stream pipeline that inappropriately
+ * uses side-effects to one that does not, the following code searches a stream
+ * of strings for those matching a given regular expression, and puts the
+ * matches in a list.
+ *
+ * <pre>{@code
+ * ArrayList<String> results = new ArrayList<>();
+ * stream.filter(s -> pattern.matcher(s).matches())
+ * .forEach(s -> results.add(s)); // Unnecessary use of side-effects!
+ * }</pre>
+ *
+ * This code unnecessarily uses side-effects. If executed in parallel, the
+ * non-thread-safety of {@code ArrayList} would cause incorrect results, and
+ * adding needed synchronization would cause contention, undermining the
+ * benefit of parallelism. Furthermore, using side-effects here is completely
+ * unnecessary; the {@code forEach()} can simply be replaced with a reduction
+ * operation that is safer, more efficient, and more amenable to
+ * parallelization:
+ *
+ * <pre>{@code
+ * List<String> results =
+ * stream.filter(s -> pattern.matcher(s).matches())
+ * .collect(Collectors.toList()); // No side-effects!
+ * }</pre>
+ *
+ * <h3><a name="Ordering">Ordering</a></h3>
+ *
+ * <p>Streams may or may not have a defined <em>encounter order</em>. Whether
+ * or not a stream has an encounter order depends on the source and the
+ * intermediate operations. Certain stream sources (such as {@code List} or
+ * arrays) are intrinsically ordered, whereas others (such as {@code HashSet})
+ * are not. Some intermediate operations, such as {@code sorted()}, may impose
+ * an encounter order on an otherwise unordered stream, and others may render an
+ * ordered stream unordered, such as {@link java.util.stream.BaseStream#unordered()}.
+ * Further, some terminal operations may ignore encounter order, such as
+ * {@code forEach()}.
+ *
+ * <p>If a stream is ordered, most operations are constrained to operate on the
+ * elements in their encounter order; if the source of a stream is a {@code List}
+ * containing {@code [1, 2, 3]}, then the result of executing {@code map(x -> x*2)}
+ * must be {@code [2, 4, 6]}. However, if the source has no defined encounter
+ * order, then any permutation of the values {@code [2, 4, 6]} would be a valid
+ * result.
+ *
+ * <p>For sequential streams, the presence or absence of an encounter order does
+ * not affect performance, only determinism. If a stream is ordered, repeated
+ * execution of identical stream pipelines on an identical source will produce
+ * an identical result; if it is not ordered, repeated execution might produce
+ * different results.
+ *
+ * <p>For parallel streams, relaxing the ordering constraint can sometimes enable
+ * more efficient execution. Certain aggregate operations,
+ * such as filtering duplicates ({@code distinct()}) or grouped reductions
+ * ({@code Collectors.groupingBy()}) can be implemented more efficiently if ordering of elements
+ * is not relevant. Similarly, operations that are intrinsically tied to encounter order,
+ * such as {@code limit()}, may require
+ * buffering to ensure proper ordering, undermining the benefit of parallelism.
+ * In cases where the stream has an encounter order, but the user does not
+ * particularly <em>care</em> about that encounter order, explicitly de-ordering
+ * the stream with {@link java.util.stream.BaseStream#unordered() unordered()} may
+ * improve parallel performance for some stateful or terminal operations.
+ * However, most stream pipelines, such as the "sum of weights of widgets" example
+ * above, still parallelize efficiently even under ordering constraints.
+ *
+ * <h2><a name="Reduction">Reduction operations</a></h2>
+ *
+ * A <em>reduction</em> operation (also called a <em>fold</em>) takes a sequence
+ * of input elements and combines them into a single summary result by repeated
+ * application of a combining operation, such as finding the sum or maximum of
+ * a set of numbers, or accumulating elements into a list. The streams classes have
+ * multiple forms of general reduction operations, called
+ * {@link java.util.stream.Stream#reduce(java.util.function.BinaryOperator) reduce()}
+ * and {@link java.util.stream.Stream#collect(java.util.stream.Collector) collect()},
+ * as well as multiple specialized reduction forms such as
+ * {@link java.util.stream.IntStream#sum() sum()}, {@link java.util.stream.IntStream#max() max()},
+ * or {@link java.util.stream.IntStream#count() count()}.
+ *
+ * <p>Of course, such operations can be readily implemented as simple sequential
+ * loops, as in:
+ * <pre>{@code
+ * int sum = 0;
+ * for (int x : numbers) {
+ * sum += x;
+ * }
+ * }</pre>
+ * However, there are good reasons to prefer a reduce operation
+ * over a mutative accumulation such as the above. Not only is a reduction
+ * "more abstract" -- it operates on the stream as a whole rather than individual
+ * elements -- but a properly constructed reduce operation is inherently
+ * parallelizable, so long as the function(s) used to process the elements
+ * are <a href="package-summary.html#Associativity">associative</a> and
+ * <a href="package-summary.html#NonInterfering">stateless</a>.
+ * For example, given a stream of numbers for which we want to find the sum, we
+ * can write:
+ * <pre>{@code
+ * int sum = numbers.stream().reduce(0, (x,y) -> x+y);
+ * }</pre>
+ * or:
+ * <pre>{@code
+ * int sum = numbers.stream().reduce(0, Integer::sum);
+ * }</pre>
+ *
+ * <p>These reduction operations can run safely in parallel with almost no
+ * modification:
+ * <pre>{@code
+ * int sum = numbers.parallelStream().reduce(0, Integer::sum);
+ * }</pre>
+ *
+ * <p>Reduction parallelizes well because the implementation
+ * can operate on subsets of the data in parallel, and then combine the
+ * intermediate results to get the final correct answer. (Even if the language
+ * had a "parallel for-each" construct, the mutative accumulation approach would
+ * still require the developer to provide
+ * thread-safe updates to the shared accumulating variable {@code sum}, and
+ * the required synchronization would then likely eliminate any performance gain from
+ * parallelism.) Using {@code reduce()} instead removes all of the
+ * burden of parallelizing the reduction operation, and the library can provide
+ * an efficient parallel implementation with no additional synchronization
+ * required.
+ *
+ * <p>The "widgets" examples shown earlier shows how reduction combines with
+ * other operations to replace for loops with bulk operations. If {@code widgets}
+ * is a collection of {@code Widget} objects, which have a {@code getWeight} method,
+ * we can find the heaviest widget with:
+ * <pre>{@code
+ * OptionalInt heaviest = widgets.parallelStream()
+ * .mapToInt(Widget::getWeight)
+ * .max();
+ * }</pre>
+ *
+ * <p>In its more general form, a {@code reduce} operation on elements of type
+ * {@code <T>} yielding a result of type {@code <U>} requires three parameters:
+ * <pre>{@code
+ * <U> U reduce(U identity,
+ * BiFunction<U, ? super T, U> accumulator,
+ * BinaryOperator<U> combiner);
+ * }</pre>
+ * Here, the <em>identity</em> element is both an initial seed value for the reduction
+ * and a default result if there are no input elements. The <em>accumulator</em>
+ * function takes a partial result and the next element, and produces a new
+ * partial result. The <em>combiner</em> function combines two partial results
+ * to produce a new partial result. (The combiner is necessary in parallel
+ * reductions, where the input is partitioned, a partial accumulation computed
+ * for each partition, and then the partial results are combined to produce a
+ * final result.)
+ *
+ * <p>More formally, the {@code identity} value must be an <em>identity</em> for
+ * the combiner function. This means that for all {@code u},
+ * {@code combiner.apply(identity, u)} is equal to {@code u}. Additionally, the
+ * {@code combiner} function must be <a href="package-summary.html#Associativity">associative</a> and
+ * must be compatible with the {@code accumulator} function: for all {@code u}
+ * and {@code t}, {@code combiner.apply(u, accumulator.apply(identity, t))} must
+ * be {@code equals()} to {@code accumulator.apply(u, t)}.
+ *
+ * <p>The three-argument form is a generalization of the two-argument form,
+ * incorporating a mapping step into the accumulation step. We could
+ * re-cast the simple sum-of-weights example using the more general form as
+ * follows:
+ * <pre>{@code
+ * int sumOfWeights = widgets.stream()
+ * .reduce(0,
+ * (sum, b) -> sum + b.getWeight(),
+ * Integer::sum);
+ * }</pre>
+ * though the explicit map-reduce form is more readable and therefore should
+ * usually be preferred. The generalized form is provided for cases where
+ * significant work can be optimized away by combining mapping and reducing
+ * into a single function.
+ *
+ * <h3><a name="MutableReduction">Mutable reduction</a></h3>
+ *
+ * A <em>mutable reduction operation</em> accumulates input elements into a
+ * mutable result container, such as a {@code Collection} or {@code StringBuilder},
+ * as it processes the elements in the stream.
+ *
+ * <p>If we wanted to take a stream of strings and concatenate them into a
+ * single long string, we <em>could</em> achieve this with ordinary reduction:
+ * <pre>{@code
+ * String concatenated = strings.reduce("", String::concat);
+ * }</pre>
+ *
+ * <p>We would get the desired result, and it would even work in parallel. However,
+ * we might not be happy about the performance! Such an implementation would do
+ * a great deal of string copying, and the run time would be <em>O(n^2)</em> in
+ * the number of characters. A more performant approach would be to accumulate
+ * the results into a {@link java.lang.StringBuilder}, which is a mutable
+ * container for accumulating strings. We can use the same technique to
+ * parallelize mutable reduction as we do with ordinary reduction.
+ *
+ * <p>The mutable reduction operation is called
+ * {@link java.util.stream.Stream#collect(Collector) collect()},
+ * as it collects together the desired results into a result container such
+ * as a {@code Collection}.
+ * A {@code collect} operation requires three functions:
+ * a supplier function to construct new instances of the result container, an
+ * accumulator function to incorporate an input element into a result
+ * container, and a combining function to merge the contents of one result
+ * container into another. The form of this is very similar to the general
+ * form of ordinary reduction:
+ * <pre>{@code
+ * <R> R collect(Supplier<R> supplier,
+ * BiConsumer<R, ? super T> accumulator,
+ * BiConsumer<R, R> combiner);
+ * }</pre>
+ * <p>As with {@code reduce()}, a benefit of expressing {@code collect} in this
+ * abstract way is that it is directly amenable to parallelization: we can
+ * accumulate partial results in parallel and then combine them, so long as the
+ * accumulation and combining functions satisfy the appropriate requirements.
+ * For example, to collect the String representations of the elements in a
+ * stream into an {@code ArrayList}, we could write the obvious sequential
+ * for-each form:
+ * <pre>{@code
+ * ArrayList<String> strings = new ArrayList<>();
+ * for (T element : stream) {
+ * strings.add(element.toString());
+ * }
+ * }</pre>
+ * Or we could use a parallelizable collect form:
+ * <pre>{@code
+ * ArrayList<String> strings = stream.collect(() -> new ArrayList<>(),
+ * (c, e) -> c.add(e.toString()),
+ * (c1, c2) -> c1.addAll(c2));
+ * }</pre>
+ * or, pulling the mapping operation out of the accumulator function, we could
+ * express it more succinctly as:
+ * <pre>{@code
+ * List<String> strings = stream.map(Object::toString)
+ * .collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
+ * }</pre>
+ * Here, our supplier is just the {@link java.util.ArrayList#ArrayList()
+ * ArrayList constructor}, the accumulator adds the stringified element to an
+ * {@code ArrayList}, and the combiner simply uses {@link java.util.ArrayList#addAll addAll}
+ * to copy the strings from one container into the other.
+ *
+ * <p>The three aspects of {@code collect} -- supplier, accumulator, and
+ * combiner -- are tightly coupled. We can use the abstraction of a
+ * {@link java.util.stream.Collector} to capture all three aspects. The
+ * above example for collecting strings into a {@code List} can be rewritten
+ * using a standard {@code Collector} as:
+ * <pre>{@code
+ * List<String> strings = stream.map(Object::toString)
+ * .collect(Collectors.toList());
+ * }</pre>
+ *
+ * <p>Packaging mutable reductions into a Collector has another advantage:
+ * composability. The class {@link java.util.stream.Collectors} contains a
+ * number of predefined factories for collectors, including combinators
+ * that transform one collector into another. For example, suppose we have a
+ * collector that computes the sum of the salaries of a stream of
+ * employees, as follows:
+ *
+ * <pre>{@code
+ * Collector<Employee, ?, Integer> summingSalaries
+ * = Collectors.summingInt(Employee::getSalary);
+ * }</pre>
+ *
+ * (The {@code ?} for the second type parameter merely indicates that we don't
+ * care about the intermediate representation used by this collector.)
+ * If we wanted to create a collector to tabulate the sum of salaries by
+ * department, we could reuse {@code summingSalaries} using
+ * {@link java.util.stream.Collectors#groupingBy(java.util.function.Function, java.util.stream.Collector) groupingBy}:
+ *
+ * <pre>{@code
+ * Map<Department, Integer> salariesByDept
+ * = employees.stream().collect(Collectors.groupingBy(Employee::getDepartment,
+ * summingSalaries));
+ * }</pre>
+ *
+ * <p>As with the regular reduction operation, {@code collect()} operations can
+ * only be parallelized if appropriate conditions are met. For any partially
+ * accumulated result, combining it with an empty result container must
+ * produce an equivalent result. That is, for a partially accumulated result
+ * {@code p} that is the result of any series of accumulator and combiner
+ * invocations, {@code p} must be equivalent to
+ * {@code combiner.apply(p, supplier.get())}.
+ *
+ * <p>Further, however the computation is split, it must produce an equivalent
+ * result. For any input elements {@code t1} and {@code t2}, the results
+ * {@code r1} and {@code r2} in the computation below must be equivalent:
+ * <pre>{@code
+ * A a1 = supplier.get();
+ * accumulator.accept(a1, t1);
+ * accumulator.accept(a1, t2);
+ * R r1 = finisher.apply(a1); // result without splitting
+ *
+ * A a2 = supplier.get();
+ * accumulator.accept(a2, t1);
+ * A a3 = supplier.get();
+ * accumulator.accept(a3, t2);
+ * R r2 = finisher.apply(combiner.apply(a2, a3)); // result with splitting
+ * }</pre>
+ *
+ * <p>Here, equivalence generally means according to {@link java.lang.Object#equals(Object)},
+ * but in some cases equivalence may be relaxed to account for differences in
+ * order.
+ *
+ * <h3><a name="ConcurrentReduction">Reduction, concurrency, and ordering</a></h3>
+ *
+ * With some complex reduction operations, for example a {@code collect()} that
+ * produces a {@code Map}, such as:
+ * <pre>{@code
+ * Map<Buyer, List<Transaction>> salesByBuyer
+ * = txns.parallelStream()
+ * .collect(Collectors.groupingBy(Transaction::getBuyer));
+ * }</pre>
+ * it may actually be counterproductive to perform the operation in parallel.
+ * This is because the combining step (merging one {@code Map} into another by
+ * key) can be expensive for some {@code Map} implementations.
+ *
+ * <p>Suppose, however, that the result container used in this reduction
+ * was a concurrently modifiable collection -- such as a
+ * {@link java.util.concurrent.ConcurrentHashMap}. In that case, the parallel
+ * invocations of the accumulator could actually deposit their results
+ * concurrently into the same shared result container, eliminating the need for
+ * the combiner to merge distinct result containers. This potentially provides
+ * a boost to the parallel execution performance. We call this a
+ * <em>concurrent</em> reduction.
+ *
+ * <p>A {@link java.util.stream.Collector} that supports concurrent reduction is
+ * marked with the {@link java.util.stream.Collector.Characteristics#CONCURRENT}
+ * characteristic. However, a concurrent collection also has a downside. If
+ * multiple threads are depositing results concurrently into a shared container,
+ * the order in which results are deposited is non-deterministic. Consequently,
+ * a concurrent reduction is only possible if ordering is not important for the
+ * stream being processed. The {@link java.util.stream.Stream#collect(Collector)}
+ * implementation will only perform a concurrent reduction if
+ * <ul>
+ * <li>The stream is parallel;</li>
+ * <li>The collector has the
+ * {@link java.util.stream.Collector.Characteristics#CONCURRENT} characteristic,
+ * and;</li>
+ * <li>Either the stream is unordered, or the collector has the
+ * {@link java.util.stream.Collector.Characteristics#UNORDERED} characteristic.
+ * </ul>
+ * You can ensure the stream is unordered by using the
+ * {@link java.util.stream.BaseStream#unordered()} method. For example:
+ * <pre>{@code
+ * Map<Buyer, List<Transaction>> salesByBuyer
+ * = txns.parallelStream()
+ * .unordered()
+ * .collect(groupingByConcurrent(Transaction::getBuyer));
+ * }</pre>
+ * (where {@link java.util.stream.Collectors#groupingByConcurrent} is the
+ * concurrent equivalent of {@code groupingBy}).
+ *
+ * <p>Note that if it is important that the elements for a given key appear in
+ * the order they appear in the source, then we cannot use a concurrent
+ * reduction, as ordering is one of the casualties of concurrent insertion.
+ * We would then be constrained to implement either a sequential reduction or
+ * a merge-based parallel reduction.
+ *
+ * <h3><a name="Associativity">Associativity</a></h3>
+ *
+ * An operator or function {@code op} is <em>associative</em> if the following
+ * holds:
+ * <pre>{@code
+ * (a op b) op c == a op (b op c)
+ * }</pre>
+ * The importance of this to parallel evaluation can be seen if we expand this
+ * to four terms:
+ * <pre>{@code
+ * a op b op c op d == (a op b) op (c op d)
+ * }</pre>
+ * So we can evaluate {@code (a op b)} in parallel with {@code (c op d)}, and
+ * then invoke {@code op} on the results.
+ *
+ * <p>Examples of associative operations include numeric addition, min, and
+ * max, and string concatenation.
+ *
+ * <h2><a name="StreamSources">Low-level stream construction</a></h2>
+ *
+ * So far, all the stream examples have used methods like
+ * {@link java.util.Collection#stream()} or {@link java.util.Arrays#stream(Object[])}
+ * to obtain a stream. How are those stream-bearing methods implemented?
+ *
+ * <p>The class {@link java.util.stream.StreamSupport} has a number of
+ * low-level methods for creating a stream, all using some form of a
+ * {@link java.util.Spliterator}. A spliterator is the parallel analogue of an
+ * {@link java.util.Iterator}; it describes a (possibly infinite) collection of
+ * elements, with support for sequentially advancing, bulk traversal, and
+ * splitting off some portion of the input into another spliterator which can
+ * be processed in parallel. At the lowest level, all streams are driven by a
+ * spliterator.
+ *
+ * <p>There are a number of implementation choices in implementing a
+ * spliterator, nearly all of which are tradeoffs between simplicity of
+ * implementation and runtime performance of streams using that spliterator.
+ * The simplest, but least performant, way to create a spliterator is to
+ * create one from an iterator using
+ * {@link java.util.Spliterators#spliteratorUnknownSize(java.util.Iterator, int)}.
+ * While such a spliterator will work, it will likely offer poor parallel
+ * performance, since we have lost sizing information (how big is the
+ * underlying data set), as well as being constrained to a simplistic
+ * splitting algorithm.
+ *
+ * <p>A higher-quality spliterator will provide balanced and known-size
+ * splits, accurate sizing information, and a number of other
+ * {@link java.util.Spliterator#characteristics() characteristics} of the
+ * spliterator or data that can be used by implementations to optimize
+ * execution.
+ *
+ * <p>Spliterators for mutable data sources have an additional challenge;
+ * timing of binding to the data, since the data could change between the time
+ * the spliterator is created and the time the stream pipeline is executed.
+ * Ideally, a spliterator for a stream would report a characteristic of
+ * {@code IMMUTABLE} or {@code CONCURRENT}; if not it should be
+ * <a href="../Spliterator.html#binding"><em>late-binding</em></a>. If a source
+ * cannot directly supply a recommended spliterator, it may indirectly supply
+ * a spliterator using a {@code Supplier}, and construct a stream via the
+ * {@code Supplier}-accepting versions of
+ * {@link java.util.stream.StreamSupport#stream(java.util.function.Supplier, int, boolean) stream()}.
+ * The spliterator is obtained from the supplier only after the terminal
+ * operation of the stream pipeline commences.
+ *
+ * <p>These requirements significantly reduce the scope of potential
+ * interference between mutations of the stream source and execution of stream
+ * pipelines. Streams based on spliterators with the desired characteristics,
+ * or those using the Supplier-based factory forms, are immune to
+ * modifications of the data source prior to commencement of the terminal
+ * operation (provided the behavioral parameters to the stream operations meet
+ * the required criteria for non-interference and statelessness). See
+ * <a href="package-summary.html#NonInterference">Non-Interference</a>
+ * for more details.
+ *
+ * @since 1.8
+ */
+package java.util.stream;
+
+import java.util.function.BinaryOperator;
+import java.util.function.UnaryOperator;
diff --git a/openjdk_java_files.mk b/openjdk_java_files.mk
index b77f2ae..d70ca30 100644
--- a/openjdk_java_files.mk
+++ b/openjdk_java_files.mk
@@ -753,6 +753,42 @@
ojluni/src/main/java/java/util/jar/JarVerifier.java \
ojluni/src/main/java/java/util/jar/Manifest.java \
ojluni/src/main/java/java/util/jar/Pack200.java \
+ ojluni/src/main/java/java/util/stream/AbstractPipeline.java \
+ ojluni/src/main/java/java/util/stream/AbstractShortCircuitTask.java \
+ ojluni/src/main/java/java/util/stream/AbstractSpinedBuffer.java \
+ ojluni/src/main/java/java/util/stream/AbstractTask.java \
+ ojluni/src/main/java/java/util/stream/BaseStream.java \
+ ojluni/src/main/java/java/util/stream/Collector.java \
+ ojluni/src/main/java/java/util/stream/Collectors.java \
+ ojluni/src/main/java/java/util/stream/DistinctOps.java \
+ ojluni/src/main/java/java/util/stream/DoublePipeline.java \
+ ojluni/src/main/java/java/util/stream/DoubleStream.java \
+ ojluni/src/main/java/java/util/stream/FindOps.java \
+ ojluni/src/main/java/java/util/stream/ForEachOps.java \
+ ojluni/src/main/java/java/util/stream/IntPipeline.java \
+ ojluni/src/main/java/java/util/stream/IntStream.java \
+ ojluni/src/main/java/java/util/stream/LongPipeline.java \
+ ojluni/src/main/java/java/util/stream/LongStream.java \
+ ojluni/src/main/java/java/util/stream/MatchOps.java \
+ ojluni/src/main/java/java/util/stream/Node.java \
+ ojluni/src/main/java/java/util/stream/Nodes.java \
+ ojluni/src/main/java/java/util/stream/package-info.java \
+ ojluni/src/main/java/java/util/stream/PipelineHelper.java \
+ ojluni/src/main/java/java/util/stream/ReduceOps.java \
+ ojluni/src/main/java/java/util/stream/ReferencePipeline.java \
+ ojluni/src/main/java/java/util/stream/Sink.java \
+ ojluni/src/main/java/java/util/stream/SliceOps.java \
+ ojluni/src/main/java/java/util/stream/SortedOps.java \
+ ojluni/src/main/java/java/util/stream/SpinedBuffer.java \
+ ojluni/src/main/java/java/util/stream/Stream.java \
+ ojluni/src/main/java/java/util/stream/StreamOpFlag.java \
+ ojluni/src/main/java/java/util/stream/StreamShape.java \
+ ojluni/src/main/java/java/util/stream/Streams.java \
+ ojluni/src/main/java/java/util/stream/StreamSpliterators.java \
+ ojluni/src/main/java/java/util/stream/StreamSupport.java \
+ ojluni/src/main/java/java/util/stream/TerminalOp.java \
+ ojluni/src/main/java/java/util/stream/TerminalSink.java \
+ ojluni/src/main/java/java/util/stream/Tripwire.java \
ojluni/src/main/java/java/util/JumboEnumSet.java \
ojluni/src/main/java/java/util/LinkedHashMap.java \
ojluni/src/main/java/java/util/LinkedHashSet.java \
@@ -821,6 +857,7 @@
ojluni/src/main/java/java/util/SortedSet.java \
ojluni/src/main/java/java/util/Spliterator.java \
ojluni/src/main/java/java/util/Spliterators.java \
+ ojluni/src/main/java/java/util/SplittableRandom.java \
ojluni/src/main/java/java/util/Stack.java \
ojluni/src/main/java/java/util/StringTokenizer.java \
ojluni/src/main/java/java/util/Timer.java \