Java source code examples: org.apache.flink.api.common.typeutils.SameTypePairComparator
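
SameTypePairComparator is a TypePairComparator<T, T> built from a single TypeComparator<T>: one record is set as the reference, and candidate records of the same type are compared against it. This is what lets the hash table probers in the examples below match probe-side records against build-side records when both sides share a type. A minimal usage sketch (the class name and printed values are illustrative; IntComparator is Flink's org.apache.flink.api.common.typeutils.base.IntComparator):

import org.apache.flink.api.common.typeutils.SameTypePairComparator;
import org.apache.flink.api.common.typeutils.base.IntComparator;

public class SameTypePairComparatorSketch {
	public static void main(String[] args) {
		// wrap a single ascending int comparator into a pair comparator
		SameTypePairComparator<Integer> pairComparator =
			new SameTypePairComparator<>(new IntComparator(true));

		// the build-side record becomes the reference...
		pairComparator.setReference(42);

		// ...and probe-side candidates are checked against it
		System.out.println(pairComparator.equalToReference(42));        // true
		System.out.println(pairComparator.compareToReference(41) != 0); // true: 41 differs from 42
	}
}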

Example 1: the InPlaceMutableHashTable constructor builds its default prober with a SameTypePairComparator over the build-side comparator.
public InPlaceMutableHashTable(TypeSerializer<T> serializer, TypeComparator<T> comparator, List<MemorySegment> memory) {
	super(serializer, comparator);
	this.numAllMemorySegments = memory.size();
	this.freeMemorySegments = new ArrayList<>(memory);

	// some sanity checks first
	if (freeMemorySegments.size() < MIN_NUM_MEMORY_SEGMENTS) {
		throw new IllegalArgumentException("Too few memory segments provided. InPlaceMutableHashTable needs at least " +
			MIN_NUM_MEMORY_SEGMENTS + " memory segments.");
	}

	// Get the size of the first memory segment and record it. All further buffers must have the same size.
	// The size must also be a power of 2.
	segmentSize = freeMemorySegments.get(0).size();
	if ((segmentSize & (segmentSize - 1)) != 0) {
		throw new IllegalArgumentException("Hash Table requires buffers whose size is a power of 2.");
	}

	this.numBucketsPerSegment = segmentSize / bucketSize;
	this.numBucketsPerSegmentBits = MathUtils.log2strict(this.numBucketsPerSegment);
	this.numBucketsPerSegmentMask = (1 << this.numBucketsPerSegmentBits) - 1;

	recordArea = new RecordArea(segmentSize);

	stagingSegments = new ArrayList<>();
	stagingSegments.add(forcedAllocateSegment());
	stagingSegmentsInView = new RandomAccessInputView(stagingSegments, segmentSize);
	stagingSegmentsOutView = new StagingOutputView(stagingSegments, segmentSize);

	prober = new HashTableProber<>(buildSideComparator, new SameTypePairComparator<>(buildSideComparator));

	enableResize = buildSideSerializer.getLength() == -1;
}
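
The constructor's power-of-two check uses the bit identity that for x > 0, x is a power of two exactly when (x & (x - 1)) == 0, because subtracting 1 flips the lowest set bit and all bits below it. A standalone sketch of the same check (class and method names are illustrative):

public class PowerOfTwoCheck {

	static boolean isPowerOfTwo(int x) {
		// e.g. 32 = 0b100000 and 31 = 0b011111 share no bits,
		// while 24 = 0b11000 and 23 = 0b10111 still share the top bit
		return x > 0 && (x & (x - 1)) == 0;
	}

	public static void main(String[] args) {
		System.out.println(isPowerOfTwo(32 * 1024)); // true: a 32 KiB segment passes
		System.out.println(isPowerOfTwo(3 * 1024));  // false: rejected by the constructor
	}
}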
 
Example 2: the ReduceFacade constructor. ReduceFacade is an inner class of InPlaceMutableHashTable, so buildSideComparator, buildSideSerializer, and getProber come from the enclosing table.
public ReduceFacade(ReduceFunction<T> reducer, Collector<T> outputCollector, boolean objectReuseEnabled) {
	this.reducer = reducer;
	this.outputCollector = outputCollector;
	this.objectReuseEnabled = objectReuseEnabled;
	this.prober = getProber(buildSideComparator, new SameTypePairComparator<>(buildSideComparator));
	this.reuse = buildSideSerializer.createInstance();
}
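
ReduceFacade combines such a prober with a ReduceFunction so that records can be reduced into the hash table in place. A hedged sketch of how a caller might wire it up, assuming Flink's InPlaceMutableHashTable from flink-runtime and its updateTableEntryWithReduce/emit methods (the helper method and parameter names are assumptions):

import java.util.List;

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.typeutils.TypeComparator;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.operators.hash.InPlaceMutableHashTable;
import org.apache.flink.util.Collector;

public class ReduceFacadeSketch {

	static <T> void reduceIntoTable(
			TypeSerializer<T> serializer,
			TypeComparator<T> comparator,
			List<MemorySegment> memory,
			ReduceFunction<T> reducer,
			Collector<T> collector,
			Iterable<T> records) throws Exception {

		InPlaceMutableHashTable<T> table = new InPlaceMutableHashTable<>(serializer, comparator, memory);
		table.open();

		InPlaceMutableHashTable<T>.ReduceFacade facade =
			table.new ReduceFacade(reducer, collector, /* objectReuseEnabled */ false);

		for (T record : records) {
			facade.updateTableEntryWithReduce(record); // insert, or reduce into the matching entry
		}

		facade.emit();  // push the reduced results to the collector
		table.close();
	}
}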
 
Example 3: a test that replaces records with larger ones to force compactions, then verifies every record via a prober backed by a SameTypePairComparator.
/**
 * The records are larger than one segment. Additionally, there is just barely enough memory,
 * so lots of compactions will happen.
 */
@Test
public void testLargeRecordsWithManyCompactions() {
	try {
		final int numElements = 1000;

		final String longString1 = getLongString(100000), longString2 = getLongString(110000);
		List<MemorySegment> memory = getMemory(3800, 32 * 1024);

		InPlaceMutableHashTable<Tuple2<Long, String>> table =
			new InPlaceMutableHashTable<>(serializer, comparator, memory);
		table.open();

		// first, we insert some elements
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i, longString1));
		}

		// now, we replace the same elements with larger ones, causing fragmentation
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i, longString2));
		}

		// check the results
		InPlaceMutableHashTable<Tuple2<Long, String>>.HashTableProber<Tuple2<Long, String>> prober =
			table.getProber(comparator, new SameTypePairComparator<>(comparator));
		Tuple2<Long, String> reuse = new Tuple2<>();
		for (long i = 0; i < numElements; i++) {
			assertNotNull(prober.getMatchFor(Tuple2.of(i, longString2), reuse));
		}

		table.close();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 4: a test that exercises insertOrReplaceRecord on a CompactingHashTable under memory pressure, again probing with a SameTypePairComparator.
/**
 * This test validates that new inserts (rather than updates) in "insertOrReplace()" properly
 * react to out of memory conditions.
 */
@Test
public void testInsertsWithInsertOrReplace() {
	try {
		final int numElements = 1000;

		final String longString = getLongString(10000);
		List<MemorySegment> memory = getMemory(1000, 32 * 1024);

		// we create a hash table that thinks the records are very large, which makes it initially assign
		// a lot of memory to the partition buffers and start with a smaller hash table; that way
		// we trigger a hash table growth early
		CompactingHashTable<Tuple2<Long, String>> table = new CompactingHashTable<>(
			tuple2LongStringSerializer, tuple2LongStringComparator, memory, 100);
		table.open();

		// first, we insert some elements
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i, longString));
		}

		// now, we replace the same elements, causing fragmentation
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i, longString));
		}
		
		// now we insert an additional set of elements; without compaction during this insertion,
		// the memory would run out
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i + numElements, longString));
		}

		// check the results
		CompactingHashTable<Tuple2<Long, String>>.HashTableProber<Tuple2<Long, String>> prober =
			table.getProber(tuple2LongStringComparator, new SameTypePairComparator<>(tuple2LongStringComparator));
		Tuple2<Long, String> reuse = new Tuple2<>();
		for (long i = 0; i < numElements; i++) {
			assertNotNull(prober.getMatchFor(Tuple2.of(i, longString), reuse));
			assertNotNull(prober.getMatchFor(Tuple2.of(i + numElements, longString), reuse));
		}
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 5: a CompactingHashTable resize test; the prober is created before the resize and must still find every pair afterwards.
@Test
public void testResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}
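
The resize tests invoke the private resizeHashTable method reflectively; Whitebox is assumed here to be PowerMock's org.powermock.reflect.Whitebox. A minimal sketch of the same call using only java.lang.reflect (the helper name is illustrative, and unlike Whitebox it does not search superclasses):

import java.lang.reflect.Method;

public class ResizeInvoker {

	// roughly what Whitebox.invokeMethod(table, "resizeHashTable") does: locate the
	// private no-argument method, make it accessible, and invoke it
	static boolean invokeResize(Object table) throws Exception {
		Method m = table.getClass().getDeclaredMethod("resizeHashTable");
		m.setAccessible(true);
		return (Boolean) m.invoke(table);
	}
}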
 
Example 6: the same resize test performed twice in a row.
@Test
public void testDoubleResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM + ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}
 
Example 7: the same resize test performed three times, with a larger memory top-up before the third resize.
@Test
public void testTripleResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(2 * ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + 4*ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}
 
示例14
/**
 * This test validates that new inserts (rather than updates) in "insertOrReplace()" properly
 * react to out of memory conditions.
 */
@Test
public void testInsertsWithInsertOrReplace() {
	try {
		final int numElements = 1000;

		final String longString = getLongString(10000);
		List<MemorySegment> memory = getMemory(1000, 32 * 1024);

		// we create a hash table that thinks the records are super large. that makes it choose initially
		// a lot of memory for the partition buffers, and start with a smaller hash table. that way
		// we trigger a hash table growth early.
		CompactingHashTable<Tuple2<Long, String>> table = new CompactingHashTable<>(
			tuple2LongStringSerializer, tuple2LongStringComparator, memory, 100);
		table.open();

		// first, we insert some elements
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i, longString));
		}

		// now, we replace the same elements, causing fragmentation
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i, longString));
		}
		
		// now we insert an additional set of elements. without compaction during this insertion,
		// the memory will run out
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i + numElements, longString));
		}

		// check the results
		CompactingHashTable<Tuple2<Long, String>>.HashTableProber<Tuple2<Long, String>> prober =
			table.getProber(tuple2LongStringComparator, new SameTypePairComparator<>(tuple2LongStringComparator));
		Tuple2<Long, String> reuse = new Tuple2<>();
		for (long i = 0; i < numElements; i++) {
			assertNotNull(prober.getMatchFor(Tuple2.of(i, longString), reuse));
			assertNotNull(prober.getMatchFor(Tuple2.of(i + numElements, longString), reuse));
		}
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
示例15
@Test
public void testResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}
 
示例16
@Test
public void testDoubleResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM + ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}
 
示例17
@Test
public void testTripleResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(2*ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + 4*ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}
 
示例18
/**
 * This test validates that new inserts (rather than updates) in "insertOrReplace()" properly
 * react to out of memory conditions.
 */
@Test
public void testInsertsWithInsertOrReplace() {
	try {
		final int numElements = 1000;

		final String longString = getLongString(10000);
		List<MemorySegment> memory = getMemory(1000, 32 * 1024);

		// we create a hash table that thinks the records are super large. that makes it choose initially
		// a lot of memory for the partition buffers, and start with a smaller hash table. that way
		// we trigger a hash table growth early.
		CompactingHashTable<Tuple2<Long, String>> table = new CompactingHashTable<>(
			tuple2LongStringSerializer, tuple2LongStringComparator, memory, 100);
		table.open();

		// first, we insert some elements
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i, longString));
		}

		// now, we replace the same elements, causing fragmentation
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i, longString));
		}
		
		// now we insert an additional set of elements. without compaction during this insertion,
		// the memory will run out
		for (long i = 0; i < numElements; i++) {
			table.insertOrReplaceRecord(Tuple2.of(i + numElements, longString));
		}

		// check the results
		CompactingHashTable<Tuple2<Long, String>>.HashTableProber<Tuple2<Long, String>> prober =
			table.getProber(tuple2LongStringComparator, new SameTypePairComparator<>(tuple2LongStringComparator));
		Tuple2<Long, String> reuse = new Tuple2<>();
		for (long i = 0; i < numElements; i++) {
			assertNotNull(prober.getMatchFor(Tuple2.of(i, longString), reuse));
			assertNotNull(prober.getMatchFor(Tuple2.of(i + numElements, longString), reuse));
		}
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
示例19
@Test
public void testResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}
 
示例20
@Test
public void testDoubleResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM + ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}
 
示例21
@Test
public void testTripleResize() {
	// Only CompactingHashTable
	try {
		final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
		final Random rnd = new Random(RANDOM_SEED);
		final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);

		List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
		CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
		table.open();

		for (int i = 0; i < NUM_PAIRS; i++) {
			table.insert(pairs[i]);
		}

		AbstractHashTableProber<IntPair, IntPair> prober =
			table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
		IntPair target = new IntPair();

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		// make sure there is enough memory for resize
		memory.addAll(getMemory(2*ADDITIONAL_MEM));
		b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
		assertTrue(b);

		for (int i = 0; i < NUM_PAIRS; i++) {
			assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
			assertEquals(pairs[i].getValue(), target.getValue());
		}

		table.close();
		assertEquals("Memory lost", NUM_MEM_PAGES + 4*ADDITIONAL_MEM, table.getFreeMemory().size());
	} catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	}
}