Java Source Code Examples: org.deeplearning4j.nn.conf.ConvolutionMode
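
ConvolutionMode controls how Deeplearning4j computes the spatial output size of convolutional and subsampling layers, and whether padding is supplied explicitly or derived automatically. The examples below exercise its four values: Strict, Truncate, Same, and Causal.

As a rough orientation, here is a minimal sketch of the per-dimension output-size rules, paraphrasing the DL4J Javadoc; outputSize is an illustrative helper, not part of the DL4J API:

static int outputSize(int in, int k, int s, int p, ConvolutionMode mode) {
    switch (mode) {
        case Same:
        case Causal: // Causal (1D only) pads on the left; output length matches Same
            return (int) Math.ceil(in / (double) s); // padding is derived, never passed in
        case Strict:
            if ((in - k + 2 * p) % s != 0) {
                // Strict refuses configurations that would silently drop input values
                throw new IllegalArgumentException("(in - k + 2p) must be divisible by the stride");
            }
            return (in - k + 2 * p) / s + 1;
        case Truncate:
        default:
            return (in - k + 2 * p) / s + 1; // integer division floors; trailing values are dropped
    }
}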

Example 1
private MultiLayerNetwork getDepthwiseConv2dNet(CNN2DFormat format, boolean setOnLayerAlso, ConvolutionMode cm) {
    if (setOnLayerAlso) {
        return getNetWithLayer(new DepthwiseConvolution2D.Builder()
                .depthMultiplier(2)
                .kernelSize(3, 3)
                .stride(2, 2)
                .activation(Activation.TANH)
                .dataFormat(format)
                .nOut(3)
                .helperAllowFallback(false)
                .build(), format, cm, null);
    } else {
        return getNetWithLayer(new DepthwiseConvolution2D.Builder()
                .depthMultiplier(2)
                .kernelSize(3, 3)
                .stride(2, 2)
                .activation(Activation.TANH)
                .nOut(3)
                .helperAllowFallback(false)
                .build(), format, cm, null);
    }
}
 
Example 2
private void buildPooling2DLayer(KerasLayerConfiguration conf, Integer kerasVersion) throws Exception {
    Map<String, Object> layerConfig = new HashMap<>();
    layerConfig.put(conf.getLAYER_FIELD_CLASS_NAME(), conf.getLAYER_CLASS_NAME_MAX_POOLING_2D());
    Map<String, Object> config = new HashMap<>();
    config.put(conf.getLAYER_FIELD_NAME(), LAYER_NAME);
    List<Integer> kernelSizeList = new ArrayList<>();
    kernelSizeList.add(KERNEL_SIZE[0]);
    kernelSizeList.add(KERNEL_SIZE[1]);
    config.put(conf.getLAYER_FIELD_POOL_SIZE(), kernelSizeList);
    List<Integer> subsampleList = new ArrayList<>();
    subsampleList.add(STRIDE[0]);
    subsampleList.add(STRIDE[1]);
    config.put(conf.getLAYER_FIELD_POOL_STRIDES(), subsampleList);
    config.put(conf.getLAYER_FIELD_BORDER_MODE(), BORDER_MODE_VALID);
    layerConfig.put(conf.getLAYER_FIELD_CONFIG(), config);
    layerConfig.put(conf.getLAYER_FIELD_KERAS_VERSION(), kerasVersion);

    SubsamplingLayer layer = new KerasPooling2D(layerConfig).getSubsampling2DLayer();
    assertEquals(LAYER_NAME, layer.getLayerName());
    assertArrayEquals(KERNEL_SIZE, layer.getKernelSize());
    assertArrayEquals(STRIDE, layer.getStride());
    assertEquals(POOLING_TYPE, layer.getPoolingType());
    assertEquals(ConvolutionMode.Truncate, layer.getConvolutionMode());
    assertArrayEquals(VALID_PADDING, layer.getPadding());
}
 
Example 3
/**
 * Test identity mapping for 2d convolution
 */
@Test
public void testIdConv2D() {
    final INDArray input = Nd4j.randn(DataType.FLOAT,1,5,7,11);
    final String inputName = "input";
    final String conv = "conv";
    final String output = "output";
    final ComputationGraph graph = new ComputationGraph(new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .setInputTypes(InputType.inferInputType(input))
            .addInputs(inputName)
            .setOutputs(output)
            .layer(conv, new ConvolutionLayer.Builder(3,5)
                    .convolutionMode(ConvolutionMode.Same)
                    .nOut(input.size(1))
                    .weightInit(new WeightInitIdentity())
                    .activation(new ActivationIdentity())
                    .build(), inputName)
            .layer(output, new CnnLossLayer.Builder().activation(new ActivationIdentity()).build(), conv)
            .build());
    graph.init();

    assertEquals("Mapping was not identity!", input, graph.outputSingle(input));
}
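
With the default stride of 1 and ConvolutionMode.Same, the convolution preserves the spatial dimensions, so identity weight initialization plus an identity activation makes the layer an exact identity map; that is what the final assertion verifies.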
 
Example 4
public void computeOutputSize() {
    int nIn = (int) getNIn();

    if (inputSize == null) {
        throw new IllegalArgumentException("Input size has to be specified for locally connected layers.");
    }

    boolean nchw = format == CNN2DFormat.NCHW;

    int[] inputShape = nchw ? new int[] {1, nIn, inputSize[0], inputSize[1]} : new int[] {1, inputSize[0], inputSize[1], nIn};
    INDArray dummyInputForShapeInference = Nd4j.ones(inputShape);

    if (cm == ConvolutionMode.Same) {
        this.outputSize = ConvolutionUtils.getOutputSize(dummyInputForShapeInference, kernel, stride, null, cm,
                        dilation, format);
        this.padding = ConvolutionUtils.getSameModeTopLeftPadding(outputSize, inputSize, kernel, stride, dilation);
        this.paddingBr = ConvolutionUtils.getSameModeBottomRightPadding(outputSize, inputSize, kernel, stride, dilation);
    } else {
        this.outputSize = ConvolutionUtils.getOutputSize(dummyInputForShapeInference, kernel, stride, padding, cm,
                        dilation, format);
    }
}
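
Note the asymmetry in the Same branch above: the total padding required by Same mode can be odd, so the top-left (padding) and bottom-right (paddingBr) amounts are computed separately rather than assumed equal.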
 
Example 5
public static ComputationGraph getOriginalGraph(int seed){
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .weightInit(WeightInit.XAVIER)
            .activation(Activation.TANH)
            .convolutionMode(ConvolutionMode.Same)
            .updater(new Sgd(0.3))
            .graphBuilder()
            .addInputs("in")
            .layer("0", new ConvolutionLayer.Builder().nOut(3).kernelSize(2,2).stride(1,1).build(), "in")
            .layer("1", new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build(), "0")
            .layer("2", new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2,2).stride(1,1).build(), "1")
            .layer("3", new DenseLayer.Builder().nOut(64).build(), "2")
            .layer("4", new DenseLayer.Builder().nIn(64).nOut(64).build(), "3")
            .layer("5", new OutputLayer.Builder().nIn(64).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build(), "4")
            .setOutputs("5")
            .setInputTypes(InputType.convolutionalFlat(28,28,1))
            .build();


    ComputationGraph net = new ComputationGraph(conf);
    net.init();
    return net;
}
 
Example 6
@Test
public void testDepthwiseConv2d() {
    try {
        for (boolean helpers : new boolean[]{false, true}) {
            for (ConvolutionMode cm : new ConvolutionMode[]{ConvolutionMode.Truncate, ConvolutionMode.Same}) {
                Nd4j.getRandom().setSeed(12345);
                Nd4j.getEnvironment().allowHelpers(helpers);
                String msg = helpers ? "With helpers (" + cm + ")" : "No helpers (" + cm + ")";
                System.out.println(" --- " + msg + " ---");

                INDArray inNCHW = Nd4j.rand(this.dataType, 2, 3, 12, 12);
                INDArray labels = TestUtils.randomOneHot(2, 10);

                TestCase tc = TestCase.builder()
                        .msg(msg)
                        .net1(getDepthwiseConv2dNet(CNN2DFormat.NCHW, true, cm))
                        .net2(getDepthwiseConv2dNet(CNN2DFormat.NCHW, false, cm))
                        .net3(getDepthwiseConv2dNet(CNN2DFormat.NHWC, true, cm))
                        .net4(getDepthwiseConv2dNet(CNN2DFormat.NHWC, false, cm))
                        .inNCHW(inNCHW)
                        .labelsNCHW(labels)
                        .labelsNHWC(labels)
                        .testLayerIdx(1)
                        .helpers(helpers)
                        .build();

                testHelper(tc);
            }
        }
    } finally {
        Nd4j.getEnvironment().allowHelpers(true);
    }
}
 
Example 7
private void buildBlock4a(ComputationGraphConfiguration.GraphBuilder graph) {
    convolution2dAndBN(graph, "inception_4a_3x3",
            96, 640, new int[]{1, 1}, new int[]{1, 1},
            192, 96, new int[]{3, 3}, new int[]{1, 1}
            , new int[]{1, 1, 1, 1}, "inception_3c");
    String rel1 = lastReluId();

    convolution2dAndBN(graph, "inception_4a_5x5",
            32, 640, new int[]{1, 1}, new int[]{1, 1},
            64, 32, new int[]{5, 5}, new int[]{1, 1}
            , new int[]{2, 2, 2, 2}, "inception_3c");
    String rel2 = lastReluId();

    graph.addLayer("avg7",
            new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.AVG, new int[]{3, 3},
                    new int[]{3, 3})
                    .convolutionMode(ConvolutionMode.Truncate)
                    .build(),
            "inception_3c");
    convolution2dAndBN(graph, "inception_4a_pool",
            128, 640, new int[]{1, 1}, new int[]{1, 1},
            null, null, null, null
            , new int[]{2, 2, 2, 2}, "avg7");
    String pad1 = lastPaddingId();

    convolution2dAndBN(graph, "inception_4a_1x1",
            256, 640, new int[]{1, 1}, new int[]{1, 1},
            null, null, null, null
            , null, "inception_3c");
    String rel4 = lastReluId();
    graph.addVertex("inception_4a", new MergeVertex(), rel1, rel2, rel4, pad1);

}
 
Example 8
@Test
public void testLRN() {
    try {
        for (boolean helpers : new boolean[]{false, true}) {
            for (ConvolutionMode cm : new ConvolutionMode[]{ConvolutionMode.Truncate, ConvolutionMode.Same}) {
                Nd4j.getRandom().setSeed(12345);
                Nd4j.getEnvironment().allowHelpers(helpers);
                String msg = helpers ? "With helpers (" + cm + ")" : "No helpers (" + cm + ")";
                System.out.println(" --- " + msg + " ---");

                INDArray inNCHW = Nd4j.rand(this.dataType, 2, 3, 12, 12);
                INDArray labels = TestUtils.randomOneHot(2, 10);

                TestCase tc = TestCase.builder()
                        .msg(msg)
                        .net1(getLrnLayer(CNN2DFormat.NCHW, true, cm))
                        .net2(getLrnLayer(CNN2DFormat.NCHW, false, cm))
                        .net3(getLrnLayer(CNN2DFormat.NHWC, true, cm))
                        .net4(getLrnLayer(CNN2DFormat.NHWC, false, cm))
                        .inNCHW(inNCHW)
                        .labelsNCHW(labels)
                        .labelsNHWC(labels)
                        .testLayerIdx(1)
                        .helpers(helpers)
                        .build();

                testHelper(tc);
            }
        }
    } finally {
        Nd4j.getEnvironment().allowHelpers(true);
    }
}
 
Example 9
private void buildBlock5a(ComputationGraphConfiguration.GraphBuilder graph) {
    convolution2dAndBN(graph, "inception_5a_3x3",
            96, 1024, new int[]{1, 1}, new int[]{1, 1},
            384, 96, new int[]{3, 3}, new int[]{1, 1},
            new int[]{1, 1, 1, 1}, "inception_4e");
    String relu1 = lastReluId();

    graph.addLayer("avg9",
            new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.AVG, new int[]{3, 3},
                    new int[]{3, 3})
                    .convolutionMode(ConvolutionMode.Truncate)
                    .build(),
            "inception_4e");
    convolution2dAndBN(graph, "inception_5a_pool",
            96, 1024, new int[]{1, 1}, new int[]{1, 1},
            null, null, null, null,
            new int[]{1, 1, 1, 1}, "avg9");
    String pad1 = lastPaddingId();

    convolution2dAndBN(graph, "inception_5a_1x1",
            256, 1024, new int[]{1, 1}, new int[]{1, 1},
            null, null, null, null,
            null, "inception_4e");
    String rel3 = lastReluId();

    graph.addVertex("inception_5a", new MergeVertex(), relu1, pad1, rel3);
}
 
Example 10
private void buildBlock5b(ComputationGraphConfiguration.GraphBuilder graph) {
    convolution2dAndBN(graph, "inception_5b_3x3",
            96, 736, new int[]{1, 1}, new int[]{1, 1},
            384, 96, new int[]{3, 3}, new int[]{1, 1},
            new int[]{1, 1, 1, 1}, "inception_5a");
    String rel1 = lastReluId();

    graph.addLayer("max2",
            new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[]{3, 3},
                    new int[]{2, 2})
                    .convolutionMode(ConvolutionMode.Truncate)
                    .build(),
            "inception_5a");
    convolution2dAndBN(graph, "inception_5b_pool",
            96, 736, new int[]{1, 1}, new int[]{1, 1},
            null, null, null, null,
            null, "max2");
    graph.addLayer(nextPaddingId(),
            zeroPadding(1), lastReluId());
    String pad1 = lastPaddingId();

    convolution2dAndBN(graph, "inception_5b_1x1",
            256, 736, new int[]{1, 1}, new int[]{1, 1},
            null, null, null, null,
            null, "inception_5a");
    String rel2 = lastReluId();

    graph.addVertex("inception_5b", new MergeVertex(), rel1, pad1, rel2);
}
 
Example 11
/**
 * Get convolution border mode from Keras layer configuration.
 *
 * @param layerConfig dictionary containing Keras layer configuration
 * @return Border mode of convolutional layers
 * @throws InvalidKerasConfigurationException     Invalid Keras configuration
 * @throws UnsupportedKerasConfigurationException Unsupported convolution border mode
 */
public static ConvolutionMode getConvolutionModeFromConfig(Map<String, Object> layerConfig,
                                                           KerasLayerConfiguration conf)
        throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException {
    Map<String, Object> innerConfig = KerasLayerUtils.getInnerLayerConfigFromConfig(layerConfig, conf);
    if (!innerConfig.containsKey(conf.getLAYER_FIELD_BORDER_MODE()))
        throw new InvalidKerasConfigurationException("Could not determine convolution border mode: no "
                + conf.getLAYER_FIELD_BORDER_MODE() + " field found");
    String borderMode = (String) innerConfig.get(conf.getLAYER_FIELD_BORDER_MODE());
    ConvolutionMode convolutionMode;
    if (borderMode.equals(conf.getLAYER_BORDER_MODE_SAME())) {
        /* Keras relies upon the Theano and TensorFlow border mode definitions and operations:
         * TH: http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv.conv2d
         * TF: https://www.tensorflow.org/api_docs/python/nn/convolution#conv2d
         */
        convolutionMode = ConvolutionMode.Same;

    } else if (borderMode.equals(conf.getLAYER_BORDER_MODE_VALID()) ||
            borderMode.equals(conf.getLAYER_BORDER_MODE_FULL())) {
        convolutionMode = ConvolutionMode.Truncate;
    } else if(borderMode.equals(conf.getLAYER_BORDER_MODE_CAUSAL())) {
        convolutionMode = ConvolutionMode.Causal;
    } else {
        throw new UnsupportedKerasConfigurationException("Unsupported convolution border mode: " + borderMode);
    }
    return convolutionMode;
}
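
In short: Keras `same` maps to ConvolutionMode.Same, both `valid` and `full` map to ConvolutionMode.Truncate, `causal` maps to ConvolutionMode.Causal, and any other border mode is rejected with an UnsupportedKerasConfigurationException.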
 
Example 12
private void buildPooling3DLayer(KerasLayerConfiguration conf, Integer kerasVersion) throws Exception {
    Map<String, Object> layerConfig = new HashMap<>();
    layerConfig.put(conf.getLAYER_FIELD_CLASS_NAME(), conf.getLAYER_CLASS_NAME_MAX_POOLING_3D());
    Map<String, Object> config = new HashMap<>();
    config.put(conf.getLAYER_FIELD_NAME(), LAYER_NAME);
    List<Integer> kernelSizeList = new ArrayList<>();
    kernelSizeList.add(KERNEL_SIZE[0]);
    kernelSizeList.add(KERNEL_SIZE[1]);
    kernelSizeList.add(KERNEL_SIZE[2]);
    config.put(conf.getLAYER_FIELD_POOL_SIZE(), kernelSizeList);
    List<Integer> subsampleList = new ArrayList<>();
    subsampleList.add(STRIDE[0]);
    subsampleList.add(STRIDE[1]);
    subsampleList.add(STRIDE[2]);
    config.put(conf.getLAYER_FIELD_POOL_STRIDES(), subsampleList);
    config.put(conf.getLAYER_FIELD_BORDER_MODE(), BORDER_MODE_VALID);
    layerConfig.put(conf.getLAYER_FIELD_CONFIG(), config);
    layerConfig.put(conf.getLAYER_FIELD_KERAS_VERSION(), kerasVersion);

    Subsampling3DLayer layer = new KerasPooling3D(layerConfig).getSubsampling3DLayer();
    assertEquals(LAYER_NAME, layer.getLayerName());
    assertArrayEquals(KERNEL_SIZE, layer.getKernelSize());
    assertArrayEquals(STRIDE, layer.getStride());
    assertEquals(POOLING_TYPE, layer.getPoolingType());
    assertEquals(ConvolutionMode.Truncate, layer.getConvolutionMode());
    assertArrayEquals(VALID_PADDING, layer.getPadding());
}
 
Example 13
public Builder(int capsuleDimensions, int channels,
        int[] kernelSize, int[] stride, int[] padding, int[] dilation,
        ConvolutionMode convolutionMode){
    this.capsuleDimensions = capsuleDimensions;
    this.channels = channels;
    this.setKernelSize(kernelSize);
    this.setStride(stride);
    this.setPadding(padding);
    this.setDilation(dilation);
    this.convolutionMode = convolutionMode;
}
 
Example 14
private MultiLayerNetwork getCropping2dNet(CNN2DFormat format, boolean setOnLayerAlso) {
    if (setOnLayerAlso) {
       return getNetWithLayer(new Cropping2D.Builder(2,2)
                        .dataFormat(format).build(), format, ConvolutionMode.Same, null);
    } else {
        return getNetWithLayer(new Cropping2D.Builder(2,2)
                .build(), format, ConvolutionMode.Same, null);
    }
}
 
Example 15
/**
 * @deprecated Use {@link #getOutputTypeCnnLayers(InputType, int[], int[], int[], int[], ConvolutionMode, long, long, String, CNN2DFormat, Class)}
 */
@Deprecated
public static InputType getOutputTypeCnnLayers(InputType inputType, int[] kernelSize, int[] stride, int[] padding,
                                               int[] dilation, ConvolutionMode convolutionMode, long outputDepth, long layerIdx, String layerName,
                                               Class<?> layerClass) {
    return getOutputTypeCnnLayers(inputType, kernelSize, stride, padding, dilation, convolutionMode, outputDepth,
            layerIdx, layerName, CNN2DFormat.NCHW, layerClass);
}
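
The deprecated overload simply delegates to the newer one, passing CNN2DFormat.NCHW to preserve the historical channels-first default.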
 
Example 16
private MultiLayerNetwork getNetWithLayer(Layer layer, CNN2DFormat format, ConvolutionMode cm, InputType inputType) {
    NeuralNetConfiguration.ListBuilder builder = new NeuralNetConfiguration.Builder()
            .dataType(this.dataType)
            .seed(12345)
            .convolutionMode(cm)
            .list()
            .layer(new ConvolutionLayer.Builder()
                    .kernelSize(3, 3)
                    .stride(2, 2)
                    .activation(Activation.TANH)
                    .dataFormat(format)
                    .nOut(3)
                    .helperAllowFallback(false)
                    .build())
            .layer(layer)
            .layer(new OutputLayer.Builder().activation(Activation.SOFTMAX).nOut(10).build())
            .setInputType(inputType != null ? inputType : InputType.convolutional(12, 12, 3, format));

    if(format == CNN2DFormat.NHWC && !(layer instanceof GlobalPoolingLayer)){
        //Add a preprocessor due to the differences in how NHWC and NCHW activations are flattened
        //DL4J's flattening behaviour matches Keras (hence TF) for import compatibility
        builder.inputPreProcessor(2, new ComposableInputPreProcessor(new NHWCToNCHWPreprocessor(), new CnnToFeedForwardPreProcessor()));
    }

    MultiLayerNetwork net = new MultiLayerNetwork(builder.build());
    net.init();
    return net;
}
 
Example 17
/**
 * Check that the convolution mode is consistent with the padding specification
 */
public static void validateConvolutionModePadding(ConvolutionMode mode, int padding) {
    if (mode == ConvolutionMode.Same && padding != 0) {
        throw new IllegalArgumentException("Padding cannot be used when using the `same' convolution mode");
    }
}
 
Example 18
/**
 * Check that the convolution mode is consistent with the padding specification
 */
public static void validateConvolutionModePadding(ConvolutionMode mode, int[] padding) {
    if (mode == ConvolutionMode.Same) {
        for (int p : padding) {
            if (p != 0)
                throw new IllegalArgumentException("Padding cannot be used when using the `same' convolution mode");
        }
    }
}
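
A minimal usage sketch for the two validators above (hypothetical call sites; only Same mode restricts explicit padding):

validateConvolutionModePadding(ConvolutionMode.Truncate, new int[]{1, 1}); // OK: Truncate allows explicit padding
validateConvolutionModePadding(ConvolutionMode.Same, new int[]{0, 0});     // OK: zero padding is allowed in Same mode
validateConvolutionModePadding(ConvolutionMode.Same, new int[]{1, 1});     // throws IllegalArgumentException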
 
Example 19
/**
 * Given a mask array for a 1D CNN layer of shape [minibatch, sequenceLength], reduce the mask according to the 1D CNN layer configuration.
 * Unlike RNN layers, 1D CNN layers may down-sample the data; consequently, we need to down-sample the mask array
 * in the same way, to maintain the correspondence between the masks and the output activations
 *
 * @param in       Input size
 * @param kernel   Kernel size
 * @param stride   Stride
 * @param padding  Padding
 * @param dilation Dilation
 * @param cm       Convolution mode
 * @return Reduced mask
 */
public static INDArray cnn1dMaskReduction(INDArray in, int kernel, int stride, int padding, int dilation, ConvolutionMode cm){
    Preconditions.checkState(in.rank() == 2, "Rank must be 2 for cnn1d mask array - shape %s", in.shape());
    if((cm == ConvolutionMode.Same || cm == ConvolutionMode.Causal) && stride == 1 ){
        return in;
    }

    if(!Shape.hasDefaultStridesForShape(in)){
        in = in.dup();
    }

    INDArray reshaped4d = in.reshape(in.size(0), 1, in.size(1), 1);

    int[] outSize;
    int[] pad = null;
    int[] k = new int[]{kernel,1};
    int[] s = new int[]{stride, 1};
    int[] d = new int[]{dilation, 1};
    if (cm == ConvolutionMode.Same || cm == ConvolutionMode.Causal) {
        outSize = ConvolutionUtils.getOutputSize(reshaped4d, k, s, null, cm, d, CNN2DFormat.NCHW); //Also performs validation
    } else {
        pad = new int[]{padding, 0};
        outSize = ConvolutionUtils.getOutputSize(reshaped4d, k, s, pad, cm, d, CNN2DFormat.NCHW); //Also performs validation
    }
    int outH = outSize[0];

    INDArray output = Nd4j.createUninitialized(new int[]{(int)in.size(0), 1, outH, 1}, 'c');

    DynamicCustomOp op = new MaxPooling2D(reshaped4d, output, Pooling2DConfig.builder()
            .kH(k[0]).kW(k[1])
            .sH(s[0]).sW(s[1])
            .pH(pad == null ? 0 : pad[0]).pW(pad == null ? 0 : pad[1])
            .dH(d[0]).dW(d[1])
            .isSameMode(cm == ConvolutionMode.Same || cm == ConvolutionMode.Causal)
            .isNHWC(false)
            .build());

    Nd4j.getExecutioner().exec(op);
    return output.reshape('c', in.size(0), outH);
}
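
A small usage sketch, assuming this method lives in ConvolutionUtils as in DL4J (the mask contents are illustrative):

// Mask of shape [minibatch, sequenceLength]: example 0 fully valid, example 1 valid for 6 of 12 steps
INDArray mask = Nd4j.ones(DataType.FLOAT, 2, 12);
mask.get(NDArrayIndex.point(1), NDArrayIndex.interval(6, 12)).assign(0);

// Same mode, kernel 2, stride 2: output length is ceil(12 / 2) = 6, so the reduced mask has shape [2, 6]
INDArray reduced = ConvolutionUtils.cnn1dMaskReduction(mask, 2, 2, 0, 1, ConvolutionMode.Same);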
 
Example 20
@Test
public void test1dForward(){
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(123)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).l2(2e-4)
            .updater(new Nesterovs(0.9)).dropOut(0.5)
            .list()
            .layer(new LocallyConnected1D.Builder().kernelSize(4).nIn(3)
                    .stride(1).nOut(16).dropOut(0.5)
                    .convolutionMode(ConvolutionMode.Strict)
                    .setInputSize(28)
                    .activation(Activation.RELU).weightInit(
                            WeightInit.XAVIER)
                    .build())
            .layer(new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS) //output layer
                    .nOut(10).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.recurrent(3,  8));

    MultiLayerConfiguration conf = builder.build();
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();

    INDArray input = Nd4j.ones(10, 3, 8);
    INDArray output = network.output(input, false);
    for (int i = 0; i < 100; i++) { // TODO: this falls flat for 1000 iterations on my machine
        output = network.output(input, false);
    }

    assertArrayEquals(new long[] {(8 - 4 + 1) * 10, 10}, output.shape());
    network.fit(input, output);

}
 
Example 21
@Test
public void testYoloHouseNumber() throws Exception {

    File f = Resources.asFile("regression_testing/100b4/HouseNumberDetection_100b4.bin");
    ComputationGraph net = ComputationGraph.load(f, true);

    int nBoxes = 5;
    int nClasses = 10;

    ConvolutionLayer cl = (ConvolutionLayer) ((LayerVertex) net.getConfiguration().getVertices()
            .get("convolution2d_9")).getLayerConf().getLayer();
    assertEquals(nBoxes * (5 + nClasses), cl.getNOut());
    assertEquals(new ActivationIdentity(), cl.getActivationFn());
    assertEquals(ConvolutionMode.Same, cl.getConvolutionMode());
    assertEquals(new WeightInitXavier(), cl.getWeightInitFn());
    assertArrayEquals(new int[]{1, 1}, cl.getKernelSize());

    INDArray outExp;
    File f2 = Resources.asFile("regression_testing/100b4/HouseNumberDetection_Output_100b4.bin");
    try (DataInputStream dis = new DataInputStream(new FileInputStream(f2))) {
        outExp = Nd4j.read(dis);
    }

    INDArray in;
    File f3 = Resources.asFile("regression_testing/100b4/HouseNumberDetection_Input_100b4.bin");
    try (DataInputStream dis = new DataInputStream(new FileInputStream(f3))) {
        in = Nd4j.read(dis);
    }

    INDArray outAct = net.outputSingle(in);

    boolean eq = outExp.equalsWithEps(outAct.castTo(outExp.dataType()), 1e-3);
    assertTrue(eq);
}
 
Example 22
@Test
@Ignore("AB 2019/05/23 - Failing on linux-x86_64-cuda-9.2 - see issue #7657")
public void testYoloHouseNumber() throws Exception {

    File f = Resources.asFile("regression_testing/100b3/HouseNumberDetection_100b3.bin");
    ComputationGraph net = ComputationGraph.load(f, true);

    int nBoxes = 5;
    int nClasses = 10;

    ConvolutionLayer cl = (ConvolutionLayer)((LayerVertex)net.getConfiguration().getVertices().get("convolution2d_9")).getLayerConf().getLayer();
    assertEquals(nBoxes * (5 + nClasses), cl.getNOut());
    assertEquals(new ActivationIdentity(), cl.getActivationFn());
    assertEquals(ConvolutionMode.Same, cl.getConvolutionMode());
    assertEquals(new WeightInitXavier(), cl.getWeightInitFn());
    assertArrayEquals(new int[]{1,1}, cl.getKernelSize());

    INDArray outExp;
    File f2 = Resources.asFile("regression_testing/100b3/HouseNumberDetection_Output_100b3.bin");
    try(DataInputStream dis = new DataInputStream(new FileInputStream(f2))){
        outExp = Nd4j.read(dis);
    }

    INDArray in;
    File f3 = Resources.asFile("regression_testing/100b3/HouseNumberDetection_Input_100b3.bin");
    try(DataInputStream dis = new DataInputStream(new FileInputStream(f3))){
        in = Nd4j.read(dis);
    }

    INDArray outAct = net.outputSingle(in);

    boolean eq = outExp.equalsWithEps(outAct.castTo(outExp.dataType()), 1e-3);
    assertTrue(eq);
}
 
Example 23
@Test
public void testSubsampling2d() {
    try {
        for (boolean helpers : new boolean[]{false, true}) {
            for (ConvolutionMode cm : new ConvolutionMode[]{ConvolutionMode.Truncate, ConvolutionMode.Same}) {
                Nd4j.getRandom().setSeed(12345);
                Nd4j.getEnvironment().allowHelpers(helpers);
                String msg = helpers ? "With helpers (" + cm + ")" : "No helpers (" + cm + ")";
                System.out.println(" --- " + msg + " ---");

                INDArray inNCHW = Nd4j.rand(this.dataType, 2, 3, 12, 12);
                INDArray labels = TestUtils.randomOneHot(2, 10);

                TestCase tc = TestCase.builder()
                        .msg(msg)
                        .net1(getSubsampling2dNet(CNN2DFormat.NCHW, true, cm))
                        .net2(getSubsampling2dNet(CNN2DFormat.NCHW, false, cm))
                        .net3(getSubsampling2dNet(CNN2DFormat.NHWC, true, cm))
                        .net4(getSubsampling2dNet(CNN2DFormat.NHWC, false, cm))
                        .inNCHW(inNCHW)
                        .labelsNCHW(labels)
                        .labelsNHWC(labels)
                        .testLayerIdx(1)
                        .helpers(helpers)
                        .build();

                testHelper(tc);
            }
        }
    } finally {
        Nd4j.getEnvironment().allowHelpers(true);
    }
}
 
Example 24
@Test
public void testCnnLossLayer() {
    try {
        for (boolean helpers : new boolean[]{false, true}) {
            Nd4j.getRandom().setSeed(12345);
            Nd4j.getEnvironment().allowHelpers(helpers);
            String msg = helpers ? "With helpers" : "No helpers";
            System.out.println(" --- " + msg + " ---");

            INDArray inNCHW = Nd4j.rand(this.dataType, 2, 3, 12, 12);
            INDArray labelsNHWC = TestUtils.randomOneHot(this.dataType,2*6*6, 3);
            labelsNHWC = labelsNHWC.reshape(2,6,6,3);
            INDArray labelsNCHW = labelsNHWC.permute(0,3,1,2).dup();

            TestCase tc = TestCase.builder()
                    .msg(msg)
                    .net1(getCnnLossNet(CNN2DFormat.NCHW, true, ConvolutionMode.Same))
                    .net2(getCnnLossNet(CNN2DFormat.NCHW, false, ConvolutionMode.Same))
                    .net3(getCnnLossNet(CNN2DFormat.NHWC, true, ConvolutionMode.Same))
                    .net4(getCnnLossNet(CNN2DFormat.NHWC, false, ConvolutionMode.Same))
                    .inNCHW(inNCHW)
                    .labelsNCHW(labelsNCHW)
                    .labelsNHWC(labelsNHWC)
                    .testLayerIdx(1)
                    .nhwcOutput(true)
                    .helpers(helpers)
                    .build();

            testHelper(tc);
        }
    } finally {
        Nd4j.getEnvironment().allowHelpers(true);
    }
}
 
Example 25
private MultiLayerNetwork getZeroPaddingNet(CNN2DFormat format, boolean setOnLayerAlso) {
    if (setOnLayerAlso) {
        return getNetWithLayer(new ZeroPaddingLayer.Builder(2,2)
                        .dataFormat(format).build(), format, ConvolutionMode.Same, null);
    } else {
        return getNetWithLayer(new ZeroPaddingLayer.Builder(2,2).build(),
                format, ConvolutionMode.Same, null);
    }
}
 
Example 26
@Test
public void testConv2d() {
    try {
        for (boolean helpers : new boolean[]{false, true}) {
            for (ConvolutionMode cm : new ConvolutionMode[]{ConvolutionMode.Truncate, ConvolutionMode.Same}) {
                Nd4j.getRandom().setSeed(12345);
                Nd4j.getEnvironment().allowHelpers(helpers);
                String msg = helpers ? "With helpers (" + cm + ")" : "No helpers (" + cm + ")";
                System.out.println(" --- " + msg + " ---");

                INDArray inNCHW = Nd4j.rand(this.dataType, 2, 3, 12, 12);
                INDArray labels = TestUtils.randomOneHot(2, 10);

                TestCase tc = TestCase.builder()
                        .msg(msg)
                        .net1(getConv2dNet(CNN2DFormat.NCHW, true, cm))
                        .net2(getConv2dNet(CNN2DFormat.NCHW, false, cm))
                        .net3(getConv2dNet(CNN2DFormat.NHWC, true, cm))
                        .net4(getConv2dNet(CNN2DFormat.NHWC, false, cm))
                        .inNCHW(inNCHW)
                        .labelsNCHW(labels)
                        .labelsNHWC(labels)
                        .testLayerIdx(1)
                        .helpers(helpers)
                        .build();

                testHelper(tc);
            }
        }
    } finally {
        Nd4j.getEnvironment().allowHelpers(true);
    }
}
 
Example 27
public ComputationGraph init() {
    ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .activation(Activation.LEAKYRELU)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .lrPolicyDecayRate(0.5)
            .learningRateDecayPolicy(LearningRatePolicy.Score)
            .updater(Adam.builder().build())
            .weightInit(WeightInit.XAVIER)
            .learningRate(0.02)
            .miniBatch(miniBatch)
            .convolutionMode(ConvolutionMode.Truncate)
            .trainingWorkspaceMode(WorkspaceMode.SINGLE)
            .inferenceWorkspaceMode(WorkspaceMode.SINGLE)
            .graphBuilder();

    // set input & output; the "value2" head below is disabled, so only two outputs are declared
    graph
            .addInputs("input").setInputTypes(InputType.convolutionalFlat(height, width, channels))
            .addLayer("policy", new OutputLayer.Builder()
                    .lossFunction(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation(Activation.SOFTMAX)
                    .nOut(numClasses).build(), "embeddings_c")
            .addLayer("value1", new OutputLayer.Builder()
                    .lossFunction(LossFunctions.LossFunction.MSE)
                    .activation(Activation.TANH)
                    .nOut(1).build(), "embeddings_r1")
//          .addLayer("value2", new OutputLayer.Builder()
//                  .lossFunction(LossFunctions.LossFunction.MSE)
//                  .activation(Activation.TANH)
//                  .nOut(1).build(), "embeddings_r2")
            .setOutputs("policy", "value1")
            .backprop(true).pretrain(false);

    int kernelSize = 128;

    graph.addLayer("c-layer0", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1}, new int[]{1, 1})
            .activation(Activation.LEAKYRELU).nOut(kernelSize).build(), "input");

    // residual blocks: conv -> conv -> element-wise add with the block input -> activation
    int blockNum = 8;
    String prevLayer = "c-layer0";
    for (int i = 1; i <= blockNum; i++) {
        String layerName = "c-block" + i + "-";
        graph.addLayer(layerName + "1", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1}, new int[]{1, 1})
                .activation(Activation.LEAKYRELU).nOut(kernelSize).build(), prevLayer);
        graph.addLayer(layerName + "2", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1}, new int[]{1, 1})
                .activation(Activation.IDENTITY).nOut(kernelSize).build(), layerName + "1");
        graph.addVertex("shortcut" + i, new ElementWiseVertex(ElementWiseVertex.Op.Add), layerName + "2", prevLayer);
        graph.addLayer(layerName + "3", new ActivationLayer.Builder().activation(Activation.LEAKYRELU).build(), "shortcut" + i);
        prevLayer = layerName + "3";
    }

    // for classification
    graph.addLayer("embeddings_c", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1}, new int[]{1, 1})
            .activation(Activation.IDENTITY).nOut(2).build(), prevLayer);

    // for value regression
    graph.addLayer("reg-c-layer1", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1}, new int[]{1, 1})
            .activation(Activation.IDENTITY).nOut(1).build(), prevLayer);
    graph.addLayer("embeddings_r1", new DenseLayer.Builder().activation(Activation.IDENTITY).nOut(256).build(), "reg-c-layer1");

//  graph.addLayer("reg-c-layer2", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1}, new int[]{1, 1}).activation(Activation.IDENTITY).nOut(1).build(), prevLayer);
//  graph.addLayer("embeddings_r2", new DenseLayer.Builder().activation(Activation.IDENTITY).nOut(256).build(), "reg-c-layer2");

    ComputationGraphConfiguration conf = graph.build();
    ComputationGraph model = new ComputationGraph(conf);
    model.init();

    log.info("\nNumber of params: " + model.numParams() + "\n");
    return model;
}
 
Example 28
@Test
public void testGlobalLocalConfigCompGraph() {
    for (ConvolutionMode cm : new ConvolutionMode[] {ConvolutionMode.Strict, ConvolutionMode.Truncate,
                    ConvolutionMode.Same}) {
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                .convolutionMode(cm).graphBuilder().addInputs("in")
                .addLayer("0", new ConvolutionLayer.Builder().kernelSize(3, 3).stride(3, 3).padding(0, 0)
                        .nIn(3).nOut(3).build(), "in")
                .addLayer("1", new ConvolutionLayer.Builder().convolutionMode(ConvolutionMode.Strict)
                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).nIn(3).nOut(3).build(), "0")
                .addLayer("2", new ConvolutionLayer.Builder().convolutionMode(ConvolutionMode.Truncate)
                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).nIn(3).nOut(3).build(), "1")
                .addLayer("3", new ConvolutionLayer.Builder().convolutionMode(ConvolutionMode.Same)
                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).nIn(3).nOut(3).build(), "2")
                .addLayer("4", new SubsamplingLayer.Builder().kernelSize(3, 3).stride(3, 3).padding(0, 0)
                        .build(), "3")
                .addLayer("5", new SubsamplingLayer.Builder().convolutionMode(ConvolutionMode.Strict)
                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).build(), "4")
                .addLayer("6", new SubsamplingLayer.Builder().convolutionMode(ConvolutionMode.Truncate)
                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).build(), "5")
                .addLayer("7", new SubsamplingLayer.Builder().convolutionMode(ConvolutionMode.Same)
                        .kernelSize(3, 3).stride(3, 3).padding(0, 0).build(), "6")
                .addLayer("8", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nOut(3).build(), "7")
                .setOutputs("8").build();

        assertEquals(cm, ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("0")).getLayerConf().getLayer())
                .getConvolutionMode());
        assertEquals(ConvolutionMode.Strict,
                ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("1")).getLayerConf().getLayer())
                        .getConvolutionMode());
        assertEquals(ConvolutionMode.Truncate,
                ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("2")).getLayerConf().getLayer())
                        .getConvolutionMode());
        assertEquals(ConvolutionMode.Same,
                ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("3")).getLayerConf().getLayer())
                        .getConvolutionMode());

        assertEquals(cm, ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("4")).getLayerConf().getLayer())
                .getConvolutionMode());
        assertEquals(ConvolutionMode.Strict,
                ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("5")).getLayerConf().getLayer())
                        .getConvolutionMode());
        assertEquals(ConvolutionMode.Truncate,
                ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("6")).getLayerConf().getLayer())
                        .getConvolutionMode());
        assertEquals(ConvolutionMode.Same,
                ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("7")).getLayerConf().getLayer())
                        .getConvolutionMode());
    }
}
 
Example 29
static ConvolutionLayer convolution(int filterSize, int in, int out) {
    return new ConvolutionLayer.Builder(new int[]{filterSize, filterSize})
            .convolutionMode(ConvolutionMode.Truncate)
            .nIn(in).nOut(out)
            .build();
}
 
Example 30
@Test
public void testCnn1dWithMasking(){
    int length = 12;
    int convNIn = 2;
    int convNOut1 = 3;
    int convNOut2 = 4;
    int finalNOut = 3;

    int pnorm = 2;

    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[] {SubsamplingLayer.PoolingType.MAX, SubsamplingLayer.PoolingType.AVG};

    for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
        for(ConvolutionMode cm : new ConvolutionMode[]{ConvolutionMode.Same, ConvolutionMode.Truncate}){
            for( int stride : new int[]{1, 2}){
                String s = cm + ", stride=" + stride + ", pooling=" + poolingType;
                log.info("Starting test: " + s);
                Nd4j.getRandom().setSeed(12345);

                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .dataType(DataType.DOUBLE)
                        .updater(new NoOp())
                        .activation(Activation.TANH)
                        .dist(new NormalDistribution(0, 1)).convolutionMode(cm)
                        .seed(12345)
                        .list()
                        .layer(new Convolution1DLayer.Builder().kernelSize(2)
                                .stride(stride).nIn(convNIn).nOut(convNOut1)
                                .build())
                        .layer(new Subsampling1DLayer.Builder(poolingType).kernelSize(2)
                                .stride(stride).pnorm(pnorm).build())
                        .layer(new Convolution1DLayer.Builder().kernelSize(2)
                                .stride(stride).nIn(convNOut1).nOut(convNOut2)
                                .build())
                        .layer(new GlobalPoolingLayer(PoolingType.AVG))
                        .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                        .setInputType(InputType.recurrent(convNIn, length)).build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                INDArray f = Nd4j.rand(new int[]{2, convNIn, length});
                INDArray fm = Nd4j.create(2, length);
                fm.get(NDArrayIndex.point(0), NDArrayIndex.all()).assign(1);
                fm.get(NDArrayIndex.point(1), NDArrayIndex.interval(0,6)).assign(1);

                INDArray label = TestUtils.randomOneHot(2, finalNOut);

                boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(net).input(f)
                        .labels(label).inputMask(fm));

                assertTrue(s, gradOK);
                TestUtils.testModelSerialization(net);

                //TODO also check that masked step values don't impact forward pass, score or gradients

                DataSet ds = new DataSet(f,label,fm,null);
                double scoreBefore = net.score(ds);
                net.setInput(f);
                net.setLabels(label);
                net.setLayerMaskArrays(fm, null);
                net.computeGradientAndScore();
                INDArray gradBefore = net.getFlattenedGradients().dup();
                f.putScalar(1, 0, 10, 10.0);
                f.putScalar(1, 1, 11, 20.0);
                double scoreAfter = net.score(ds);
                net.setInput(f);
                net.setLabels(label);
                net.setLayerMaskArrays(fm, null);
                net.computeGradientAndScore();
                INDArray gradAfter = net.getFlattenedGradients().dup();

                assertEquals(scoreBefore, scoreAfter, 1e-6);
                assertEquals(gradBefore, gradAfter);
            }
        }
    }
}