🎉 🎉 RAPIDMINER 9.10 IS OUT!!! 🎉🎉

Download the latest version helping analytics teams accelerate time-to-value for streaming and IIOT use cases.

CLICK HERE TO DOWNLOAD

simple image classification cats vs dogs not working (WARNING: Couldn't update network in epoch X)

User69473User69473 Member Posts: 1 Learner III
edited March 19 in Help
Hello everyone,

I'm trying to train a simple CNN using the deep learning extension, but I keep getting a warning message that the network has not been updated, and as a result the predictions are wrong.

I'm using the example MNIST process as a template, and I have three folders (train, testing, validation), each with two sub-folders (images of cats and dogs).

In the image preprocessing step I do two things: resize all images to 100x100 pixels, and rescale the color values to the range [0, 1].

In the network architecture I changed the output layer to be 1 neuron activated with the sigmoid function, and I'm using cross-entropy as the loss function.

what am I doing wrong?

thanks in advance

here are the folders with the files 

this is my process:

<?xml version="1.0" encoding="UTF-8"?><process version="9.8.001">
  <!-- RapidMiner Studio process (saved with 9.8.001), derived from the bundled
       MNIST deep-learning tutorial: read training images, preprocess (color scale
       + resize), train a CNN, group the preprocessing model with the network,
       apply to test images, and score classification performance. -->
  <context>
    <input/>
    <output/>
    <macros/>
  </context>
  <operator activated="true" class="process" compatibility="9.8.001" expanded="true" name="Process" origin="GENERATED_TUTORIAL">
    <parameter key="logverbosity" value="init"/>
    <parameter key="random_seed" value="2001"/>
    <parameter key="send_mail" value="never"/>
    <parameter key="notification_email" value=""/>
    <parameter key="process_duration_for_mail" value="30"/>
    <parameter key="encoding" value="SYSTEM"/>
    <process expanded="true">
      <!-- Reads the training images from a local directory; folder names provide the label. -->
      <operator activated="true" class="image_handling:read_image_meta_data" compatibility="0.2.001" expanded="true" height="68" name="Training Images" origin="GENERATED_TUTORIAL" width="90" x="45" y="34">
        <parameter key="directory" value="D:/Dropbox/FEN/Semestres/2019-1/Fundamentals of ML - MIT_UCH/Ayudantias/DL/mini cat vs dogs/test_dir"/>
        <parameter key="use_label" value="true"/>
      </operator>
      <!-- Preprocessing pipeline: rescale color values to [0, 1], then resize to 100x100. -->
      <operator activated="true" class="image_handling:image_pre_processor" compatibility="0.2.001" expanded="true" height="103" name="Pre-Process Images" origin="GENERATED_TUTORIAL" width="90" x="246" y="34">
        <parameter key="path" value="Path"/>
        <parameter key="use_label" value="false"/>
        <process expanded="true">
          <operator activated="true" class="image_handling:color_scale_image" compatibility="0.2.001" expanded="true" height="68" name="Color Scale Image" width="90" x="246" y="34">
            <parameter key="minimum" value="0.0"/>
            <parameter key="maximum" value="1.0"/>
          </operator>
          <operator activated="true" class="image_handling:resize_image" compatibility="0.2.001" expanded="true" height="68" name="Resize Image" width="90" x="514" y="34">
            <parameter key="width" value="100"/>
            <parameter key="height" value="100"/>
          </operator>
          <connect from_port="transform" to_op="Color Scale Image" to_port="transformer"/>
          <connect from_op="Color Scale Image" from_port="transformer" to_op="Resize Image" to_port="transformer"/>
          <connect from_op="Resize Image" from_port="transformer" to_port="transform"/>
          <portSpacing port="source_transform" spacing="0"/>
          <portSpacing port="sink_transform" spacing="0"/>
        </process>
      </operator>
      <!-- CNN trainer. NOTE(review): use_regularization=true with l1_strength/l2_strength
           of 0.1 is very aggressive for an image CNN and, combined with plain SGD at
           lr=0.01, is a plausible cause of the "Couldn't update network in epoch X"
           warning (gradients driven toward zero / weights shrunk each step) - try
           disabling regularization or lowering both strengths to ~1e-4 and confirm. -->
      <operator activated="true" class="deeplearning:dl4j_tensor_sequential_neural_network" compatibility="1.1.001" expanded="true" height="145" name="Deep Learning (Tensor)" origin="GENERATED_TUTORIAL" width="90" x="380" y="136">
        <parameter key="epochs" value="20"/>
        <parameter key="use_miniBatch" value="true"/>
        <parameter key="batch_size" value="64"/>
        <parameter key="log_each_epoch" value="true"/>
        <parameter key="epochs_per_log" value="10"/>
        <parameter key="loss_function" value="Cross Entropy (Binary Classification)"/>
        <parameter key="optimization_method" value="Stochastic Gradient Descent"/>
        <parameter key="backpropagation" value="Standard"/>
        <parameter key="backpropagation_length" value="50"/>
        <parameter key="use_early_stopping" value="false"/>
        <parameter key="condition_strategy" value="score improvement"/>
        <parameter key="patience" value="5"/>
        <parameter key="minimal_score_improvement" value="0.0"/>
        <parameter key="best_epoch_score" value="0.01"/>
        <parameter key="max_iteration_score" value="3.0"/>
        <parameter key="max_iteration_time" value="10"/>
        <parameter key="updater" value="AdaDelta"/>
        <parameter key="learning_rate" value="0.01"/>
        <parameter key="momentum" value="0.9"/>
        <parameter key="rho" value="0.98"/>
        <parameter key="epsilon" value="1.0E-6"/>
        <parameter key="beta1" value="0.9"/>
        <parameter key="beta2" value="0.999"/>
        <parameter key="RMSdecay" value="0.95"/>
        <parameter key="weight_initialization" value="Xavier"/>
        <parameter key="bias_initialization" value="0.01"/>
        <parameter key="use_regularization" value="true"/>
        <parameter key="l1_strength" value="0.1"/>
        <parameter key="l2_strength" value="0.1"/>
        <parameter key="cudnn_algo_mode" value="Prefer fastest"/>
        <parameter key="infer_input_shape" value="true"/>
        <parameter key="network_type" value="Simple Neural Network"/>
        <parameter key="use_local_random_seed" value="false"/>
        <parameter key="local_random_seed" value="1992"/>
        <process expanded="true">
          <!-- Architecture: Conv(64 maps, 3x3) -> MaxPool(2x2) -> Dropout(0.25)
               -> Dense(128, ReLU, dropout 0.25) -> Dense(1, Sigmoid). -->
          <operator activated="true" class="deeplearning:dl4j_convolutional_layer" compatibility="1.1.001" expanded="true" height="68" name="Add Convolutional Layer" origin="GENERATED_TUTORIAL" width="90" x="45" y="34">
            <parameter key="number_of_activation_maps" value="64"/>
            <parameter key="kernel_size" value="3.3"/>
            <parameter key="stride_size" value="1.1"/>
            <parameter key="activation_function" value="ReLU (Rectified Linear Unit)"/>
            <parameter key="use_dropout" value="false"/>
            <parameter key="dropout_rate" value="0.25"/>
            <parameter key="overwrite_networks_weight_initialization" value="false"/>
            <parameter key="weight_initialization" value="Normal"/>
            <parameter key="overwrite_networks_bias_initialization" value="false"/>
            <parameter key="bias_initialization" value="0.0"/>
          </operator>
          <operator activated="true" class="deeplearning:dl4j_pooling_layer" compatibility="1.1.001" expanded="true" height="68" name="Add Pooling Layer" origin="GENERATED_TUTORIAL" width="90" x="179" y="34">
            <parameter key="Pooling Method" value="max"/>
            <parameter key="PNorm Value" value="1.0"/>
            <parameter key="Kernel Size" value="2.2"/>
            <parameter key="Stride Size" value="2.2"/>
          </operator>
          <operator activated="true" class="deeplearning:dl4j_dropout_layer" compatibility="1.1.001" expanded="true" height="68" name="Add Dropout Layer" origin="GENERATED_TUTORIAL" width="90" x="313" y="34">
            <parameter key="use_dropout" value="true"/>
            <parameter key="dropout_rate" value="0.25"/>
          </operator>
          <operator activated="true" class="deeplearning:dl4j_dense_layer" compatibility="1.1.001" expanded="true" height="68" name="Add Fully-Connected Layer" origin="GENERATED_TUTORIAL" width="90" x="447" y="34">
            <parameter key="neurons" value="128"/>
            <parameter key="activation_function" value="ReLU (Rectified Linear Unit)"/>
            <parameter key="use_dropout" value="true"/>
            <parameter key="dropout_rate" value="0.25"/>
            <parameter key="overwrite_networks_weight_initialization" value="false"/>
            <parameter key="weight_initialization" value="Normal"/>
            <parameter key="overwrite_networks_bias_initialization" value="false"/>
            <parameter key="bias_initialization" value="0.0"/>
          </operator>
          <!-- NOTE(review): single sigmoid output with "Cross Entropy (Binary
               Classification)". The extension's tutorial processes typically use a
               2-neuron Softmax output for two-class labels - verify which form this
               extension version expects for a polynominal label. -->
          <operator activated="true" class="deeplearning:dl4j_dense_layer" compatibility="1.1.001" expanded="true" height="68" name="Add Fully-Connected Layer (2)" origin="GENERATED_TUTORIAL" width="90" x="581" y="34">
            <parameter key="neurons" value="1"/>
            <parameter key="activation_function" value="Sigmoid"/>
            <parameter key="use_dropout" value="false"/>
            <parameter key="dropout_rate" value="0.25"/>
            <parameter key="overwrite_networks_weight_initialization" value="false"/>
            <parameter key="weight_initialization" value="Normal"/>
            <parameter key="overwrite_networks_bias_initialization" value="false"/>
            <parameter key="bias_initialization" value="0.0"/>
          </operator>
          <connect from_port="in layerArchitecture" to_op="Add Convolutional Layer" to_port="layerArchitecture"/>
          <connect from_op="Add Convolutional Layer" from_port="layerArchitecture" to_op="Add Pooling Layer" to_port="layerArchitecture"/>
          <connect from_op="Add Pooling Layer" from_port="layerArchitecture" to_op="Add Dropout Layer" to_port="layerArchitecture"/>
          <connect from_op="Add Dropout Layer" from_port="layerArchitecture" to_op="Add Fully-Connected Layer" to_port="layerArchitecture"/>
          <connect from_op="Add Fully-Connected Layer" from_port="layerArchitecture" to_op="Add Fully-Connected Layer (2)" to_port="layerArchitecture"/>
          <connect from_op="Add Fully-Connected Layer (2)" from_port="layerArchitecture" to_port="out layerArchitecture"/>
          <portSpacing port="source_in layerArchitecture" spacing="0"/>
          <portSpacing port="sink_out layerArchitecture" spacing="0"/>
        </process>
      </operator>
      <!-- Bundles the preprocessing model and the trained network so Apply Model
           runs the same preprocessing on the test images. -->
      <operator activated="true" class="nd4j:group_models_generic" compatibility="1.0.000" expanded="true" height="103" name="Group Models (Generic)" origin="GENERATED_TUTORIAL" width="90" x="514" y="34"/>
      <operator activated="true" class="image_handling:read_image_meta_data" compatibility="0.2.001" expanded="true" height="68" name="Testing Images" origin="GENERATED_TUTORIAL" width="90" x="581" y="187">
        <parameter key="directory" value="D:/Dropbox/FEN/Semestres/2019-1/Fundamentals of ML - MIT_UCH/Ayudantias/DL/mini cat vs dogs/validation_dir"/>
        <parameter key="use_label" value="true"/>
      </operator>
      <operator activated="true" class="nd4j:apply_model_generic" compatibility="1.0.000" expanded="true" height="82" name="Apply Model (Generic)" origin="GENERATED_TUTORIAL" width="90" x="648" y="34"/>
      <operator activated="true" class="append" compatibility="9.8.001" expanded="true" height="82" name="Append" origin="GENERATED_TUTORIAL" width="90" x="782" y="34">
        <parameter key="datamanagement" value="double_array"/>
        <parameter key="data_management" value="auto"/>
        <parameter key="merge_type" value="all"/>
      </operator>
      <operator activated="true" class="performance_classification" compatibility="9.8.001" expanded="true" height="82" name="Performance" origin="GENERATED_TUTORIAL" width="90" x="916" y="34">
        <parameter key="main_criterion" value="first"/>
        <parameter key="accuracy" value="true"/>
        <parameter key="classification_error" value="false"/>
        <parameter key="kappa" value="false"/>
        <parameter key="weighted_mean_recall" value="false"/>
        <parameter key="weighted_mean_precision" value="false"/>
        <parameter key="spearman_rho" value="false"/>
        <parameter key="kendall_tau" value="false"/>
        <parameter key="absolute_error" value="false"/>
        <parameter key="relative_error" value="false"/>
        <parameter key="relative_error_lenient" value="false"/>
        <parameter key="relative_error_strict" value="false"/>
        <parameter key="normalized_absolute_error" value="false"/>
        <parameter key="root_mean_squared_error" value="false"/>
        <parameter key="root_relative_squared_error" value="false"/>
        <parameter key="squared_error" value="false"/>
        <parameter key="correlation" value="false"/>
        <parameter key="squared_correlation" value="false"/>
        <parameter key="cross-entropy" value="false"/>
        <parameter key="margin" value="false"/>
        <parameter key="soft_margin_loss" value="false"/>
        <parameter key="logistic_loss" value="false"/>
        <parameter key="skip_undefined_labels" value="true"/>
        <parameter key="use_example_weights" value="true"/>
        <list key="class_weights"/>
      </operator>
      <connect from_op="Training Images" from_port="output" to_op="Pre-Process Images" to_port="example set"/>
      <connect from_op="Pre-Process Images" from_port="tensor" to_op="Deep Learning (Tensor)" to_port="training set"/>
      <connect from_op="Pre-Process Images" from_port="preprocessing model" to_op="Group Models (Generic)" to_port="models in 1"/>
      <connect from_op="Deep Learning (Tensor)" from_port="model" to_op="Group Models (Generic)" to_port="models in 2"/>
      <connect from_op="Group Models (Generic)" from_port="model out" to_op="Apply Model (Generic)" to_port="model"/>
      <connect from_op="Testing Images" from_port="output" to_op="Apply Model (Generic)" to_port="unlabelled data"/>
      <connect from_op="Apply Model (Generic)" from_port="labelled data" to_op="Append" to_port="example set 1"/>
      <connect from_op="Append" from_port="merged set" to_op="Performance" to_port="labelled data"/>
      <connect from_op="Performance" from_port="performance" to_port="result 1"/>
      <connect from_op="Performance" from_port="example set" to_port="result 2"/>
      <portSpacing port="source_input 1" spacing="0"/>
      <portSpacing port="sink_result 1" spacing="0"/>
      <portSpacing port="sink_result 2" spacing="0"/>
      <portSpacing port="sink_result 3" spacing="0"/>
      <description align="left" color="yellow" colored="false" height="101" resized="true" width="157" x="10" y="118">Set the training directory to the MNIST training folder</description>
      <description align="left" color="yellow" colored="false" height="50" resized="true" width="357" x="482" y="307">Set the testing directory to the MNIST testing folder</description>
    </process>
  </operator>
</process>




Sign In or Register to comment.