Help using DL4J

pblack476 Member Posts: 83 Maven
As an exercise I have this process:

<?xml version="1.0" encoding="UTF-8"?><process version="9.4.001"><br>  <context><br>    <input/><br>    <output/><br>    <macros/><br>  </context><br>  <operator activated="true" class="process" compatibility="9.4.001" expanded="true" name="Process"><br>    <parameter key="logverbosity" value="init"/><br>    <parameter key="random_seed" value="2001"/><br>    <parameter key="send_mail" value="never"/><br>    <parameter key="notification_email" value=""/><br>    <parameter key="process_duration_for_mail" value="30"/><br>    <parameter key="encoding" value="SYSTEM"/><br>    <process expanded="true"><br>      <operator activated="true" class="retrieve" compatibility="9.4.001" expanded="true" height="68" name="Retrieve XPCM11.SA -DAILY - clean 5 daysignal" width="90" x="45" y="34"><br>        <parameter key="repository_entry" value="//Local Repository/data/XPCM11.SA -DAILY - clean 5 daysignal"/><br>      </operator><br>      <operator activated="true" class="subprocess" compatibility="9.4.001" expanded="true" height="82" name="AFE" width="90" x="179" y="34"><br>        <process expanded="true"><br>          <operator activated="true" class="set_role" compatibility="9.4.001" expanded="true" height="82" name="Set Role" width="90" x="45" y="34"><br>            <parameter key="attribute_name" value="SIG CHANGE"/><br>            <parameter key="target_role" value="label"/><br>            <list key="set_additional_roles"><br>              <parameter key="Date" value="id"/><br>            </list><br>          </operator><br>          <operator activated="true" class="normalize" compatibility="9.4.001" expanded="true" height="103" name="Normalize" width="90" x="179" y="34"><br>            <parameter key="return_preprocessing_model" value="false"/><br>            <parameter key="create_view" value="false"/><br>            <parameter key="attribute_filter_type" value="value_type"/><br>            <parameter key="attribute" value=""/><br>            <parameter key="attributes" value=""/><br>            <parameter key="use_except_expression" value="false"/><br>            <parameter key="value_type" value="numeric"/><br>            <parameter key="use_value_type_exception" value="false"/><br>            <parameter key="except_value_type" value="real"/><br>            <parameter key="block_type" value="value_series"/><br>            <parameter key="use_block_type_exception" value="false"/><br>            <parameter key="except_block_type" value="value_series_end"/><br>            <parameter key="invert_selection" value="false"/><br>            <parameter key="include_special_attributes" value="false"/><br>            <parameter key="method" value="Z-transformation"/><br>            <parameter key="min" value="0.0"/><br>            <parameter key="max" value="1.0"/><br>            <parameter key="allow_negative_values" value="false"/><br>          </operator><br>          <operator activated="true" class="multiply" compatibility="9.4.001" expanded="true" height="103" name="Multiply" width="90" x="313" y="34"/><br>          <operator activated="true" class="model_simulator:automatic_feature_engineering" compatibility="9.4.001" expanded="true" height="103" name="Automatic Feature Engineering" width="90" x="514" y="187"><br>            <parameter key="mode" value="feature selection and generation"/><br>            <parameter key="balance for accuracy" value="1.0"/><br>            <parameter key="show progress dialog" value="true"/><br>            <parameter key="use_local_random_seed" value="false"/><br>        
    <parameter key="local_random_seed" value="1992"/><br>            <parameter key="use optimization heuristics" value="true"/><br>            <parameter key="maximum generations" value="30"/><br>            <parameter key="population size" value="10"/><br>            <parameter key="use multi-starts" value="true"/><br>            <parameter key="number of multi-starts" value="5"/><br>            <parameter key="generations until multi-start" value="10"/><br>            <parameter key="use time limit" value="false"/><br>            <parameter key="time limit in seconds" value="60"/><br>            <parameter key="use subset for generation" value="false"/><br>            <parameter key="maximum function complexity" value="10"/><br>            <parameter key="use_plus" value="true"/><br>            <parameter key="use_diff" value="true"/><br>            <parameter key="use_mult" value="true"/><br>            <parameter key="use_div" value="true"/><br>            <parameter key="reciprocal_value" value="true"/><br>            <parameter key="use_square_roots" value="true"/><br>            <parameter key="use_exp" value="true"/><br>            <parameter key="use_log" value="true"/><br>            <parameter key="use_absolute_values" value="true"/><br>            <parameter key="use_sgn" value="true"/><br>            <parameter key="use_min" value="true"/><br>            <parameter key="use_max" value="true"/><br>            <process expanded="true"><br>              <operator activated="true" class="concurrency:cross_validation" compatibility="9.4.001" expanded="true" height="145" name="Cross Validation" width="90" x="45" y="34"><br>                <parameter key="split_on_batch_attribute" value="false"/><br>                <parameter key="leave_one_out" value="false"/><br>                <parameter key="number_of_folds" value="10"/><br>                <parameter key="sampling_type" value="automatic"/><br>                <parameter key="use_local_random_seed" value="true"/><br>                <parameter key="local_random_seed" value="1992"/><br>                <parameter key="enable_parallel_execution" value="true"/><br>                <process expanded="true"><br>                  <operator activated="true" class="deeplearning:dl4j_sequential_neural_network" compatibility="0.9.001" expanded="true" height="103" name="Deep Learning" width="90" x="112" y="34"><br>                    <parameter key="loss_function" value="Negative Log Likelihood (Classification)"/><br>                    <parameter key="epochs" value="100"/><br>                    <parameter key="use_miniBatch" value="false"/><br>                    <parameter key="batch_size" value="32"/><br>                    <parameter key="updater" value="RMSProp"/><br>                    <parameter key="learning_rate" value="0.01"/><br>                    <parameter key="momentum" value="0.9"/><br>                    <parameter key="rho" value="0.95"/><br>                    <parameter key="epsilon" value="1.0E-6"/><br>                    <parameter key="beta1" value="0.9"/><br>                    <parameter key="beta2" value="0.999"/><br>                    <parameter key="RMSdecay" value="0.95"/><br>                    <parameter key="weight_initialization" value="Normal"/><br>                    <parameter key="bias_initialization" value="0.0"/><br>                    <parameter key="use_regularization" value="false"/><br>                    <parameter key="l1_strength" value="0.1"/><br>                    <parameter key="l2_strength" 
value="0.1"/><br>                    <parameter key="optimization_method" value="Stochastic Gradient Descent"/><br>                    <parameter key="backpropagation" value="Standard"/><br>                    <parameter key="backpropagation_length" value="50"/><br>                    <parameter key="infer_input_shape" value="true"/><br>                    <parameter key="network_type" value="Simple Neural Network"/><br>                    <parameter key="log_each_epoch" value="true"/><br>                    <parameter key="epochs_per_log" value="10"/><br>                    <parameter key="use_local_random_seed" value="false"/><br>                    <parameter key="local_random_seed" value="1992"/><br>                    <process expanded="true"><br>                      <operator activated="true" class="deeplearning:dl4j_lstm_layer" compatibility="0.9.001" expanded="true" height="68" name="Add LSTM Layer (3)" width="90" x="112" y="34"><br>                        <parameter key="neurons" value="8"/><br>                        <parameter key="gate_activation" value="TanH"/><br>                        <parameter key="forget_gate_bias_initialization" value="1.0"/><br>                      </operator><br>                      <operator activated="true" class="deeplearning:dl4j_dense_layer" compatibility="0.9.001" expanded="true" height="68" name="Add Fully-Connected Layer (3)" width="90" x="246" y="34"><br>                        <parameter key="number_of_neurons" value="3"/><br>                        <parameter key="activation_function" value="Softmax"/><br>                        <parameter key="use_dropout" value="false"/><br>                        <parameter key="dropout_rate" value="0.25"/><br>                        <parameter key="overwrite_networks_weight_initialization" value="false"/><br>                        <parameter key="weight_initialization" value="Normal"/><br>                        <parameter key="overwrite_networks_bias_initialization" value="false"/><br>                        <parameter key="bias_initialization" value="0.0"/><br>                      </operator><br>                      <connect from_port="layerArchitecture" to_op="Add LSTM Layer (3)" to_port="layerArchitecture"/><br>                      <connect from_op="Add LSTM Layer (3)" from_port="layerArchitecture" to_op="Add Fully-Connected Layer (3)" to_port="layerArchitecture"/><br>                      <connect from_op="Add Fully-Connected Layer (3)" from_port="layerArchitecture" to_port="layerArchitecture"/><br>                      <portSpacing port="source_layerArchitecture" spacing="0"/><br>                      <portSpacing port="sink_layerArchitecture" spacing="0"/><br>                    </process><br>                  </operator><br>                  <connect from_port="training set" to_op="Deep Learning" to_port="training set"/><br>                  <connect from_op="Deep Learning" from_port="model" to_port="model"/><br>                  <portSpacing port="source_training set" spacing="0"/><br>                  <portSpacing port="sink_model" spacing="0"/><br>                  <portSpacing port="sink_through 1" spacing="0"/><br>                </process><br>                <process expanded="true"><br>                  <operator activated="true" class="apply_model" compatibility="9.4.001" expanded="true" height="82" name="Apply Model" width="90" x="112" y="34"><br>                    <list key="application_parameters"/><br>                    <parameter key="create_view" value="false"/><br>        
          </operator><br>                  <operator activated="true" class="performance_classification" compatibility="9.4.001" expanded="true" height="82" name="Performance (2)" width="90" x="246" y="34"><br>                    <parameter key="main_criterion" value="classification_error"/><br>                    <parameter key="accuracy" value="false"/><br>                    <parameter key="classification_error" value="true"/><br>                    <parameter key="kappa" value="true"/><br>                    <parameter key="weighted_mean_recall" value="true"/><br>                    <parameter key="weighted_mean_precision" value="true"/><br>                    <parameter key="spearman_rho" value="true"/><br>                    <parameter key="kendall_tau" value="true"/><br>                    <parameter key="absolute_error" value="true"/><br>                    <parameter key="relative_error" value="true"/><br>                    <parameter key="relative_error_lenient" value="true"/><br>                    <parameter key="relative_error_strict" value="true"/><br>                    <parameter key="normalized_absolute_error" value="true"/><br>                    <parameter key="root_mean_squared_error" value="true"/><br>                    <parameter key="root_relative_squared_error" value="true"/><br>                    <parameter key="squared_error" value="true"/><br>                    <parameter key="correlation" value="true"/><br>                    <parameter key="squared_correlation" value="true"/><br>                    <parameter key="cross-entropy" value="false"/><br>                    <parameter key="margin" value="false"/><br>                    <parameter key="soft_margin_loss" value="false"/><br>                    <parameter key="logistic_loss" value="false"/><br>                    <parameter key="skip_undefined_labels" value="true"/><br>                    <parameter key="use_example_weights" value="true"/><br>                    <list key="class_weights"/><br>                  </operator><br>                  <connect from_port="model" to_op="Apply Model" to_port="model"/><br>                  <connect from_port="test set" to_op="Apply Model" to_port="unlabelled data"/><br>                  <connect from_op="Apply Model" from_port="labelled data" to_op="Performance (2)" to_port="labelled data"/><br>                  <connect from_op="Performance (2)" from_port="performance" to_port="performance 1"/><br>                  <portSpacing port="source_model" spacing="0"/><br>                  <portSpacing port="source_test set" spacing="0"/><br>                  <portSpacing port="source_through 1" spacing="0"/><br>                  <portSpacing port="sink_test set results" spacing="0"/><br>                  <portSpacing port="sink_performance 1" spacing="0"/><br>                  <portSpacing port="sink_performance 2" spacing="0"/><br>                </process><br>              </operator><br>              <connect from_port="example set source" to_op="Cross Validation" to_port="example set"/><br>              <connect from_op="Cross Validation" from_port="performance 1" to_port="performance sink"/><br>              <portSpacing port="source_example set source" spacing="0"/><br>              <portSpacing port="sink_performance sink" spacing="0"/><br>            </process><br>          </operator><br>          <operator activated="true" class="model_simulator:apply_feature_set" compatibility="9.4.001" expanded="true" height="82" name="Apply Feature Set" width="90" x="715" 
y="34"><br>            <parameter key="handle missings" value="true"/><br>            <parameter key="keep originals" value="false"/><br>            <parameter key="originals special role" value="true"/><br>            <parameter key="recreate missing attributes" value="true"/><br>          </operator><br>          <connect from_port="in 1" to_op="Set Role" to_port="example set input"/><br>          <connect from_op="Set Role" from_port="example set output" to_op="Normalize" to_port="example set input"/><br>          <connect from_op="Normalize" from_port="example set output" to_op="Multiply" to_port="input"/><br>          <connect from_op="Multiply" from_port="output 1" to_op="Apply Feature Set" to_port="example set"/><br>          <connect from_op="Multiply" from_port="output 2" to_op="Automatic Feature Engineering" to_port="example set in"/><br>          <connect from_op="Automatic Feature Engineering" from_port="feature set" to_op="Apply Feature Set" to_port="feature set"/><br>          <connect from_op="Apply Feature Set" from_port="example set" to_port="out 1"/><br>          <portSpacing port="source_in 1" spacing="0"/><br>          <portSpacing port="source_in 2" spacing="0"/><br>          <portSpacing port="sink_out 1" spacing="0"/><br>          <portSpacing port="sink_out 2" spacing="0"/><br>        </process><br>      </operator><br>      <operator activated="true" class="split_data" compatibility="9.4.001" expanded="true" height="103" name="Split Data" width="90" x="313" y="34"><br>        <enumeration key="partitions"><br>          <parameter key="ratio" value="0.9"/><br>          <parameter key="ratio" value="0.1"/><br>        </enumeration><br>        <parameter key="sampling_type" value="automatic"/><br>        <parameter key="use_local_random_seed" value="false"/><br>        <parameter key="local_random_seed" value="1992"/><br>      </operator><br>      <operator activated="true" class="deeplearning:dl4j_sequential_neural_network" compatibility="0.9.001" expanded="true" height="103" name="Deep Learning (2)" width="90" x="514" y="34"><br>        <parameter key="loss_function" value="Multiclass Cross Entropy (Classification)"/><br>        <parameter key="epochs" value="100"/><br>        <parameter key="use_miniBatch" value="false"/><br>        <parameter key="batch_size" value="32"/><br>        <parameter key="updater" value="Adam"/><br>        <parameter key="learning_rate" value="0.01"/><br>        <parameter key="momentum" value="0.9"/><br>        <parameter key="rho" value="0.95"/><br>        <parameter key="epsilon" value="1.0E-6"/><br>        <parameter key="beta1" value="0.9"/><br>        <parameter key="beta2" value="0.999"/><br>        <parameter key="RMSdecay" value="0.95"/><br>        <parameter key="weight_initialization" value="Normal"/><br>        <parameter key="bias_initialization" value="0.0"/><br>        <parameter key="use_regularization" value="false"/><br>        <parameter key="l1_strength" value="0.1"/><br>        <parameter key="l2_strength" value="0.1"/><br>        <parameter key="optimization_method" value="Stochastic Gradient Descent"/><br>        <parameter key="backpropagation" value="Standard"/><br>        <parameter key="backpropagation_length" value="50"/><br>        <parameter key="infer_input_shape" value="true"/><br>        <parameter key="network_type" value="Simple Neural Network"/><br>        <parameter key="log_each_epoch" value="true"/><br>        <parameter key="epochs_per_log" value="10"/><br>        <parameter 
key="use_local_random_seed" value="false"/><br>        <parameter key="local_random_seed" value="1992"/><br>        <process expanded="true"><br>          <operator activated="true" class="deeplearning:dl4j_lstm_layer" compatibility="0.9.001" expanded="true" height="68" name="Add LSTM Layer" width="90" x="112" y="34"><br>            <parameter key="neurons" value="8"/><br>            <parameter key="gate_activation" value="ReLU (Rectified Linear Unit)"/><br>            <parameter key="forget_gate_bias_initialization" value="1.0"/><br>          </operator><br>          <operator activated="true" class="deeplearning:dl4j_dense_layer" compatibility="0.9.001" expanded="true" height="68" name="Add Fully-Connected Layer" width="90" x="246" y="34"><br>            <parameter key="number_of_neurons" value="3"/><br>            <parameter key="activation_function" value="Softmax"/><br>            <parameter key="use_dropout" value="false"/><br>            <parameter key="dropout_rate" value="0.25"/><br>            <parameter key="overwrite_networks_weight_initialization" value="false"/><br>            <parameter key="weight_initialization" value="Normal"/><br>            <parameter key="overwrite_networks_bias_initialization" value="false"/><br>            <parameter key="bias_initialization" value="0.0"/><br>          </operator><br>          <connect from_port="layerArchitecture" to_op="Add LSTM Layer" to_port="layerArchitecture"/><br>          <connect from_op="Add LSTM Layer" from_port="layerArchitecture" to_op="Add Fully-Connected Layer" to_port="layerArchitecture"/><br>          <connect from_op="Add Fully-Connected Layer" from_port="layerArchitecture" to_port="layerArchitecture"/><br>          <portSpacing port="source_layerArchitecture" spacing="0"/><br>          <portSpacing port="sink_layerArchitecture" spacing="0"/><br>        </process><br>      </operator><br>      <operator activated="true" class="apply_model" compatibility="9.4.001" expanded="true" height="82" name="Apply Model (2)" width="90" x="514" y="238"><br>        <list key="application_parameters"/><br>        <parameter key="create_view" value="false"/><br>      </operator><br>      <operator activated="true" class="multiply" compatibility="9.4.001" expanded="true" height="103" name="Multiply (2)" width="90" x="648" y="238"/><br>      <operator activated="true" class="performance_classification" compatibility="9.4.001" expanded="true" height="82" name="Performance" width="90" x="782" y="289"><br>        <parameter key="main_criterion" value="classification_error"/><br>        <parameter key="accuracy" value="true"/><br>        <parameter key="classification_error" value="true"/><br>        <parameter key="kappa" value="true"/><br>        <parameter key="weighted_mean_recall" value="true"/><br>        <parameter key="weighted_mean_precision" value="true"/><br>        <parameter key="spearman_rho" value="true"/><br>        <parameter key="kendall_tau" value="true"/><br>        <parameter key="absolute_error" value="true"/><br>        <parameter key="relative_error" value="true"/><br>        <parameter key="relative_error_lenient" value="true"/><br>        <parameter key="relative_error_strict" value="true"/><br>        <parameter key="normalized_absolute_error" value="true"/><br>        <parameter key="root_mean_squared_error" value="true"/><br>        <parameter key="root_relative_squared_error" value="true"/><br>        <parameter key="squared_error" value="true"/><br>        <parameter key="correlation" value="true"/><br>     
   <parameter key="squared_correlation" value="true"/><br>        <parameter key="cross-entropy" value="false"/><br>        <parameter key="margin" value="false"/><br>        <parameter key="soft_margin_loss" value="false"/><br>        <parameter key="logistic_loss" value="false"/><br>        <parameter key="skip_undefined_labels" value="true"/><br>        <parameter key="use_example_weights" value="true"/><br>        <list key="class_weights"/><br>      </operator><br>      <connect from_op="Retrieve XPCM11.SA -DAILY - clean 5 daysignal" from_port="output" to_op="AFE" to_port="in 1"/><br>      <connect from_op="AFE" from_port="out 1" to_op="Split Data" to_port="example set"/><br>      <connect from_op="Split Data" from_port="partition 1" to_op="Deep Learning (2)" to_port="training set"/><br>      <connect from_op="Split Data" from_port="partition 2" to_op="Apply Model (2)" to_port="unlabelled data"/><br>      <connect from_op="Deep Learning (2)" from_port="model" to_op="Apply Model (2)" to_port="model"/><br>      <connect from_op="Apply Model (2)" from_port="labelled data" to_op="Multiply (2)" to_port="input"/><br>      <connect from_op="Multiply (2)" from_port="output 1" to_port="result 2"/><br>      <connect from_op="Multiply (2)" from_port="output 2" to_op="Performance" to_port="labelled data"/><br>      <connect from_op="Performance" from_port="performance" to_port="result 1"/><br>      <portSpacing port="source_input 1" spacing="0"/><br>      <portSpacing port="sink_result 1" spacing="0"/><br>      <portSpacing port="sink_result 2" spacing="0"/><br>      <portSpacing port="sink_result 3" spacing="0"/><br>    </process><br>  </operator><br></process><br><br>
The issue is that I cannot get it to run. I get an error saying "there seems to be nothing wrong with this process but it failed to run". Activating Debug mode, I get this:

Exception: java.lang.ArrayIndexOutOfBoundsException
Message: null
Stack trace:

  sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
  sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
  sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
  java.lang.reflect.Constructor.newInstance(Constructor.java:423)
  java.util.concurrent.ForkJoinTask.getThrowableException(ForkJoinTask.java:598)
  java.util.concurrent.ForkJoinTask.get(ForkJoinTask.java:1005)
  com.rapidminer.studio.concurrency.internal.AbstractConcurrencyContext.collectResults(AbstractConcurrencyContext.java:206)
  com.rapidminer.studio.concurrency.internal.StudioConcurrencyContext.collectResults(StudioConcurrencyContext.java:33)
  com.rapidminer.studio.concurrency.internal.AbstractConcurrencyContext.call(AbstractConcurrencyContext.java:141)
  com.rapidminer.studio.concurrency.internal.StudioConcurrencyContext.call(StudioConcurrencyContext.java:33)
  com.rapidminer.Process.executeRootInPool(Process.java:1355)
  com.rapidminer.Process.execute(Process.java:1319)
  com.rapidminer.Process.run(Process.java:1291)
  com.rapidminer.Process.run(Process.java:1177)
  com.rapidminer.Process.run(Process.java:1130)
  com.rapidminer.Process.run(Process.java:1125)
  com.rapidminer.Process.run(Process.java:1115)
  com.rapidminer.gui.ProcessThread.run(ProcessThread.java:65)

Cause
Exception: java.lang.ArrayIndexOutOfBoundsException
Message: null
Stack trace:

  sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
  sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
  sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
  java.lang.reflect.Constructor.newInstance(Constructor.java:423)
  java.util.concurrent.ForkJoinTask.getThrowableException(ForkJoinTask.java:598)
  java.util.concurrent.ForkJoinTask.reportException(ForkJoinTask.java:677)
  java.util.concurrent.ForkJoinTask.invoke(ForkJoinTask.java:735)
  com.rapidminer.studio.concurrency.internal.RecursiveWrapper.call(RecursiveWrapper.java:120)
  com.rapidminer.studio.concurrency.internal.AbstractConcurrencyContext.call(AbstractConcurrencyContext.java:135)
  com.rapidminer.studio.concurrency.internal.StudioConcurrencyContext.call(StudioConcurrencyContext.java:33)
  com.rapidminer.extension.concurrency.execution.BackgroundExecutionService.executeOperatorTasks(BackgroundExecutionService.java:401)
  com.rapidminer.extension.concurrency.operator.validation.CrossValidationOperator.performParallelValidation(CrossValidationOperator.java:667)
  com.rapidminer.extension.concurrency.operator.validation.CrossValidationOperator.doExampleSetWork(CrossValidationOperator.java:311)
  com.rapidminer.extension.concurrency.operator.validation.CrossValidationOperator.doWork(CrossValidationOperator.java:243)
  com.rapidminer.operator.Operator.execute(Operator.java:1031)
  com.rapidminer.operator.execution.SimpleUnitExecutor.execute(SimpleUnitExecutor.java:77)
  com.rapidminer.operator.ExecutionUnit$2.run(ExecutionUnit.java:812)
  com.rapidminer.operator.ExecutionUnit$2.run(ExecutionUnit.java:807)
  java.security.AccessController.doPrivileged(Native Method)
  com.rapidminer.operator.ExecutionUnit.execute(ExecutionUnit.java:807)
  com.rapidminer.extension.modelsimulator.operator.feature_engineering.AutomaticFeatureEngineeringOperator.evaluate(AutomaticFeatureEngineeringOperator.java:403)
  com.rapidminer.extension.modelsimulator.operator.feature_engineering.AutomaticFeatureEngineeringOperator.access$200(AutomaticFeatureEngineeringOperator.java:79)
  com.rapidminer.extension.modelsimulator.operator.feature_engineering.AutomaticFeatureEngineeringOperator$1PerformanceCalculator.calculateError(AutomaticFeatureEngineeringOperator.java:270)
  com.rapidminer.extension.modelsimulator.operator.feature_engineering.optimization.AutomaticFeatureEngineering.evaluate(AutomaticFeatureEngineering.java:278)
  com.rapidminer.extension.modelsimulator.operator.feature_engineering.optimization.AutomaticFeatureEngineering.run(AutomaticFeatureEngineering.java:198)
  com.rapidminer.extension.modelsimulator.operator.feature_engineering.AutomaticFeatureEngineeringOperator.doWork(AutomaticFeatureEngineeringOperator.java:337)
  com.rapidminer.operator.Operator.execute(Operator.java:1031)
  com.rapidminer.operator.execution.SimpleUnitExecutor.execute(SimpleUnitExecutor.java:77)
  com.rapidminer.operator.ExecutionUnit$2.run(ExecutionUnit.java:812)
  com.rapidminer.operator.ExecutionUnit$2.run(ExecutionUnit.java:807)
  java.security.AccessController.doPrivileged(Native Method)
  com.rapidminer.operator.ExecutionUnit.execute(ExecutionUnit.java:807)
  com.rapidminer.operator.OperatorChain.doWork(OperatorChain.java:423)
  com.rapidminer.operator.SimpleOperatorChain.doWork(SimpleOperatorChain.java:99)
  com.rapidminer.operator.Operator.execute(Operator.java:1031)
  com.rapidminer.operator.execution.SimpleUnitExecutor.execute(SimpleUnitExecutor.java:77)
  com.rapidminer.operator.ExecutionUnit$2.run(ExecutionUnit.java:812)
  com.rapidminer.operator.ExecutionUnit$2.run(ExecutionUnit.java:807)
  java.security.AccessController.doPrivileged(Native Method)
  com.rapidminer.operator.ExecutionUnit.execute(ExecutionUnit.java:807)
  com.rapidminer.operator.OperatorChain.doWork(OperatorChain.java:423)
  com.rapidminer.operator.Operator.execute(Operator.java:1031)
  com.rapidminer.Process.executeRoot(Process.java:1378)
  com.rapidminer.Process.lambda$executeRootInPool$5(Process.java:1357)
  com.rapidminer.studio.concurrency.internal.AbstractConcurrencyContext$AdaptedCallable.exec(AbstractConcurrencyContext.java:328)
  java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
  java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
  java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
  java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)

Cause
Exception: java.lang.ArrayIndexOutOfBoundsException
Message: 1
Stack trace:

  com.rapidminer.extension.deeplearning.ioobjects.DeepLearningModel.performPrediction(DeepLearningModel.java:159)
  com.rapidminer.operator.learner.PredictionModel.apply(PredictionModel.java:116)
  com.rapidminer.operator.ModelApplier.doWork(ModelApplier.java:134)
  com.rapidminer.operator.Operator.execute(Operator.java:1031)
  com.rapidminer.operator.execution.SimpleUnitExecutor.execute(SimpleUnitExecutor.java:77)
  com.rapidminer.operator.ExecutionUnit$2.run(ExecutionUnit.java:812)
  com.rapidminer.operator.ExecutionUnit$2.run(ExecutionUnit.java:807)
  java.security.AccessController.doPrivileged(Native Method)
  com.rapidminer.operator.ExecutionUnit.execute(ExecutionUnit.java:807)
  com.rapidminer.extension.concurrency.operator.validation.CrossValidationOperator.test(CrossValidationOperator.java:800)
  com.rapidminer.extension.concurrency.operator.validation.CrossValidationOperator.access$300(CrossValidationOperator.java:77)
  com.rapidminer.extension.concurrency.operator.validation.CrossValidationOperator$8.call(CrossValidationOperator.java:658)
  com.rapidminer.extension.concurrency.operator.validation.CrossValidationOperator$8.call(CrossValidationOperator.java:643)
  com.rapidminer.extension.concurrency.execution.BackgroundExecutionService$ExecutionCallable.call(BackgroundExecutionService.java:365)
  com.rapidminer.studio.concurrency.internal.RecursiveWrapper.compute(RecursiveWrapper.java:88)
  java.util.concurrent.CountedCompleter.exec(CountedCompleter.java:731)
  java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
  java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
  java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
  java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
I tried tweaking everything I could, to no effect. If anyone has something they can contribute, I would appreciate it.
Here is the dataset used as well.

Answers

  • varunm1 Moderator, Member Posts: 1,207 Unicorn
    Can you provide the data set? The .ioo should be accompanied by .md and .properties files as well. My RM is not detecting this .ioo in the repository.
    Regards,
    Varun
    https://www.varunmandalapu.com/

    Be Safe. Follow precautions and Maintain Social Distancing

  • pblack476 Member Posts: 83 Maven
    @varunm1 I could not find the files you mentioned, but here is the .csv for the dataset.
  • lionelderkrikor Moderator, RapidMiner Certified Analyst, Member Posts: 1,195 Unicorn
    Hi @pblack476,

    Very strange behaviour. I'm able to reproduce it.

    The problem is linked to the Deep Learning operator and in particular to LSTM layer.
    When this layer is removed, the process works fine.

    Moreover, when the LSTM layer is present:
     - the bug seems linked to the fact that your label (SIG CHANGE) is not numerical; but when I apply the Nominal to Numerical operator (with unique integers) beforehand, RapidMiner also raises an error: "Indexes must be the same length as array rank"

    Other ideas ?

    Regards,

    Lionel
  • varunm1 Moderator, Member Posts: 1,207 Unicorn
    Hello @lionelderkrikor

    I am scratching my head as well. Do you think it's a bug, or are we missing something? Because the process seems to be fine.

    Regards,
    Varun
    https://www.varunmandalapu.com/

    Be Safe. Follow precautions and Maintain Social Distancing

  • pblack476 Member Posts: 83 Maven
    @pschlunder  Can you offer any insight on this?

    @lionelderkrikor I have noticed that the Deep Learning operator is to blame here, as I can run the process with other learners in its place, but I had not narrowed it down to the LSTM layer. However, the LSTM layer operator itself does not seem to be completely faulty, as you can run it in the S&P DL example just fine.
  • varunm1 Moderator, Member Posts: 1,207 Unicorn
    edited November 2019
    @pblack476 Do you mean the S&P example for the regression problem? I think this specific issue seems to occur only with classification labels.
    Regards,
    Varun
    https://www.varunmandalapu.com/

    Be Safe. Follow precautions and Maintain Social Distancing

  • pblack476 Member Posts: 83 Maven
    @varunm1 Yes, I meant that. I guess that is the case, as I have done some regression models that worked fine before this. The other example though, the ICU mortality one, is a classification problem that works, and it has the LSTM layer as well. My theory is that I need to use Deep Learning (Tensor) in that case? But why? I frankly do not understand yet what the (Tensor) variant is for.
  • pblack476 Member Posts: 83 Maven
    I tried removing the whole AFE subprocess, cross-validation and all. I had to convert everything to numerical because the Deep Learning operator apparently does not accept nominal values, but I still got an error after the DL operator finished.

    "Exception: java.lang.ArrayIndexOutOfBoundsException
    Message: null"

  • varunm1 Moderator, Member Posts: 1,207 Unicorn
    edited November 2019
    @pblack476 yep.

    My theory is that I need to use Deep Learning (Tensor) in that case? But why? I frankly do not understand yet what the (Tensor) variant is for.
    Good question. I think it is used to convert example sets with multi-sample time series per subject. For example, if you have 10 subjects and each subject has multiple time-series samples (10 samples per subject), then we need to segment them individually so that each subject is considered a single example, rather than each data point in a subject being an example. This way the algorithm can learn the time inputs in each example set (subject); a rough sketch of that reshaping follows at the end of this post.

    But in your case, I am not sure why you should use that.

    I already tried what you did; the issue arises in the Apply Model operator. It looks like there is a discrepancy between the input the model expects and what the test data provides.

    Regards,
    Varun
    https://www.varunmandalapu.com/

    Be Safe. Follow precautions and Maintain Social Distancing
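
    To make that segmentation concrete, here is a rough ND4J sketch (not the extension's actual code; the class name, toy dimensions, and plain Java arrays are only assumptions for illustration): each subject's 2D table of rows becomes one slice of a rank-3 tensor shaped [subjects, features, timesteps], which is the recurrent input format DL4J works with.

    import org.nd4j.linalg.api.ndarray.INDArray;
    import org.nd4j.linalg.factory.Nd4j;

    public class SubjectTensorSketch {
        public static void main(String[] args) {
            // Toy stand-in for "one ExampleSet per subject":
            // 2 subjects, 4 time steps each, 3 numeric attributes per step.
            double[][][] rows = new double[2][4][3];

            int numSubjects  = rows.length;
            int numTimesteps = rows[0].length;
            int numFeatures  = rows[0][0].length;

            // Recurrent DL4J networks consume rank-3 input: [batch, features, timesteps].
            INDArray tensor = Nd4j.zeros(new int[]{numSubjects, numFeatures, numTimesteps});

            // Copy each subject's 2D table into its own slice of the tensor.
            for (int s = 0; s < numSubjects; s++) {
                for (int t = 0; t < numTimesteps; t++) {
                    for (int f = 0; f < numFeatures; f++) {
                        tensor.putScalar(new int[]{s, f, t}, rows[s][t][f]);
                    }
                }
            }

            System.out.println(java.util.Arrays.toString(tensor.shape())); // [2, 3, 4]
        }
    }

    With a single tabular dataset like the one in this thread there is effectively only one "subject", which is why it is not obvious that the tensor route is needed at all.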

  • lionelderkrikor Moderator, RapidMiner Certified Analyst, Member Posts: 1,195 Unicorn

    I am scratching my head as well. Do you think it's a bug, or are we missing something? Because the process seems to be fine.

    My firm conviction is that it is not a bug:
    I tried to (re)build the same process with the Keras extension, and an error is also raised there when an LSTM is used as the first layer,
    but with a more "explicit" error message. It says that the LSTM expects a 3D matrix as input, which is not the case in this project (a classic 2D dataset).
    After some research on the net, it is indeed stated that "The input of the LSTM is always a 3D array". (A small DL4J sketch of that input contract follows at the end of this post.)

    Regards,

    Lionel
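
    For reference, a minimal DL4J sketch of that contract (assumptions: the plain DL4J Java API from around the 1.0.0-beta releases, and a made-up 3-feature / 2-class toy setup; this is not the RapidMiner extension's internal code). The only point is that a recurrent layer accepts rank-3 input [batchSize, features, timesteps], so a classic 2D table row cannot be fed straight into an LSTM first layer.

    import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
    import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
    import org.deeplearning4j.nn.conf.layers.LSTM;
    import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.activations.Activation;
    import org.nd4j.linalg.api.ndarray.INDArray;
    import org.nd4j.linalg.factory.Nd4j;
    import org.nd4j.linalg.lossfunctions.LossFunctions;

    public class LstmInputShapeSketch {
        public static void main(String[] args) {
            int numFeatures = 3, numClasses = 2, lstmUnits = 8;

            // Tiny LSTM -> output-layer stack, loosely mirroring the layer setup in the process above.
            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .list()
                    .layer(0, new LSTM.Builder()
                            .nIn(numFeatures).nOut(lstmUnits)
                            .activation(Activation.TANH).build())
                    .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .nIn(lstmUnits).nOut(numClasses)
                            .activation(Activation.SOFTMAX).build())
                    .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            // Recurrent input must be rank 3: [batchSize, numFeatures, timeSeriesLength].
            INDArray sequence = Nd4j.zeros(new int[]{1, numFeatures, 10});
            INDArray out = net.output(sequence);

            // A 2D row such as Nd4j.zeros(new int[]{1, numFeatures}) does not satisfy
            // this shape contract, which is why a plain tabular dataset fails here.
            System.out.println(java.util.Arrays.toString(out.shape())); // [1, 2, 10]
        }
    }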


  • pblack476 Member Posts: 83 Maven
    @varunm1 Thank you very much for the reply.

    I now have another issue with that, because at first I tried the tensor alternative but got stuck when trying to use it inside a cross-validation. The CV operator expects a "Model" object across the train/test barrier, but the DL (Tensor) operator outputs a DL-tensor-model object, so it cannot continue.

    Is there a way around this?
  • varunm1 Moderator, Member Posts: 1,207 Unicorn
    @pblack476

    I got an idea; give it a try with your data in cross-validation and see if it works, or else follow the manual way. One more important thing: the "TimeSeries to Tensor" operator only accepts a collection, not a direct example set. The Decision Tree operator used inside the CV is a dummy, since the mod port expects some input.

    <?xml version="1.0" encoding="UTF-8"?><process version="9.5.000">
    <context>
    <input/>
    <output/>
    <macros/>
    </context>
    <operator activated="true" class="process" compatibility="9.4.000" expanded="true" name="Process" origin="GENERATED_SAMPLE">
    <parameter key="logverbosity" value="init"/>
    <parameter key="random_seed" value="2001"/>
    <parameter key="send_mail" value="never"/>
    <parameter key="notification_email" value=""/>
    <parameter key="process_duration_for_mail" value="30"/>
    <parameter key="encoding" value="SYSTEM"/>
    <process expanded="true">
    <operator activated="true" class="retrieve" compatibility="9.5.000" expanded="true" height="68" name="Retrieve 'XPCM11.SA -DAILY" width="90" x="179" y="85">
    <parameter key="repository_entry" value="My_Question/'XPCM11.SA -DAILY"/>
    </operator>
    <operator activated="true" class="select_attributes" compatibility="9.5.000" expanded="true" height="82" name="Select Attributes" width="90" x="313" y="85">
    <parameter key="attribute_filter_type" value="single"/>
    <parameter key="attribute" value="Date"/>
    <parameter key="attributes" value=""/>
    <parameter key="use_except_expression" value="false"/>
    <parameter key="value_type" value="attribute_value"/>
    <parameter key="use_value_type_exception" value="false"/>
    <parameter key="except_value_type" value="time"/>
    <parameter key="block_type" value="attribute_block"/>
    <parameter key="use_block_type_exception" value="false"/>
    <parameter key="except_block_type" value="value_matrix_row_start"/>
    <parameter key="invert_selection" value="true"/>
    <parameter key="include_special_attributes" value="false"/>
    </operator>
    <operator activated="true" class="set_role" compatibility="9.5.000" expanded="true" height="82" name="Set Role" width="90" x="447" y="85">
    <parameter key="attribute_name" value="SIG CHANGE"/>
    <parameter key="target_role" value="label"/>
    <list key="set_additional_roles"/>
    </operator>
    <operator activated="true" class="concurrency:cross_validation" compatibility="9.5.000" expanded="true" height="166" name="Cross Validation" width="90" x="581" y="85">
    <parameter key="split_on_batch_attribute" value="false"/>
    <parameter key="leave_one_out" value="false"/>
    <parameter key="number_of_folds" value="5"/>
    <parameter key="sampling_type" value="automatic"/>
    <parameter key="use_local_random_seed" value="false"/>
    <parameter key="local_random_seed" value="1992"/>
    <parameter key="enable_parallel_execution" value="false"/>
    <process expanded="true">
    <operator activated="true" class="multiply" compatibility="9.5.000" expanded="true" height="103" name="Multiply" width="90" x="45" y="85"/>
    <operator activated="true" class="concurrency:parallel_decision_tree" compatibility="9.5.000" expanded="true" height="103" name="Decision Tree" width="90" x="179" y="34">
    <parameter key="criterion" value="gain_ratio"/>
    <parameter key="maximal_depth" value="10"/>
    <parameter key="apply_pruning" value="true"/>
    <parameter key="confidence" value="0.1"/>
    <parameter key="apply_prepruning" value="true"/>
    <parameter key="minimal_gain" value="0.01"/>
    <parameter key="minimal_leaf_size" value="2"/>
    <parameter key="minimal_size_for_split" value="4"/>
    <parameter key="number_of_prepruning_alternatives" value="3"/>
    </operator>
    <operator activated="true" breakpoints="after" class="loop_examples" compatibility="9.5.000" expanded="true" height="103" name="Loop Examples" width="90" x="179" y="187">
    <parameter key="iteration_macro" value="example"/>
    <process expanded="true">
    <operator activated="true" class="filter_example_range" compatibility="9.5.000" expanded="true" height="82" name="Filter Example Range" width="90" x="179" y="34">
    <parameter key="first_example" value="%{example}"/>
    <parameter key="last_example" value="%{example}"/>
    <parameter key="invert_filter" value="false"/>
    </operator>
    <connect from_port="example set" to_op="Filter Example Range" to_port="example set input"/>
    <connect from_op="Filter Example Range" from_port="example set output" to_port="output 1"/>
    <portSpacing port="source_example set" spacing="0"/>
    <portSpacing port="sink_example set" spacing="0"/>
    <portSpacing port="sink_output 1" spacing="0"/>
    <portSpacing port="sink_output 2" spacing="0"/>
    </process>
    </operator>
    <operator activated="true" class="deeplearning:dl4j_timeseries_converter" compatibility="0.9.001" expanded="true" height="68" name="TimeSeries to Tensor" width="90" x="313" y="187"/>
    <operator activated="true" class="deeplearning:dl4j_tensor_sequential_neural_network" compatibility="0.9.001" expanded="true" height="103" name="Deep Learning (Tensor)" origin="GENERATED_SAMPLE" width="90" x="447" y="136">
    <parameter key="loss_function" value="Multiclass Cross Entropy (Classification)"/>
    <parameter key="epochs" value="5"/>
    <parameter key="use_miniBatch" value="false"/>
    <parameter key="batch_size" value="1"/>
    <parameter key="updater" value="Adam"/>
    <parameter key="learning_rate" value="0.005"/>
    <parameter key="momentum" value="0.9"/>
    <parameter key="rho" value="0.95"/>
    <parameter key="epsilon" value="1.0E-6"/>
    <parameter key="beta1" value="0.9"/>
    <parameter key="beta2" value="0.999"/>
    <parameter key="RMSdecay" value="0.95"/>
    <parameter key="weight_initialization" value="Xavier"/>
    <parameter key="bias_initialization" value="0.0"/>
    <parameter key="use_regularization" value="false"/>
    <parameter key="l1_strength" value="0.1"/>
    <parameter key="l2_strength" value="0.1"/>
    <parameter key="optimization_method" value="Stochastic Gradient Descent"/>
    <parameter key="backpropagation" value="Standard"/>
    <parameter key="backpropagation_length" value="50"/>
    <parameter key="infer_input_shape" value="true"/>
    <parameter key="network_type" value="Recurrent with TimeSeries"/>
    <parameter key="input_dimension" value="86"/>
    <parameter key="timeseries_steps" value="155"/>
    <parameter key="log_each_epoch" value="true"/>
    <parameter key="epochs_per_log" value="10"/>
    <parameter key="use_local_random_seed" value="false"/>
    <parameter key="local_random_seed" value="1992"/>
    <process expanded="true">
    <operator activated="true" class="deeplearning:dl4j_lstm_layer" compatibility="0.9.001" expanded="true" height="68" name="Add LSTM Layer" origin="GENERATED_SAMPLE" width="90" x="179" y="136">
    <parameter key="neurons" value="5"/>
    <parameter key="gate_activation" value="TanH"/>
    <parameter key="forget_gate_bias_initialization" value="1.0"/>
    </operator>
    <operator activated="true" class="deeplearning:dl4j_dense_layer" compatibility="0.9.001" expanded="true" height="68" name="Add Fully-Connected Layer" origin="GENERATED_SAMPLE" width="90" x="447" y="136">
    <parameter key="number_of_neurons" value="3"/>
    <parameter key="activation_function" value="Sigmoid"/>
    <parameter key="use_dropout" value="false"/>
    <parameter key="dropout_rate" value="0.25"/>
    <parameter key="overwrite_networks_weight_initialization" value="false"/>
    <parameter key="weight_initialization" value="Normal"/>
    <parameter key="overwrite_networks_bias_initialization" value="false"/>
    <parameter key="bias_initialization" value="0.0"/>
    </operator>
    <connect from_port="layerArchitecture" to_op="Add LSTM Layer" to_port="layerArchitecture"/>
    <connect from_op="Add LSTM Layer" from_port="layerArchitecture" to_op="Add Fully-Connected Layer" to_port="layerArchitecture"/>
    <connect from_op="Add Fully-Connected Layer" from_port="layerArchitecture" to_port="layerArchitecture"/>
    <portSpacing port="source_layerArchitecture" spacing="0"/>
    <portSpacing port="sink_layerArchitecture" spacing="0"/>
    </process>
    <description align="center" color="transparent" colored="false" width="126">Double click the operator for its inner layer structure</description>
    </operator>
    <connect from_port="training set" to_op="Multiply" to_port="input"/>
    <connect from_op="Multiply" from_port="output 1" to_op="Loop Examples" to_port="example set"/>
    <connect from_op="Multiply" from_port="output 2" to_op="Decision Tree" to_port="training set"/>
    <connect from_op="Decision Tree" from_port="model" to_port="model"/>
    <connect from_op="Loop Examples" from_port="output 1" to_op="TimeSeries to Tensor" to_port="collection"/>
    <connect from_op="TimeSeries to Tensor" from_port="tensor" to_op="Deep Learning (Tensor)" to_port="training set"/>
    <connect from_op="Deep Learning (Tensor)" from_port="model" to_port="through 1"/>
    <portSpacing port="source_training set" spacing="0"/>
    <portSpacing port="sink_model" spacing="0"/>
    <portSpacing port="sink_through 1" spacing="0"/>
    <portSpacing port="sink_through 2" spacing="0"/>
    </process>
    <process expanded="true">
    <operator activated="true" class="multiply" compatibility="9.5.000" expanded="true" height="103" name="Multiply (2)" width="90" x="45" y="187"/>
    <operator activated="true" class="loop_examples" compatibility="9.5.000" expanded="true" height="103" name="Loop Examples (2)" width="90" x="179" y="238">
    <parameter key="iteration_macro" value="example"/>
    <process expanded="true">
    <operator activated="true" class="filter_example_range" compatibility="9.5.000" expanded="true" height="82" name="Filter Example Range (3)" width="90" x="179" y="34">
    <parameter key="first_example" value="%{example}"/>
    <parameter key="last_example" value="%{example}"/>
    <parameter key="invert_filter" value="false"/>
    </operator>
    <connect from_port="example set" to_op="Filter Example Range (3)" to_port="example set input"/>
    <connect from_op="Filter Example Range (3)" from_port="example set output" to_port="output 1"/>
    <portSpacing port="source_example set" spacing="0"/>
    <portSpacing port="sink_example set" spacing="0"/>
    <portSpacing port="sink_output 1" spacing="0"/>
    <portSpacing port="sink_output 2" spacing="0"/>
    </process>
    </operator>
    <operator activated="true" class="deeplearning:dl4j_timeseries_converter" compatibility="0.9.001" expanded="true" height="68" name="TimeSeries to Tensor (3)" origin="GENERATED_SAMPLE" width="90" x="313" y="238"/>
    <operator activated="true" class="deeplearning:dl4j_apply_tensor_model" compatibility="0.9.001" expanded="true" height="82" name="Apply Model (Tensor) (2)" origin="GENERATED_SAMPLE" width="90" x="514" y="187">
    <description align="center" color="transparent" colored="false" width="126">This operator adds the prediction to the input tensor and provides it as a Collection of ExampleSets.</description>
    </operator>
    <operator activated="true" class="loop_collection" compatibility="9.5.000" expanded="true" height="82" name="Loop Collection (2)" origin="GENERATED_SAMPLE" width="90" x="648" y="238">
    <parameter key="set_iteration_macro" value="false"/>
    <parameter key="macro_name" value="iteration"/>
    <parameter key="macro_start_value" value="1"/>
    <parameter key="unfold" value="false"/>
    <process expanded="true">
    <operator activated="true" class="filter_example_range" compatibility="9.5.000" expanded="true" height="82" name="Filter Example Range (2)" origin="GENERATED_SAMPLE" width="90" x="112" y="34">
    <parameter key="first_example" value="1"/>
    <parameter key="last_example" value="1"/>
    <parameter key="invert_filter" value="false"/>
    </operator>
    <connect from_port="single" to_op="Filter Example Range (2)" to_port="example set input"/>
    <connect from_op="Filter Example Range (2)" from_port="example set output" to_port="output 1"/>
    <portSpacing port="source_single" spacing="0"/>
    <portSpacing port="sink_output 1" spacing="0"/>
    <portSpacing port="sink_output 2" spacing="0"/>
    <description align="center" color="yellow" colored="false" height="145" resized="false" width="180" x="45" y="157">Looping over all patients (ExampleSets) from the Apply Model (Tensor)'s Collection output to extract one measurement each (since all contain the same prediction).</description>
    </process>
    <description align="center" color="transparent" colored="false" width="126">Loop over all patients (ExampleSets) from the Apply Model (Tensor)'s Collection output.</description>
    </operator>
    <operator activated="true" class="append" compatibility="9.5.000" expanded="true" height="82" name="Append (2)" origin="GENERATED_SAMPLE" width="90" x="782" y="238">
    <parameter key="datamanagement" value="double_array"/>
    <parameter key="data_management" value="auto"/>
    <parameter key="merge_type" value="all"/>
    </operator>
    <operator activated="true" class="performance_classification" compatibility="9.5.000" expanded="true" height="82" name="Performance_LSTM" origin="GENERATED_SAMPLE" width="90" x="916" y="238">
    <parameter key="main_criterion" value="first"/>
    <parameter key="accuracy" value="true"/>
    <parameter key="classification_error" value="false"/>
    <parameter key="kappa" value="false"/>
    <parameter key="weighted_mean_recall" value="false"/>
    <parameter key="weighted_mean_precision" value="false"/>
    <parameter key="spearman_rho" value="false"/>
    <parameter key="kendall_tau" value="false"/>
    <parameter key="absolute_error" value="false"/>
    <parameter key="relative_error" value="false"/>
    <parameter key="relative_error_lenient" value="false"/>
    <parameter key="relative_error_strict" value="false"/>
    <parameter key="normalized_absolute_error" value="false"/>
    <parameter key="root_mean_squared_error" value="false"/>
    <parameter key="root_relative_squared_error" value="false"/>
    <parameter key="squared_error" value="false"/>
    <parameter key="correlation" value="false"/>
    <parameter key="squared_correlation" value="false"/>
    <parameter key="cross-entropy" value="false"/>
    <parameter key="margin" value="false"/>
    <parameter key="soft_margin_loss" value="false"/>
    <parameter key="logistic_loss" value="false"/>
    <parameter key="skip_undefined_labels" value="true"/>
    <parameter key="use_example_weights" value="true"/>
    <list key="class_weights"/>
    </operator>
    <operator activated="true" class="apply_model" compatibility="9.5.000" expanded="true" height="82" name="Apply Model" width="90" x="179" y="34">
    <list key="application_parameters"/>
    <parameter key="create_view" value="false"/>
    </operator>
    <operator activated="true" class="performance_classification" compatibility="9.5.000" expanded="true" height="82" name="Performance_DT" width="90" x="313" y="34">
    <parameter key="main_criterion" value="first"/>
    <parameter key="accuracy" value="true"/>
    <parameter key="classification_error" value="false"/>
    <parameter key="kappa" value="false"/>
    <parameter key="weighted_mean_recall" value="false"/>
    <parameter key="weighted_mean_precision" value="false"/>
    <parameter key="spearman_rho" value="false"/>
    <parameter key="kendall_tau" value="false"/>
    <parameter key="absolute_error" value="false"/>
    <parameter key="relative_error" value="false"/>
    <parameter key="relative_error_lenient" value="false"/>
    <parameter key="relative_error_strict" value="false"/>
    <parameter key="normalized_absolute_error" value="false"/>
    <parameter key="root_mean_squared_error" value="false"/>
    <parameter key="root_relative_squared_error" value="false"/>
    <parameter key="squared_error" value="false"/>
    <parameter key="correlation" value="false"/>
    <parameter key="squared_correlation" value="false"/>
    <parameter key="cross-entropy" value="false"/>
    <parameter key="margin" value="false"/>
    <parameter key="soft_margin_loss" value="false"/>
    <parameter key="logistic_loss" value="false"/>
    <parameter key="skip_undefined_labels" value="true"/>
    <parameter key="use_example_weights" value="true"/>
    <list key="class_weights"/>
    </operator>
    <connect from_port="model" to_op="Apply Model" to_port="model"/>
    <connect from_port="test set" to_op="Multiply (2)" to_port="input"/>
    <connect from_port="through 1" to_op="Apply Model (Tensor) (2)" to_port="model"/>
    <connect from_op="Multiply (2)" from_port="output 1" to_op="Apply Model" to_port="unlabelled data"/>
    <connect from_op="Multiply (2)" from_port="output 2" to_op="Loop Examples (2)" to_port="example set"/>
    <connect from_op="Loop Examples (2)" from_port="output 1" to_op="TimeSeries to Tensor (3)" to_port="collection"/>
    <connect from_op="TimeSeries to Tensor (3)" from_port="tensor" to_op="Apply Model (Tensor) (2)" to_port="unlabelled tensor"/>
    <connect from_op="Apply Model (Tensor) (2)" from_port="labeled data" to_op="Loop Collection (2)" to_port="collection"/>
    <connect from_op="Loop Collection (2)" from_port="output 1" to_op="Append (2)" to_port="example set 1"/>
    <connect from_op="Append (2)" from_port="merged set" to_op="Performance_LSTM" to_port="labelled data"/>
    <connect from_op="Performance_LSTM" from_port="performance" to_port="performance 2"/>
    <connect from_op="Apply Model" from_port="labelled data" to_op="Performance_DT" to_port="labelled data"/>
    <connect from_op="Performance_DT" from_port="performance" to_port="performance 1"/>
    <portSpacing port="source_model" spacing="0"/>
    <portSpacing port="source_test set" spacing="0"/>
    <portSpacing port="source_through 1" spacing="0"/>
    <portSpacing port="source_through 2" spacing="0"/>
    <portSpacing port="sink_test set results" spacing="0"/>
    <portSpacing port="sink_performance 1" spacing="0"/>
    <portSpacing port="sink_performance 2" spacing="0"/>
    <portSpacing port="sink_performance 3" spacing="0"/>
    </process>
    </operator>
    <connect from_op="Retrieve 'XPCM11.SA -DAILY" from_port="output" to_op="Select Attributes" to_port="example set input"/>
    <connect from_op="Select Attributes" from_port="example set output" to_op="Set Role" to_port="example set input"/>
    <connect from_op="Set Role" from_port="example set output" to_op="Cross Validation" to_port="example set"/>
    <connect from_op="Cross Validation" from_port="performance 1" to_port="result 1"/>
    <portSpacing port="source_input 1" spacing="0"/>
    <portSpacing port="sink_result 1" spacing="0"/>
    <portSpacing port="sink_result 2" spacing="0"/>
    </process>
    </operator>
    </process>

    Hope this helps.

    Regards,
    Varun
    https://www.varunmandalapu.com/

    Be Safe. Follow precautions and Maintain Social Distancing

  • pblack476 Member Posts: 83 Maven
    edited November 2019
    @varunm1 Seems good, but I get a NETWORK CONFIGURATION PROBLEM error ("Something went wrong when trying to set up your network") on the DL (Tensor) operator.
  • varunm1 Moderator, Member Posts: 1,207 Unicorn
    edited November 2019
    @pblack476 Yeah, me too. I thought the issue was with the data, or with the way I am creating the collection of example sets from the original data.
    Regards,
    Varun
    https://www.varunmandalapu.com/

    Be Safe. Follow precautions and Maintain Social Distancing

  • pblack476 Member Posts: 83 Maven
    @varunm1 I have decided that for now I won't be using DL4J. Do you know if the H2O DL operator can work with LSTM layers through one of its configurations?