
process documents loop

slimik Member Posts: 7 Contributor I
edited November 2019 in Help

Hello everyone,

 

I'm trying to perform sentiment analysis on a bunch of data, and I want to test different parameters of the Process Documents from Data operator. Obviously I can't manually test every possible combination of prune methods or token filtering settings (inside the Process Documents operator). Is there any way to loop this procedure in order to find the optimal parameter values?

 

Thank you

Best Answer

    Thomas_Ott RapidMiner Certified Analyst, RapidMiner Certified Expert, Member Posts: 1,761 Unicorn
    Solution Accepted

    Yes, you could embed the Process Documents from Data operator inside an Optimize Parameters (Grid) operator and then vary the parameters.
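    As a rough sketch (not a full process) of how that looks in the process XML, assuming the inner operator is named "Process Documents from Data" and using placeholder ranges you would adjust:

    <operator activated="true" class="optimize_parameters_grid" expanded="true" name="Optimize Parameters (Grid)">
      <list key="parameters">
        <!-- each entry is "operator name.parameter key" plus the comma-separated values or a [min;max;steps;scale] range to try -->
        <parameter key="Process Documents from Data.prune_method" value="percentual,absolute,by ranking"/>
        <parameter key="Process Documents from Data.prune_below_absolute" value="[0.0;100.0;10;linear]"/>
        <parameter key="Process Documents from Data.prune_above_absolute" value="[0.0;100.0;10;linear]"/>
      </list>
      <process expanded="true">
        <!-- Process Documents from Data and the cross-validation go here; connect the
             validation's performance output to the optimizer's performance port -->
      </process>
    </operator>

    The optimizer then re-runs the inner subprocess for every combination and reports the best-performing parameter set.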

Answers

    slimik Member Posts: 7 Contributor I

    Thank you @Thomas_Ott for your help!

    Should I embed the whole process flow inside Optimize Parameters (Grid), or only the Process Documents from Data operator?

    You can see my process flow below:

     

    <?xml version="1.0" encoding="UTF-8"?><process version="7.3.000">
    <context>
    <input/>
    <output/>
    <macros/>
    </context>
    <operator activated="true" class="process" compatibility="6.0.002" expanded="true" name="Process">
    <parameter key="logverbosity" value="init"/>
    <parameter key="random_seed" value="2001"/>
    <parameter key="send_mail" value="never"/>
    <parameter key="notification_email" value=""/>
    <parameter key="process_duration_for_mail" value="30"/>
    <parameter key="encoding" value="SYSTEM"/>
    <process expanded="true">
    <operator activated="true" class="read_excel" compatibility="7.3.000" expanded="true" height="68" name="Read Excel" width="90" x="45" y="34">
    <parameter key="sheet_number" value="1"/>
    <parameter key="imported_cell_range" value="A1"/>
    <parameter key="encoding" value="SYSTEM"/>
    <parameter key="first_row_as_names" value="true"/>
    <list key="annotations"/>
    <parameter key="date_format" value=""/>
    <parameter key="time_zone" value="SYSTEM"/>
    <parameter key="locale" value="English (United States)"/>
    <list key="data_set_meta_data_information"/>
    <parameter key="read_not_matching_values_as_missings" value="true"/>
    <parameter key="datamanagement" value="double_array"/>
    </operator>
    <operator activated="true" breakpoints="after" class="text:process_document_from_data" compatibility="7.3.000" expanded="true" height="82" name="Process Documents from Data" width="90" x="246" y="34">
    <parameter key="create_word_vector" value="true"/>
    <parameter key="vector_creation" value="TF-IDF"/>
    <parameter key="add_meta_information" value="true"/>
    <parameter key="keep_text" value="true"/>
    <parameter key="prune_method" value="absolute"/>
    <parameter key="prune_below_percent" value="3.0"/>
    <parameter key="prune_above_percent" value="30.0"/>
    <parameter key="prune_below_absolute" value="1"/>
    <parameter key="prune_above_absolute" value="10"/>
    <parameter key="prune_below_rank" value="0.05"/>
    <parameter key="prune_above_rank" value="0.95"/>
    <parameter key="datamanagement" value="double_sparse_array"/>
    <parameter key="select_attributes_and_weights" value="false"/>
    <list key="specify_weights"/>
    <process expanded="true">
    <operator activated="true" class="text:tokenize" compatibility="7.3.000" expanded="true" height="68" name="Tokenize" width="90" x="112" y="34">
    <parameter key="mode" value="non letters"/>
    <parameter key="characters" value=".:"/>
    <parameter key="language" value="English"/>
    <parameter key="max_token_length" value="3"/>
    </operator>
    <operator activated="true" class="text:transform_cases" compatibility="7.3.000" expanded="true" height="68" name="Transform Cases" width="90" x="246" y="34">
    <parameter key="transform_to" value="lower case"/>
    </operator>
    <operator activated="true" class="text:filter_by_length" compatibility="7.3.000" expanded="true" height="68" name="Filter Tokens (by Length)" width="90" x="246" y="187">
    <parameter key="min_chars" value="4"/>
    <parameter key="max_chars" value="30"/>
    </operator>
    <operator activated="true" class="text:stem_porter" compatibility="7.3.000" expanded="true" height="68" name="Stem (Porter)" width="90" x="380" y="136"/>
    <operator activated="true" class="text:filter_stopwords_english" compatibility="7.3.000" expanded="true" height="68" name="Filter Stopwords (English)" width="90" x="514" y="34"/>
    <operator activated="true" class="text:generate_n_grams_terms" compatibility="7.3.000" expanded="true" height="68" name="Generate n-Grams (Terms)" width="90" x="648" y="34">
    <parameter key="max_length" value="1"/>
    </operator>
    <connect from_port="document" to_op="Tokenize" to_port="document"/>
    <connect from_op="Tokenize" from_port="document" to_op="Transform Cases" to_port="document"/>
    <connect from_op="Transform Cases" from_port="document" to_op="Filter Tokens (by Length)" to_port="document"/>
    <connect from_op="Filter Tokens (by Length)" from_port="document" to_op="Stem (Porter)" to_port="document"/>
    <connect from_op="Stem (Porter)" from_port="document" to_op="Filter Stopwords (English)" to_port="document"/>
    <connect from_op="Filter Stopwords (English)" from_port="document" to_op="Generate n-Grams (Terms)" to_port="document"/>
    <connect from_op="Generate n-Grams (Terms)" from_port="document" to_port="document 1"/>
    <portSpacing port="source_document" spacing="0"/>
    <portSpacing port="sink_document 1" spacing="0"/>
    <portSpacing port="sink_document 2" spacing="0"/>
    </process>
    </operator>
    <operator activated="true" class="set_role" compatibility="7.3.000" expanded="true" height="82" name="Set Role" width="90" x="380" y="34">
    <parameter key="attribute_name" value="Sentiment"/>
    <parameter key="target_role" value="label"/>
    <list key="set_additional_roles"/>
    </operator>
    <operator activated="true" class="x_validation" compatibility="7.3.000" expanded="true" height="124" name="Validation" width="90" x="514" y="34">
    <parameter key="create_complete_model" value="false"/>
    <parameter key="average_performances_only" value="true"/>
    <parameter key="leave_one_out" value="false"/>
    <parameter key="number_of_validations" value="10"/>
    <parameter key="sampling_type" value="automatic"/>
    <parameter key="use_local_random_seed" value="false"/>
    <parameter key="local_random_seed" value="1992"/>
    <process expanded="true">
    <operator activated="true" class="support_vector_machine" compatibility="7.3.000" expanded="true" height="124" name="SVM (2)" width="90" x="179" y="34">
    <parameter key="kernel_type" value="dot"/>
    <parameter key="kernel_gamma" value="1.0"/>
    <parameter key="kernel_sigma1" value="1.0"/>
    <parameter key="kernel_sigma2" value="0.0"/>
    <parameter key="kernel_sigma3" value="2.0"/>
    <parameter key="kernel_shift" value="1.0"/>
    <parameter key="kernel_degree" value="2.0"/>
    <parameter key="kernel_a" value="1.0"/>
    <parameter key="kernel_b" value="0.0"/>
    <parameter key="kernel_cache" value="200"/>
    <parameter key="C" value="0.0"/>
    <parameter key="convergence_epsilon" value="0.001"/>
    <parameter key="max_iterations" value="100000"/>
    <parameter key="scale" value="true"/>
    <parameter key="calculate_weights" value="true"/>
    <parameter key="return_optimization_performance" value="true"/>
    <parameter key="L_pos" value="1.0"/>
    <parameter key="L_neg" value="1.0"/>
    <parameter key="epsilon" value="0.0"/>
    <parameter key="epsilon_plus" value="0.0"/>
    <parameter key="epsilon_minus" value="0.0"/>
    <parameter key="balance_cost" value="false"/>
    <parameter key="quadratic_loss_pos" value="false"/>
    <parameter key="quadratic_loss_neg" value="false"/>
    <parameter key="estimate_performance" value="false"/>
    </operator>
    <connect from_port="training" to_op="SVM (2)" to_port="training set"/>
    <connect from_op="SVM (2)" from_port="model" to_port="model"/>
    <portSpacing port="source_training" spacing="0"/>
    <portSpacing port="sink_model" spacing="0"/>
    <portSpacing port="sink_through 1" spacing="0"/>
    </process>
    <process expanded="true">
    <operator activated="true" class="apply_model" compatibility="7.1.001" expanded="true" height="82" name="Apply Model" width="90" x="45" y="34">
    <list key="application_parameters"/>
    <parameter key="create_view" value="false"/>
    </operator>
    <operator activated="true" class="performance" compatibility="7.3.000" expanded="true" height="82" name="Performance (2)" width="90" x="246" y="34">
    <parameter key="use_example_weights" value="true"/>
    </operator>
    <connect from_port="model" to_op="Apply Model" to_port="model"/>
    <connect from_port="test set" to_op="Apply Model" to_port="unlabelled data"/>
    <connect from_op="Apply Model" from_port="labelled data" to_op="Performance (2)" to_port="labelled data"/>
    <connect from_op="Performance (2)" from_port="performance" to_port="averagable 1"/>
    <portSpacing port="source_model" spacing="0"/>
    <portSpacing port="source_test set" spacing="0"/>
    <portSpacing port="source_through 1" spacing="0"/>
    <portSpacing port="sink_averagable 1" spacing="0"/>
    <portSpacing port="sink_averagable 2" spacing="0"/>
    </process>
    </operator>
    <operator activated="true" class="read_excel" compatibility="7.3.000" expanded="true" height="68" name="Read Excel (2)" width="90" x="45" y="187">
    <parameter key="sheet_number" value="1"/>
    <parameter key="imported_cell_range" value="A1"/>
    <parameter key="encoding" value="SYSTEM"/>
    <parameter key="first_row_as_names" value="true"/>
    <list key="annotations"/>
    <parameter key="date_format" value=""/>
    <parameter key="time_zone" value="SYSTEM"/>
    <parameter key="locale" value="English (United States)"/>
    <list key="data_set_meta_data_information"/>
    <parameter key="read_not_matching_values_as_missings" value="true"/>
    <parameter key="datamanagement" value="double_array"/>
    </operator>
    <operator activated="true" class="text:process_document_from_data" compatibility="7.3.000" expanded="true" height="82" name="Process Apply Documents" width="90" x="246" y="187">
    <parameter key="create_word_vector" value="true"/>
    <parameter key="vector_creation" value="TF-IDF"/>
    <parameter key="add_meta_information" value="true"/>
    <parameter key="keep_text" value="true"/>
    <parameter key="prune_method" value="absolute"/>
    <parameter key="prune_below_percent" value="3.0"/>
    <parameter key="prune_above_percent" value="30.0"/>
    <parameter key="prune_below_absolute" value="1"/>
    <parameter key="prune_above_absolute" value="10"/>
    <parameter key="prune_below_rank" value="0.05"/>
    <parameter key="prune_above_rank" value="0.95"/>
    <parameter key="datamanagement" value="double_sparse_array"/>
    <parameter key="select_attributes_and_weights" value="false"/>
    <list key="specify_weights"/>
    <process expanded="true">
    <operator activated="true" class="text:tokenize" compatibility="7.3.000" expanded="true" height="68" name="Tokenize (3)" width="90" x="45" y="34">
    <parameter key="mode" value="non letters"/>
    <parameter key="characters" value=".:"/>
    <parameter key="language" value="English"/>
    <parameter key="max_token_length" value="3"/>
    </operator>
    <operator activated="true" class="text:transform_cases" compatibility="7.3.000" expanded="true" height="68" name="Transform Cases (3)" width="90" x="179" y="34">
    <parameter key="transform_to" value="lower case"/>
    </operator>
    <operator activated="true" class="text:filter_by_length" compatibility="7.3.000" expanded="true" height="68" name="Filter Tokens (2)" width="90" x="179" y="238">
    <parameter key="min_chars" value="4"/>
    <parameter key="max_chars" value="25"/>
    </operator>
    <operator activated="true" class="text:stem_porter" compatibility="7.3.000" expanded="true" height="68" name="Stem (2)" width="90" x="380" y="136"/>
    <operator activated="true" class="text:filter_stopwords_english" compatibility="7.3.000" expanded="true" height="68" name="Filter Stopwords (3)" width="90" x="447" y="34"/>
    <operator activated="true" class="text:generate_n_grams_terms" compatibility="7.3.000" expanded="true" height="68" name="Generate n-Grams (2)" width="90" x="581" y="34">
    <parameter key="max_length" value="1"/>
    </operator>
    <connect from_port="document" to_op="Tokenize (3)" to_port="document"/>
    <connect from_op="Tokenize (3)" from_port="document" to_op="Transform Cases (3)" to_port="document"/>
    <connect from_op="Transform Cases (3)" from_port="document" to_op="Filter Tokens (2)" to_port="document"/>
    <connect from_op="Filter Tokens (2)" from_port="document" to_op="Stem (2)" to_port="document"/>
    <connect from_op="Stem (2)" from_port="document" to_op="Filter Stopwords (3)" to_port="document"/>
    <connect from_op="Filter Stopwords (3)" from_port="document" to_op="Generate n-Grams (2)" to_port="document"/>
    <connect from_op="Generate n-Grams (2)" from_port="document" to_port="document 1"/>
    <portSpacing port="source_document" spacing="0"/>
    <portSpacing port="sink_document 1" spacing="0"/>
    <portSpacing port="sink_document 2" spacing="0"/>
    </process>
    </operator>
    <operator activated="true" class="apply_model" compatibility="7.1.001" expanded="true" height="82" name="Apply Model (2)" width="90" x="514" y="238">
    <list key="application_parameters"/>
    <parameter key="create_view" value="false"/>
    </operator>
    <connect from_op="Read Excel" from_port="output" to_op="Process Documents from Data" to_port="example set"/>
    <connect from_op="Process Documents from Data" from_port="example set" to_op="Set Role" to_port="example set input"/>
    <connect from_op="Process Documents from Data" from_port="word list" to_op="Process Apply Documents" to_port="word list"/>
    <connect from_op="Set Role" from_port="example set output" to_op="Validation" to_port="training"/>
    <connect from_op="Validation" from_port="model" to_op="Apply Model (2)" to_port="model"/>
    <connect from_op="Validation" from_port="training" to_port="result 1"/>
    <connect from_op="Validation" from_port="averagable 1" to_port="result 2"/>
    <connect from_op="Read Excel (2)" from_port="output" to_op="Process Apply Documents" to_port="example set"/>
    <connect from_op="Process Apply Documents" from_port="example set" to_op="Apply Model (2)" to_port="unlabelled data"/>
    <connect from_op="Apply Model (2)" from_port="labelled data" to_port="result 3"/>
    <portSpacing port="source_input 1" spacing="0"/>
    <portSpacing port="sink_result 1" spacing="0"/>
    <portSpacing port="sink_result 2" spacing="0"/>
    <portSpacing port="sink_result 3" spacing="0"/>
    <portSpacing port="sink_result 4" spacing="0"/>
    </process>
    </operator>
    </process>

     

    Thomas_Ott RapidMiner Certified Analyst, RapidMiner Certified Expert, Member Posts: 1,761 Unicorn

    Hi @slimik, I made some changes to your process. It's best to do the model optimization offline instead of incorporating it into a scoring process. The optimization routine can be very time intensive depending on how many parameters you choose to optimize. I selected a few parameters to show you how it's done, but feel free to change them.

     

    I also placed the Set Role operator before the Process Documents operator.
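    To get a sense of why the optimization can be so time intensive: the grid below varies the prune method (3 values) plus six numeric pruning parameters with roughly ten steps each, so an exhaustive search is on the order of 3 × 10^6 combinations, and every one of them re-runs the text processing and a full cross-validation. In practice you would usually keep only the two or three parameters that matter for the prune method you actually use.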

     

    <?xml version="1.0" encoding="UTF-8"?><process version="7.3.000">
    <context>
    <input/>
    <output/>
    <macros/>
    </context>
    <operator activated="true" class="process" compatibility="6.0.002" expanded="true" name="Process">
    <process expanded="true">
    <operator activated="true" class="read_excel" compatibility="7.3.000" expanded="true" height="68" name="Read Excel" width="90" x="45" y="34">
    <list key="annotations"/>
    <list key="data_set_meta_data_information"/>
    </operator>
    <operator activated="false" class="read_excel" compatibility="7.3.000" expanded="true" height="68" name="Read Excel (2)" width="90" x="45" y="238">
    <list key="annotations"/>
    <list key="data_set_meta_data_information"/>
    </operator>
    <operator activated="true" class="set_role" compatibility="7.3.000" expanded="true" height="82" name="Set Role" width="90" x="179" y="34">
    <parameter key="attribute_name" value="Sentiment"/>
    <parameter key="target_role" value="label"/>
    <list key="set_additional_roles"/>
    </operator>
    <operator activated="true" class="optimize_parameters_grid" compatibility="7.3.000" expanded="true" height="124" name="Optimize Parameters (Grid)" width="90" x="313" y="34">
    <list key="parameters">
    <parameter key="Process Documents from Data.prune_method" value="percentual,absolute,by ranking"/>
    <parameter key="Process Documents from Data.prune_below_percent" value="[0.0;100.0;10;linear]"/>
    <parameter key="Process Documents from Data.prune_above_percent" value="[0.0;100.0;10;linear]"/>
    <parameter key="Process Documents from Data.prune_below_absolute" value="[0.0;100.0;10;linear]"/>
    <parameter key="Process Documents from Data.prune_above_absolute" value="[0.0;100.0;10;linear]"/>
    <parameter key="Process Documents from Data.prune_below_rank" value="[0.0;1.0;10;linear]"/>
    <parameter key="Process Documents from Data.prune_above_rank" value="[0.0;1.0;10;linear]"/>
    </list>
    <process expanded="true">
    <operator activated="true" breakpoints="after" class="text:process_document_from_data" compatibility="7.3.000" expanded="true" height="82" name="Process Documents from Data" width="90" x="313" y="34">
    <parameter key="keep_text" value="true"/>
    <parameter key="prune_method" value="absolute"/>
    <parameter key="prune_below_absolute" value="1"/>
    <parameter key="prune_above_absolute" value="10"/>
    <list key="specify_weights"/>
    <process expanded="true">
    <operator activated="true" class="text:tokenize" compatibility="7.3.000" expanded="true" height="68" name="Tokenize" width="90" x="112" y="34"/>
    <operator activated="true" class="text:transform_cases" compatibility="7.3.000" expanded="true" height="68" name="Transform Cases" width="90" x="246" y="34"/>
    <operator activated="true" class="text:filter_by_length" compatibility="7.3.000" expanded="true" height="68" name="Filter Tokens (by Length)" width="90" x="246" y="187">
    <parameter key="max_chars" value="30"/>
    </operator>
    <operator activated="true" class="text:stem_porter" compatibility="7.3.000" expanded="true" height="68" name="Stem (Porter)" width="90" x="380" y="136"/>
    <operator activated="true" class="text:filter_stopwords_english" compatibility="7.3.000" expanded="true" height="68" name="Filter Stopwords (English)" width="90" x="514" y="34"/>
    <operator activated="true" class="text:generate_n_grams_terms" compatibility="7.3.000" expanded="true" height="68" name="Generate n-Grams (Terms)" width="90" x="648" y="34">
    <parameter key="max_length" value="1"/>
    </operator>
    <connect from_port="document" to_op="Tokenize" to_port="document"/>
    <connect from_op="Tokenize" from_port="document" to_op="Transform Cases" to_port="document"/>
    <connect from_op="Transform Cases" from_port="document" to_op="Filter Tokens (by Length)" to_port="document"/>
    <connect from_op="Filter Tokens (by Length)" from_port="document" to_op="Stem (Porter)" to_port="document"/>
    <connect from_op="Stem (Porter)" from_port="document" to_op="Filter Stopwords (English)" to_port="document"/>
    <connect from_op="Filter Stopwords (English)" from_port="document" to_op="Generate n-Grams (Terms)" to_port="document"/>
    <connect from_op="Generate n-Grams (Terms)" from_port="document" to_port="document 1"/>
    <portSpacing port="source_document" spacing="0"/>
    <portSpacing port="sink_document 1" spacing="0"/>
    <portSpacing port="sink_document 2" spacing="0"/>
    </process>
    </operator>
    <operator activated="true" class="x_validation" compatibility="7.3.000" expanded="true" height="124" name="Validation" width="90" x="514" y="34">
    <process expanded="true">
    <operator activated="true" class="support_vector_machine" compatibility="7.3.000" expanded="true" height="124" name="SVM (2)" width="90" x="179" y="34"/>
    <connect from_port="training" to_op="SVM (2)" to_port="training set"/>
    <connect from_op="SVM (2)" from_port="model" to_port="model"/>
    <portSpacing port="source_training" spacing="0"/>
    <portSpacing port="sink_model" spacing="0"/>
    <portSpacing port="sink_through 1" spacing="0"/>
    </process>
    <process expanded="true">
    <operator activated="true" class="apply_model" compatibility="7.1.001" expanded="true" height="82" name="Apply Model" width="90" x="45" y="34">
    <list key="application_parameters"/>
    </operator>
    <operator activated="true" class="performance" compatibility="7.3.000" expanded="true" height="82" name="Performance (2)" width="90" x="246" y="34"/>
    <connect from_port="model" to_op="Apply Model" to_port="model"/>
    <connect from_port="test set" to_op="Apply Model" to_port="unlabelled data"/>
    <connect from_op="Apply Model" from_port="labelled data" to_op="Performance (2)" to_port="labelled data"/>
    <connect from_op="Performance (2)" from_port="performance" to_port="averagable 1"/>
    <portSpacing port="source_model" spacing="0"/>
    <portSpacing port="source_test set" spacing="0"/>
    <portSpacing port="source_through 1" spacing="0"/>
    <portSpacing port="sink_averagable 1" spacing="0"/>
    <portSpacing port="sink_averagable 2" spacing="0"/>
    </process>
    </operator>
    <connect from_port="input 1" to_op="Process Documents from Data" to_port="example set"/>
    <connect from_op="Process Documents from Data" from_port="example set" to_op="Validation" to_port="training"/>
    <connect from_op="Validation" from_port="averagable 1" to_port="performance"/>
    <portSpacing port="source_input 1" spacing="0"/>
    <portSpacing port="source_input 2" spacing="0"/>
    <portSpacing port="sink_performance" spacing="0"/>
    <portSpacing port="sink_result 1" spacing="0"/>
    <portSpacing port="sink_result 2" spacing="0"/>
    </process>
    </operator>
    <operator activated="false" class="text:process_document_from_data" compatibility="7.3.000" expanded="true" height="82" name="Process Apply Documents" width="90" x="246" y="238">
    <parameter key="keep_text" value="true"/>
    <parameter key="prune_method" value="absolute"/>
    <parameter key="prune_below_absolute" value="1"/>
    <parameter key="prune_above_absolute" value="10"/>
    <list key="specify_weights"/>
    <process expanded="true">
    <operator activated="true" class="text:tokenize" compatibility="7.3.000" expanded="true" height="68" name="Tokenize (3)" width="90" x="45" y="34"/>
    <operator activated="true" class="text:transform_cases" compatibility="7.3.000" expanded="true" height="68" name="Transform Cases (3)" width="90" x="179" y="34"/>
    <operator activated="true" class="text:filter_by_length" compatibility="7.3.000" expanded="true" height="68" name="Filter Tokens (2)" width="90" x="179" y="238"/>
    <operator activated="true" class="text:stem_porter" compatibility="7.3.000" expanded="true" height="68" name="Stem (2)" width="90" x="380" y="136"/>
    <operator activated="true" class="text:filter_stopwords_english" compatibility="7.3.000" expanded="true" height="68" name="Filter Stopwords (3)" width="90" x="447" y="34"/>
    <operator activated="true" class="text:generate_n_grams_terms" compatibility="7.3.000" expanded="true" height="68" name="Generate n-Grams (2)" width="90" x="581" y="34">
    <parameter key="max_length" value="1"/>
    </operator>
    <connect from_port="document" to_op="Tokenize (3)" to_port="document"/>
    <connect from_op="Tokenize (3)" from_port="document" to_op="Transform Cases (3)" to_port="document"/>
    <connect from_op="Transform Cases (3)" from_port="document" to_op="Filter Tokens (2)" to_port="document"/>
    <connect from_op="Filter Tokens (2)" from_port="document" to_op="Stem (2)" to_port="document"/>
    <connect from_op="Stem (2)" from_port="document" to_op="Filter Stopwords (3)" to_port="document"/>
    <connect from_op="Filter Stopwords (3)" from_port="document" to_op="Generate n-Grams (2)" to_port="document"/>
    <connect from_op="Generate n-Grams (2)" from_port="document" to_port="document 1"/>
    <portSpacing port="source_document" spacing="0"/>
    <portSpacing port="sink_document 1" spacing="0"/>
    <portSpacing port="sink_document 2" spacing="0"/>
    </process>
    </operator>
    <operator activated="false" class="apply_model" compatibility="7.1.001" expanded="true" height="82" name="Apply Model (2)" width="90" x="447" y="238">
    <list key="application_parameters"/>
    </operator>
    <connect from_op="Read Excel" from_port="output" to_op="Set Role" to_port="example set input"/>
    <connect from_op="Read Excel (2)" from_port="output" to_op="Process Apply Documents" to_port="example set"/>
    <connect from_op="Set Role" from_port="example set output" to_op="Optimize Parameters (Grid)" to_port="input 1"/>
    <connect from_op="Optimize Parameters (Grid)" from_port="performance" to_port="result 1"/>
    <connect from_op="Optimize Parameters (Grid)" from_port="parameter" to_port="result 2"/>
    <connect from_op="Optimize Parameters (Grid)" from_port="result 1" to_port="result 3"/>
    <connect from_op="Process Apply Documents" from_port="example set" to_op="Apply Model (2)" to_port="unlabelled data"/>
    <portSpacing port="source_input 1" spacing="0"/>
    <portSpacing port="sink_result 1" spacing="0"/>
    <portSpacing port="sink_result 2" spacing="0"/>
    <portSpacing port="sink_result 3" spacing="0"/>
    <portSpacing port="sink_result 4" spacing="0"/>
    <description align="center" color="yellow" colored="false" height="105" resized="false" width="180" x="447" y="128">Do your scoring after you optimized the model. The optimization schemes can be very time intensive.</description>
    </process>
    </operator>
    </process>
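    When the grid search finishes, the parameter output of Optimize Parameters (Grid) (wired to result 2 above) should report the best parameter combination it found; you can then set those values on Process Documents from Data and Process Apply Documents and re-enable the deactivated scoring branch.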
    slimik Member Posts: 7 Contributor I

    @Thomas_Ott thank you very much! The solution you suggested worked perfectly.
