The attribute abandon was already present in the example set

JBhullarJBhullar Member Posts: 10 Contributor I
edited November 2018 in Help
I am working on building a classification model for classifying text as positive, negative or neutral using Linear SVM. I have trained it using a pre-labelled dataset. But on testing, the model shows an error — "The attribute abandon was already present in the example set" — in the "Process Documents from Data" operator. I have tried both options for the "keep text" setting (ON/OFF) but nothing worked. How do I resolve it?

Answers

  • mschmitzmschmitz Administrator, Moderator, Employee, RapidMiner Certified Analyst, RapidMiner Certified Expert, University Professor Posts: 2,321  RM Data Scientist
    Hi,

    can you maybe share the process?

    ~Martin
    - Head of Data Science Services at RapidMiner -
    Dortmund, Germany
  • JBhullarJBhullar Member Posts: 10 Contributor I
    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
    <process version="6.5.002">
      <context>
        <input/>
        <output/>
        <macros/>
      </context>
      <operator activated="true" class="process" compatibility="6.5.002" expanded="true" name="Process">
        <process expanded="true">
          <operator activated="false" class="filter_examples" compatibility="6.5.002" expanded="true" height="94" name="Filter Examples (2)" width="90" x="179" y="30">
            <parameter key="invert_filter" value="true"/>
            <list key="filters_list">
              <parameter key="filters_entry_key" value="text.starts_with.RT"/>
            </list>
          </operator>
          <operator activated="true" class="retrieve" compatibility="6.5.002" expanded="true" height="60" name="Retrieve" width="90" x="313" y="75">
            <parameter key="repository_entry" value="../data/Store2"/>
          </operator>
          <operator activated="true" class="retrieve" compatibility="6.5.002" expanded="true" height="60" name="Retrieve (2)" width="90" x="313" y="210">
            <parameter key="repository_entry" value="../data/STORE"/>
          </operator>
          <operator activated="true" class="text:process_document_from_file" compatibility="6.5.000" expanded="true" height="76" name="Process Documents from Files" width="90" x="45" y="75">
            <list key="text_directories">
              <parameter key="n" value="C:\Users\JASLEEN\Desktop\New folder\neg"/>
            </list>
            <process expanded="true">
              <operator activated="true" class="text:tokenize" compatibility="6.5.000" expanded="true" height="60" name="Tokenize (2)" width="90" x="179" y="75"/>
              <connect from_port="document" to_op="Tokenize (2)" to_port="document"/>
              <connect from_op="Tokenize (2)" from_port="document" to_port="document 1"/>
              <portSpacing port="source_document" spacing="0"/>
              <portSpacing port="sink_document 1" spacing="0"/>
              <portSpacing port="sink_document 2" spacing="0"/>
            </process>
          </operator>
          <operator activated="true" class="nominal_to_text" compatibility="6.5.002" expanded="true" height="76" name="Nominal to Text" width="90" x="45" y="255"/>
          <operator activated="true" class="replace" compatibility="6.5.002" expanded="true" height="76" name="Replace" width="90" x="179" y="165">
            <parameter key="replace_what" value="(?:https?:\/\/)?(?:[\w]+\.)([a-zA-Z\.]{2,6})([\/\w\.-]*)*\/?"/>
          </operator>
          <operator activated="true" class="replace" compatibility="6.5.002" expanded="true" height="76" name="Replace (2)" width="90" x="179" y="300">
            <parameter key="attribute_filter_type" value="numeric_value_filter"/>
            <parameter key="numeric_condition" value="&gt;=0"/>
            <parameter key="replace_what" value="[0-9]"/>
          </operator>
          <operator activated="true" class="replace" compatibility="6.5.002" expanded="true" height="76" name="Replace (3)" width="90" x="179" y="435">
            <parameter key="attribute_filter_type" value="regular_expression"/>
            <parameter key="regular_expression" value="@([A-Za-z0-9_]+)"/>
            <parameter key="invert_selection" value="true"/>
            <parameter key="replace_what" value="@([A-Za-z0-9_]+)"/>
          </operator>
          <operator activated="true" class="text:process_document_from_data" compatibility="6.5.000" expanded="true" height="76" name="Process Documents from Data" width="90" x="447" y="255">
            <parameter key="prune_below_percent" value="2.0"/>
            <list key="specify_weights"/>
            <process expanded="true">
              <operator activated="true" class="text:tokenize" compatibility="6.5.000" expanded="true" height="60" name="Tokenize" width="90" x="45" y="75"/>
              <operator activated="true" class="wordnet:open_wordnet_dictionary" compatibility="5.3.000" expanded="true" height="60" name="Open WordNet Dictionary" width="90" x="112" y="390">
                <parameter key="directory" value="C:\Program Files (x86)\WordNet\2.1\dict"/>
              </operator>
              <operator activated="true" class="text:filter_stopwords_english" compatibility="6.5.000" expanded="true" height="60" name="Filter Stopwords (English)" width="90" x="112" y="165"/>
              <operator activated="true" class="text:filter_by_length" compatibility="6.5.000" expanded="true" height="60" name="Filter Tokens (by Length)" width="90" x="112" y="255">
                <parameter key="min_chars" value="3"/>
              </operator>
              <operator activated="true" class="wordnet:stem_wordnet" compatibility="5.3.000" expanded="true" height="76" name="Stem (WordNet)" width="90" x="246" y="300"/>
              <connect from_port="document" to_op="Tokenize" to_port="document"/>
              <connect from_op="Tokenize" from_port="document" to_op="Filter Stopwords (English)" to_port="document"/>
              <connect from_op="Open WordNet Dictionary" from_port="dictionary" to_op="Stem (WordNet)" to_port="dictionary"/>
              <connect from_op="Filter Stopwords (English)" from_port="document" to_op="Filter Tokens (by Length)" to_port="document"/>
              <connect from_op="Filter Tokens (by Length)" from_port="document" to_op="Stem (WordNet)" to_port="document"/>
              <connect from_op="Stem (WordNet)" from_port="document" to_port="document 1"/>
              <portSpacing port="source_document" spacing="0"/>
              <portSpacing port="sink_document 1" spacing="0"/>
              <portSpacing port="sink_document 2" spacing="0"/>
            </process>
          </operator>
          <operator activated="true" class="apply_model" compatibility="6.5.002" expanded="true" height="76" name="Apply Model" width="90" x="514" y="120">
            <list key="application_parameters"/>
          </operator>
          <connect from_op="Retrieve" from_port="output" to_op="Apply Model" to_port="model"/>
          <connect from_op="Retrieve (2)" from_port="output" to_op="Process Documents from Data" to_port="word list"/>
          <connect from_op="Process Documents from Files" from_port="example set" to_op="Nominal to Text" to_port="example set input"/>
          <connect from_op="Nominal to Text" from_port="example set output" to_op="Replace" to_port="example set input"/>
          <connect from_op="Replace" from_port="example set output" to_op="Replace (2)" to_port="example set input"/>
          <connect from_op="Replace (2)" from_port="example set output" to_op="Replace (3)" to_port="example set input"/>
          <connect from_op="Replace (3)" from_port="example set output" to_op="Process Documents from Data" to_port="example set"/>
          <connect from_op="Process Documents from Data" from_port="example set" to_op="Apply Model" to_port="unlabelled data"/>
          <connect from_op="Apply Model" from_port="labelled data" to_port="result 1"/>
          <portSpacing port="source_input 1" spacing="0"/>
          <portSpacing port="sink_result 1" spacing="0"/>
          <portSpacing port="sink_result 2" spacing="0"/>
        </process>
      </operator>
    </process>
  • mschmitzmschmitz Administrator, Moderator, Employee, RapidMiner Certified Analyst, RapidMiner Certified Expert, University Professor Posts: 2,321  RM Data Scientist
    Hi,

    the process is a bit odd. You load a file from disk, tokenize it, do some replacements, and then tokenize it again? Most likely you need to remove the Tokenize operator inside your Process Documents from Files operator. Otherwise it simply tries to create the column a second time, and this does not work.
    The other option would be to remove the bag-of-words attributes before the Process Documents from Data operator on the right.

    ~Martin
    - Head of Data Science Services at RapidMiner -
    Dortmund, Germany
Sign In or Register to comment.