Tests on artificial data with 90% noise

In [1]:
library(adabag)
library(naivebayes)
Loading required package: rpart
Loading required package: caret
Loading required package: lattice
Loading required package: ggplot2
Loading required package: foreach
Loading required package: doParallel
Loading required package: iterators
Loading required package: parallel
In [2]:
# files available in: /net/aistaff/kleiweg/spraak/fa
train = read.table("data090.train", header=TRUE, sep="\t", quote="", row.names=1)
test  = read.table("data090.test",  header=TRUE, sep="\t", quote="", row.names=1)
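
A quick sanity check on the loaded data — dimensions and class balance of the target — can be run before modelling; a minimal sketch, not executed as part of this run:

dim(train); dim(test)      # rows and columns of each set
table(train$C.Class)       # distribution of the classes A, B, C in the training set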
In [3]:
train[1:10,]
    C.Class C.W1 C.W2 C.W3 C.W4 C.W5 C.W6 C.W7 C.W8 C.W9 C.W11 C.W12 C.W13 C.W14 C.W15 C.W16 C.W17 C.W18 C.W19 C.W20
168 C       1.A2 2.B2 3.C1 4.B1 5.B1 6.C3 7.C2 8.B1 9.B1 11.C1 12.A1 13.A2 14.C4 15.B1 16.C4 17.B3 18.B1 19.A2 20.C3
391 A       1.B3 2.A1 3.A1 4.B2 5.A3 6.C2 7.B4 8.C1 9.B4 11.A1 12.C1 13.C6 14.A1 15.B1 16.A1 17.A2 18.C1 19.A2 20.B2
373 B       1.B2 2.B2 3.C2 4.B4 5.B1 6.A2 7.C2 8.B1 9.A1 11.A1 12.A1 13.C3 14.B1 15.B1 16.B1 17.A3 18.A1 19.B4 20.B3
488 C       1.C3 2.C6 3.A1 4.B4 5.B1 6.C3 7.B4 8.A1 9.C2 11.B2 12.B1 13.C3 14.C1 15.B1 16.A1 17.B3 18.A1 19.A2 20.A1
242 C       1.C2 2.B2 3.C2 4.B2 5.B1 6.C3 7.A1 8.C2 9.C3 11.C2 12.A1 13.B1 14.C1 15.A4 16.B1 17.B3 18.C1 19.B4 20.A1
781 C       1.B6 2.A1 3.C5 4.A3 5.A2 6.C1 7.B4 8.B1 9.A1 11.A1 12.C1 13.B2 14.A1 15.A4 16.C1 17.B5 18.A2 19.A1 20.A1
194 B       1.C2 2.A1 3.B2 4.A4 5.B1 6.A2 7.A2 8.B1 9.C3 11.A1 12.B1 13.B2 14.C1 15.B1 16.C3 17.B1 18.B1 19.B1 20.B1
893 B       1.A2 2.B2 3.A2 4.B2 5.A9 6.B2 7.B2 8.B1 9.C1 11.A1 12.A1 13.C3 14.A1 15.B1 16.B1 17.B1 18.C2 19.B4 20.B3
324 B       1.B6 2.C1 3.A2 4.B2 5.C3 6.B2 7.A2 8.B1 9.C3 11.A1 12.B1 13.B2 14.A1 15.B1 16.C4 17.B5 18.B1 19.A2 20.A1
430 B       1.C3 2.A1 3.A2 4.C4 5.A9 6.B1 7.A1 8.A8 9.B3 11.B2 12.A1 13.B1 14.C1 15.A2 16.B1 17.B3 18.A4 19.A2 20.B2

Bagging (AdaBag)

In [4]:
bag <- bagging(C.Class ~ ., data=train)
train.bagging <- predict(bag, newdata=train)
 test.bagging <- predict(bag, newdata=test)
100 * (1 - train.bagging$error)
100 * (1 -  test.bagging$error)
86.3333333333333
51
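
Beyond the overall error, predict.bagging also returns a confusion matrix, and the fitted ensemble carries relative variable importances; a sketch of how these could be inspected, assuming the installed adabag version exposes these components:

test.bagging$confusion     # predicted vs. observed classes on the test set
bag$importance             # relative importance of each predictor in the ensemble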

Boosting (AdaBoost)

In [5]:
boost <- boosting(C.Class ~ ., data=train)
train.boosting <- predict(boost, newdata=train)
 test.boosting <- predict(boost, newdata=test)
100 * (1 - train.boosting$error)
100 * (1 -  test.boosting$error)
100
53
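
The train/test gap (100 vs. 53) suggests the boosted ensemble is fitting the 90% noise; one way to look at this is to track test error as trees are added, e.g. with adabag's errorevol (a sketch, not run here):

evol <- errorevol(boost, newdata=test)                       # test error after each boosting iteration
plot(evol$error, type="l", xlab="trees", ylab="test error")  # error curve over the number of trees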

Naive Bayes

In [6]:
# accuracy (%): share of predictions that match the C.Class column of `exp`
score <- function(obs, exp) {
  return(100 * sum(obs == exp[,"C.Class"]) / length(obs))
}

nb <- naive_bayes(C.Class ~ ., data=train)
train.nb <- predict(nb, train)
 test.nb <- predict(nb, test)
score(train.nb, train)
score( test.nb, test)
65.6666666666667
50
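
With this much noise, some class/level combinations may be rare or absent in the training data; Laplace smoothing is the usual remedy in naive_bayes. A sketch — whether it changes the scores here is not shown:

nb.lap <- naive_bayes(C.Class ~ ., data=train, laplace=1)   # add-one smoothing for empty cells
score(predict(nb.lap, test), test)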

simpel.go

In [7]:
out <- system2("./simpel", args=c("data090.train", "data090.test"), stdout=TRUE, stderr=TRUE)
cat(out, sep="\n")
Training score:	 65.6%
Testing score:	 49.0%
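
For comparison, the test accuracies reported above can be collected into one small table (values taken from the cells above):

data.frame(method = c("bagging", "boosting", "naive Bayes", "simpel"),
           test.accuracy.pct = c(51, 53, 50, 49))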