Tests on artificial data with 50% noise

In [1]:
library(adabag)
library(naivebayes)
Loading required package: rpart
Loading required package: caret
Loading required package: lattice
Loading required package: ggplot2
Loading required package: foreach
Loading required package: doParallel
Loading required package: iterators
Loading required package: parallel
In [2]:
# files available in: /net/aistaff/kleiweg/spraak/fa
train = read.table("data050.train", header=TRUE, sep="\t", quote="", row.names=1)
test  = read.table("data050.test",  header=TRUE, sep="\t", quote="", row.names=1)
In [3]:
train[1:10,]
    C.Class C.W1 C.W2 C.W3 C.W4 C.W5 C.W6 C.W7 C.W8 C.W9 C.W11 C.W12 C.W13 C.W14 C.W15 C.W16 C.W17 C.W18 C.W19 C.W20
503 A       1.A2 2.C2 3.C1 4.A2 5.A1 6.A1 7.A1 8.A3 9.A4 11.B1 12.A1 13.A2 14.B5 15.A1 16.C2 17.A1 18.A2 19.A3 20.A1
554 C       1.A1 2.C1 3.C2 4.C1 5.A2 6.C1 7.C1 8.C1 9.B2 11.B1 12.C1 13.C8 14.C1 15.A2 16.C4 17.A1 18.C2 19.C8 20.C2
792 B       1.B4 2.B1 3.B1 4.B1 5.B1 6.B5 7.B1 8.C1 9.B2 11.B2 12.B2 13.B8 14.C1 15.B1 16.C3 17.A1 18.A3 19.B3 20.B1
743 B       1.B4 2.B1 3.B1 4.C1 5.B4 6.B5 7.B1 8.A3 9.B3 11.B1 12.B1 13.C7 14.A3 15.B1 16.A1 17.B7 18.B3 19.B3 20.B1
466 A       1.C3 2.A1 3.A1 4.B1 5.A1 6.B5 7.A1 8.A1 9.A5 11.B2 12.A1 13.B4 14.C1 15.A2 16.A1 17.A1 18.A3 19.B3 20.B1
542 C       1.C1 2.C2 3.A1 4.A1 5.C2 6.C1 7.B1 8.A1 9.C4 11.C1 12.C1 13.C3 14.B4 15.C1 16.C3 17.C1 18.A3 19.A5 20.C2
933 A       1.A1 2.B1 3.A1 4.A1 5.A2 6.A1 7.A1 8.C1 9.A1 11.B1 12.B4 13.A1 14.A2 15.A4 16.A1 17.A1 18.A3 19.C4 20.A1
440 B       1.C2 2.A1 3.B1 4.B1 5.A2 6.B5 7.B1 8.C1 9.B2 11.C1 12.C1 13.C4 14.C1 15.B1 16.B3 17.B5 18.C1 19.B2 20.B1
926 B       1.B3 2.B1 3.B1 4.B1 5.B2 6.B5 7.C1 8.C1 9.B3 11.B2 12.B3 13.B1 14.A4 15.B1 16.C4 17.B6 18.B4 19.C8 20.B1
403 C       1.B3 2.C1 3.C1 4.A1 5.B3 6.C1 7.C1 8.B6 9.C4 11.C2 12.C1 13.A1 14.C1 15.C1 16.B3 17.C1 18.C2 19.B2 20.A1
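
As a quick sanity check (not part of the original run), the sizes of the two sets and the balance of the classes can be inspected; this assumes, as above, that C.Class is the label column:

# optional sanity check: number of rows/columns and class balance
dim(train)
dim(test)
table(train$C.Class)
table(test$C.Class)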

Bagging (AdaBag)

In [4]:
# fit a bagged ensemble of classification trees on the training set
bag <- bagging(C.Class ~ ., data=train)
train.bagging <- predict(bag, newdata=train)
 test.bagging <- predict(bag, newdata=test)
100 * (1 - train.bagging$error)
100 * (1 -  test.bagging$error)
99.3333333333333
92
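
The predict() output from adabag also carries a confusion matrix, which shows where the remaining test errors fall; a minimal sketch (not executed in the original run), using the confusion component and, equivalently, a plain table of the predicted class labels:

# confusion matrix of predicted vs. observed classes on the test set
test.bagging$confusion
table(predicted=test.bagging$class, observed=test$C.Class)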

Boosting (AdaBoost)

In [5]:
# fit an AdaBoost ensemble of classification trees on the training set
boost <- boosting(C.Class ~ ., data=train)
train.boosting <- predict(boost, newdata=train)
 test.boosting <- predict(boost, newdata=test)
100 * (1 - train.boosting$error)
100 * (1 -  test.boosting$error)
100
96
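
The fitted boosting object also stores a relative importance score for each predictor, which hints at which of the word features drive the classification; a sketch, assuming the importance component as provided by current adabag versions:

# relative variable importance, largest first
sort(boost$importance, decreasing=TRUE)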

Naive Bayes

In [6]:
# accuracy in percent: share of predictions that match the C.Class column of the data
score <- function(obs, exp) {
  return(100 * sum(obs == exp[,"C.Class"]) / length(obs))
}

# fit a naive Bayes classifier on the categorical word features
nb <- naive_bayes(C.Class ~ ., data=train)
train.nb <- predict(nb, train)
 test.nb <- predict(nb, test)
score(train.nb, train)
score( test.nb, test)
99.8888888888889
98
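
Since predict() for a naive_bayes model returns a factor of class labels here, a plain contingency table gives the confusion matrix on the test set; a minimal sketch:

# confusion matrix: predicted (rows) vs. observed (columns) classes
table(predicted=test.nb, observed=test$C.Class)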

simpel.go

In [7]:
# run the external simpel classifier on the same train/test files
out <- system2("./simpel", args=c("data050.train", "data050.test"), stdout=TRUE, stderr=TRUE)
cat(out, sep="\n")
Training score:	 99.9%
Testing score:	 99.0%
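
To compare the four classifiers side by side, the test accuracies computed above can be collected into a single data frame; a sketch, with the simpel score entered by hand since it is only available as printed text:

# summary of test-set accuracy (%); simpel's value copied from its output above
data.frame(method   = c("bagging", "boosting", "naive Bayes", "simpel"),
           accuracy = c(100 * (1 - test.bagging$error),
                        100 * (1 - test.boosting$error),
                        score(test.nb, test),
                        99.0))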