Tests on artificial data with 70% noise

In [1]:
library(adabag)
library(naivebayes)
Loading required package: rpart
Loading required package: caret
Loading required package: lattice
Loading required package: ggplot2
Loading required package: foreach
Loading required package: doParallel
Loading required package: iterators
Loading required package: parallel
In [2]:
# files available in: /net/aistaff/kleiweg/spraak/fa
train = read.table("data070.train", header=TRUE, sep="\t", quote="", row.names=1)
test  = read.table("data070.test",  header=TRUE, sep="\t", quote="", row.names=1)
In [3]:
train[1:10,]
    C.Class C.W1 C.W2 C.W3 C.W4 C.W5 C.W6 C.W7 C.W8 C.W9 C.W11 C.W12 C.W13 C.W14 C.W15 C.W16 C.W17 C.W18 C.W19 C.W20
758 B       1.B2 2.C4 3.B1 4.C1 5.B2 6.B1 7.C1 8.A2 9.A1 11.B1 12.B2 13.A2 14.A1 15.B1 16.A1 17.A4 18.B3 19.B1 20.C1
987 A       1.B1 2.B1 3.C1 4.A1 5.C4 6.C1 7.C1 8.B6 9.B3 11.A1 12.A1 13.B1 14.A1 15.A4 16.B6 17.B7 18.C1 19.C1 20.A1
392 B       1.C1 2.C4 3.B1 4.A1 5.A1 6.B1 7.B2 8.C1 9.A1 11.B2 12.A1 13.A2 14.B3 15.C1 16.A1 17.A4 18.A2 19.B1 20.B1
820 C       1.A3 2.B1 3.C1 4.C1 5.A1 6.C1 7.C1 8.C1 9.C1 11.A1 12.C1 13.B4 14.C3 15.C1 16.A1 17.C2 18.C1 19.C1 20.A1
485 B       1.B2 2.A3 3.A1 4.A1 5.B2 6.B1 7.B1 8.B8 9.A1 11.B2 12.B2 13.A2 14.B4 15.B1 16.B5 17.B7 18.C1 19.A2 20.B2
251 C       1.C1 2.C4 3.A1 4.C1 5.B2 6.A1 7.A1 8.A2 9.B3 11.A1 12.C1 13.B5 14.C7 15.C1 16.B3 17.A3 18.C1 19.B1 20.A1
247 B       1.B1 2.A1 3.C1 4.B1 5.B2 6.C1 7.B1 8.B9 9.B3 11.C1 12.C1 13.B6 14.C7 15.B1 16.A1 17.B7 18.B3 19.C1 20.C1
882 C       1.B1 2.C2 3.B1 4.B1 5.C6 6.A1 7.C1 8.B9 9.C1 11.A1 12.C2 13.A2 14.C2 15.B1 16.C3 17.C2 18.B1 19.C1 20.C1
549 B       1.B2 2.A3 3.B1 4.B1 5.B2 6.B1 7.A1 8.B6 9.C1 11.B1 12.B1 13.B6 14.C4 15.B1 16.B6 17.B7 18.B3 19.B1 20.B2
584 B       1.B1 2.A1 3.B4 4.A4 5.C7 6.B1 7.A1 8.B9 9.B3 11.A1 12.B4 13.C2 14.C4 15.C1 16.B6 17.B7 18.C1 19.A2 20.B1
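
Each row is one sample: the class label C.Class (A, B or C) plus 19 categorical features C.W1 through C.W20 (C.W10 is absent from these files). The classifiers below expect factor columns; on R >= 4.0 read.table no longer converts strings to factors by default, so a defensive sketch (assuming that conversion is needed on your setup) would be:

# Coerce every column to a factor and inspect the class distribution.
train[] <- lapply(train, factor)
test[]  <- lapply(test,  factor)
table(train$C.Class)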

Bagging (AdaBag)

In [4]:
bag <- bagging(C.Class ~ ., data=train)
train.bagging <- predict(bag, newdata=train)
 test.bagging <- predict(bag, newdata=test)
100 * (1 - train.bagging$error)
100 * (1 -  test.bagging$error)
93.7777777777778
83
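
bagging() from adabag fits an ensemble of rpart trees on bootstrap resamples (mfinal trees, 100 by default) and predicts by majority vote. A hedged sketch of two follow-ups the package supports, assuming the objects from cell [4] are still in memory:

# Confusion matrix on the test set (returned by predict.bagging).
test.bagging$confusion

# Try a larger ensemble; mfinal sets the number of bootstrapped trees.
bag200 <- bagging(C.Class ~ ., data=train, mfinal=200)
100 * (1 - predict(bag200, newdata=test)$error)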

Boosting (AdaBoost)

In [5]:
boost <- boosting(C.Class ~ ., data=train)
train.boosting <- predict(boost, newdata=train)
 test.boosting <- predict(boost, newdata=test)
100 * (1 - train.boosting$error)
100 * (1 -  test.boosting$error)
100
88
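
boosting() implements AdaBoost.M1: each successive tree concentrates on the cases the previous ones misclassified, which explains the perfect resubstitution score on this noisy data. For a less optimistic estimate, adabag also provides v-fold cross-validation; a sketch, assuming 10 folds and the same number of iterations as above:

# Cross-validated error estimate (slower: refits the ensemble v times).
cv <- boosting.cv(C.Class ~ ., data=train, v=10, mfinal=100)
100 * (1 - cv$error)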

Naive Bayes

In [6]:
score <- function(obs, exp) {
  # Percentage of predicted labels that match the true C.Class column.
  return(100 * sum(obs == exp[,"C.Class"]) / length(obs))
}

nb <- naive_bayes(C.Class ~ ., data=train)
train.nb <- predict(nb, train)
 test.nb <- predict(nb, test)
score(train.nb, train)
score( test.nb, test)
93
92
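
The naivebayes package also exposes Laplace smoothing and class posteriors, which can help with sparse feature levels in noisy data; a sketch, assuming the model above:

# Laplace smoothing guards against zero counts for rare feature levels.
nb.l <- naive_bayes(C.Class ~ ., data=train, laplace=1)
score(predict(nb.l, test), test)

# Per-class posterior probabilities instead of hard labels.
head(predict(nb, test, type="prob"))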

simpel.go

In [7]:
out <- system2("./simpel", args=c("data070.train", "data070.test"), stdout=TRUE, stderr=TRUE)
cat(out, sep="\n")
Training score:	 92.3%
Testing score:	 91.0%
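
For a side-by-side comparison with the R models, the captured output can be parsed back into numbers; a sketch that assumes the two-line layout printed above:

# Strip everything except the digits and the decimal point.
scores <- as.numeric(gsub("[^0-9.]", "", out))
names(scores) <- c("train", "test")
scores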