textmodel Performance Comparisons

library("quanteda")
## Package version: 4.0.1
## Unicode version: 14.0
## ICU version: 71.1
## Parallel computing: disabled
## See https://quanteda.io for tutorials and examples.
library("quanteda.textmodels")

Naive Bayes

quanteda.textmodels implements fast methods for fitting and predicting Naive Bayes textmodels built especially for sparse document-feature matrices from textual data. It implements two models: multinomial and Bernoulli. (See Manning, Raghavan, and Schütze 2008, Chapter 13.)

Here, we compare performance for the two models, and then to the performance from two other packages for fitting these models.

For these tests, we will use the dataset of 50,000 movie reviews from Maas et al. (2011). We will use their partition into training and test sets for fitting and then evaluating our models.

# large movie review database of 50,000 movie reviews

# Tokenise the movie-review corpus and build a document-feature matrix,
# then split it into the training and test partitions recorded in the
# corpus's "set" docvar.
dfmat <- data_corpus_LMRD %>%
  tokens() %>%
  dfm()
dfmat_train <- dfm_subset(dfmat, set == "train")
dfmat_test <- dfm_subset(dfmat, set == "test")

Comparing the performance of fitting the model:

library("microbenchmark")

# Time the fitting of the two Naive Bayes variants (multinomial and
# Bernoulli), 20 replications each.
microbenchmark(
  multi = textmodel_nb(dfmat_train, dfmat_train$polarity,
                       distribution = "multinomial"),
  bern = textmodel_nb(dfmat_train, dfmat_train$polarity,
                      distribution = "Bernoulli"),
  times = 20
)
## Warning in microbenchmark(multi = textmodel_nb(dfmat_train,
## dfmat_train$polarity, : less accurate nanosecond times to avoid potential
## integer overflows
## Unit: milliseconds
##   expr      min       lq     mean   median       uq      max neval cld
##  multi 50.83127 51.56771 64.86338 59.03010 62.54054 147.6292    20   a
##   bern 58.01836 66.80708 71.62813 68.35528 69.48256 142.3839    20   a

And for prediction:

microbenchmark(
multi = predict(textmodel_nb(dfmat_train, dfmat_train$polarity, distribution = "multinomial"),
newdata = dfmat_test),
bern = predict(textmodel_nb(dfmat_train, dfmat_train$polarity, distribution = "Bernoulli"),
newdata = dfmat_test),
times = 20
)
## Unit: milliseconds
##   expr      min       lq     mean   median       uq      max neval cld
##  multi 57.55937 58.91091 67.68381 65.34137 67.42403 148.6692    20  a
##   bern 82.82636 90.40652 92.33755 91.81712 95.12195 103.6249    20   b

Now let's see how textmodel_nb() compares to equivalent functions from other packages. Multinomial:

library("fastNaiveBayes")
library("naivebayes")
## naivebayes 1.0.0 loaded
## For more information please visit:
## https://majkamichal.github.io/naivebayes/

microbenchmark(
textmodels = {
tmod <- textmodel_nb(dfmat_train, dfmat_train$polarity, smooth = 1, distribution = "multinomial")
pred <- predict(tmod, newdata = dfmat_test)
},
fastNaiveBayes = {
tmod <- fnb.multinomial(as(dfmat_train, "dgCMatrix"), y = dfmat_train$polarity, laplace = 1, sparse = TRUE)
pred <- predict(tmod, newdata = as(dfmat_test, "dgCMatrix"))
},
naivebayes = {
tmod = multinomial_naive_bayes(as(dfmat_train, "dgCMatrix"), dfmat_train$polarity, laplace = 1)
pred <- predict(tmod, newdata = as(dfmat_test, "dgCMatrix"))
},
times = 20
)
## Unit: milliseconds
##            expr      min       lq      mean   median        uq      max neval
##      textmodels 57.66162 59.39572  65.20553 66.81821  68.27248  77.8911    20
##  fastNaiveBayes 85.46265 91.71940 101.32351 98.01616 102.18373 178.8495    20
##      naivebayes 68.98701 70.72477  83.34398 75.69656  81.87509 212.1918    20
##  cld
##  a
##   b
##    c

And Bernoulli. Note here that while we are supplying the Boolean matrix to textmodel_nb(), this re-weighting from the count matrix would have been performed automatically within the function had we not done so in advance - it’s done here just for comparison.

# Pre-weight the dfms as Boolean (0/1) matrices for the Bernoulli models.
# textmodel_nb() would perform this re-weighting internally; doing it in
# advance ensures all three packages receive identical input.
dfmat_train_bern <- dfm_weight(dfmat_train, scheme = "boolean")
dfmat_test_bern <- dfm_weight(dfmat_test, scheme = "boolean")

# Fit + predict with each package's Bernoulli Naive Bayes, 20 replications.
# (Fixed from the rendered copy: statements un-fused onto separate lines,
# the garbled `\$` restored to `$`, and `=` assignment replaced with `<-`.)
microbenchmark(
  textmodel_nb = {
    tmod <- textmodel_nb(dfmat_train_bern, dfmat_train$polarity,
                         smooth = 1, distribution = "Bernoulli")
    # newdata is the count dfm; textmodel_nb() re-weights it to Boolean
    # internally, so this is equivalent to passing dfmat_test_bern
    pred <- predict(tmod, newdata = dfmat_test)
  },
  fastNaiveBayes = {
    tmod <- fnb.bernoulli(as(dfmat_train_bern, "dgCMatrix"),
                          y = dfmat_train$polarity,
                          laplace = 1, sparse = TRUE)
    pred <- predict(tmod, newdata = as(dfmat_test_bern, "dgCMatrix"))
  },
  naivebayes = {
    tmod <- bernoulli_naive_bayes(as(dfmat_train_bern, "dgCMatrix"),
                                  dfmat_train$polarity, laplace = 1)
    pred <- predict(tmod, newdata = as(dfmat_test_bern, "dgCMatrix"))
  },
  times = 20
)
## Unit: milliseconds
##            expr      min       lq      mean    median        uq      max neval
##    textmodel_nb 84.15849 86.64536  94.62188  94.09139 100.34684 112.4117    20
##  fastNaiveBayes 94.04449 98.89282 108.01997 104.90315 107.18821 189.3230    20
##      naivebayes 75.77546 76.96858  86.13614  83.53652  87.03496 168.9143    20
##  cld
##   a
##    b
##   a

References

Maas, Andrew L., Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts (2011). “Learning Word Vectors for Sentiment Analysis”. The 49th Annual Meeting of the Association for Computational Linguistics (ACL 2011).

Majka M (2020). naivebayes: High Performance Implementation of the Naive Bayes Algorithm in R. R package version 0.9.7, <URL: https://CRAN.R-project.org/package=naivebayes>. Date: 2020-03-08.

Manning, Christopher D., Prabhakar Raghavan, and Hinrich Schütze (2008). Introduction to Information Retrieval. Cambridge University Press.

Skogholt, Martin (2020). fastNaiveBayes: Extremely Fast Implementation of a Naive Bayes Classifier. R package version 2.2.1. https://github.com/mskogholt/fastNaiveBayes. Date: 2020-05-04.