CRAN Package Check Results for Package topicmodels.etm

Last updated on 2025-10-30 01:50:20 CET.

Flavor Version Tinstall Tcheck Ttotal Status Flags
r-devel-linux-x86_64-debian-clang 0.1.0 11.09 283.39 294.48 NOTE
r-devel-linux-x86_64-debian-gcc 0.1.0 17.53 243.09 260.62 NOTE
r-devel-linux-x86_64-fedora-clang 0.1.0 44.00 414.60 458.60 NOTE
r-devel-linux-x86_64-fedora-gcc 0.1.0 44.00 407.85 451.85 NOTE
r-devel-windows-x86_64 0.1.0 14.00 108.00 122.00 ERROR
r-patched-linux-x86_64 0.1.0 10.78 275.45 286.23 NOTE
r-release-linux-x86_64 0.1.0 10.42 276.87 287.29 NOTE
r-release-macos-arm64 0.1.0 8.00 95.00 103.00 NOTE
r-release-macos-x86_64 0.1.0 15.00 193.00 208.00 NOTE
r-release-windows-x86_64 0.1.0 21.00 138.00 159.00 ERROR
r-oldrel-macos-arm64 0.1.0 6.00 41.00 47.00 NOTE
r-oldrel-macos-x86_64 0.1.0 7.00 58.00 65.00 NOTE
r-oldrel-windows-x86_64 0.1.0 14.00 179.00 193.00 ERROR

Check Details

Version: 0.1.0
Check: CRAN incoming feasibility
Result: NOTE Maintainer: ‘Jan Wijffels <jwijffels@bnosac.be>’ The Description field contains David M. Blei (2019), available at <arXiv:1907.04907>. Please refer to arXiv e-prints via their arXiv DOI <doi:10.48550/arXiv.YYMM.NNNNN>. Flavors: r-devel-linux-x86_64-debian-clang, r-devel-linux-x86_64-debian-gcc

Version: 0.1.0
Check: Rd files
Result: NOTE checkRd: (-1) ETM.Rd:33: Lost braces in \itemize; \value handles \item{}{} directly checkRd: (-1) ETM.Rd:34: Lost braces in \itemize; \value handles \item{}{} directly checkRd: (-1) ETM.Rd:35: Lost braces in \itemize; \value handles \item{}{} directly checkRd: (-1) ETM.Rd:36: Lost braces in \itemize; \value handles \item{}{} directly checkRd: (-1) ETM.Rd:37: Lost braces in \itemize; \value handles \item{}{} directly Flavors: r-devel-linux-x86_64-debian-clang, r-devel-linux-x86_64-debian-gcc, r-devel-linux-x86_64-fedora-clang, r-devel-linux-x86_64-fedora-gcc, r-devel-windows-x86_64, r-patched-linux-x86_64, r-release-linux-x86_64, r-release-macos-arm64, r-release-macos-x86_64, r-release-windows-x86_64, r-oldrel-macos-arm64, r-oldrel-macos-x86_64, r-oldrel-windows-x86_64

Version: 0.1.0
Check: examples
Result: ERROR Running examples in 'topicmodels.etm-Ex.R' failed The error most likely occurred in: > ### Name: ETM > ### Title: Topic Modelling in Semantic Embedding Spaces > ### Aliases: ETM > > ### ** Examples > > library(torch) > library(topicmodels.etm) > library(word2vec) > library(udpipe) > data(brussels_reviews_anno, package = "udpipe") > ## > ## Toy example with pretrained embeddings > ## > > ## a. build word2vec model > x <- subset(brussels_reviews_anno, language %in% "nl") > x <- paste.data.frame(x, term = "lemma", group = "doc_id") > set.seed(4321) > w2v <- word2vec(x = x$lemma, dim = 15, iter = 20, type = "cbow", min_count = 5) > embeddings <- as.matrix(w2v) > > ## b. build document term matrix on nouns + adjectives, align with the embedding terms > dtm <- subset(brussels_reviews_anno, language %in% "nl" & upos %in% c("NOUN", "ADJ")) > dtm <- document_term_frequencies(dtm, document = "doc_id", term = "lemma") > dtm <- document_term_matrix(dtm) > dtm <- dtm_conform(dtm, columns = rownames(embeddings)) > dtm <- dtm[dtm_rowsums(dtm) > 0, ] > > ## create and fit an embedding topic model - 8 topics, theta 100-dimensional > if (torch::torch_is_installed()) { + + set.seed(4321) + torch_manual_seed(4321) + model <- ETM(k = 8, dim = 100, embeddings = embeddings, dropout = 0.5) + optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012) + overview <- model$fit(data = dtm, optimizer = optimizer, epoch = 40, batch_size = 1000) + scores <- predict(model, dtm, type = "topics") + + lastbatch <- subset(overview$loss, overview$loss$batch_is_last == TRUE) + plot(lastbatch$epoch, lastbatch$loss) + plot(overview$loss_test) + + ## show top words in each topic + terminology <- predict(model, type = "terms", top_n = 7) + terminology + + ## + ## Toy example without pretrained word embeddings + ## + set.seed(4321) + torch_manual_seed(4321) + model <- ETM(k = 8, dim = 100, embeddings = 15, dropout = 0.5, vocab = colnames(dtm)) + optimizer <- 
optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012) + overview <- model$fit(data = dtm, optimizer = optimizer, epoch = 40, batch_size = 1000) + terminology <- predict(model, type = "terms", top_n = 7) + terminology + + + + ## Don't show: + ## + ## Another example using fit_original + ## + data(ng20, package = "topicmodels.etm") + vocab <- ng20$vocab + tokens <- ng20$bow_tr$tokens + counts <- ng20$bow_tr$counts + + torch_manual_seed(123456789) + model <- ETM(k = 4, vocab = vocab, dim = 5, embeddings = 25) + model + optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012) + + traindata <- list(tokens = tokens, counts = counts, vocab = vocab) + test1 <- list(tokens = ng20$bow_ts_h1$tokens, counts = ng20$bow_ts_h1$counts, vocab = vocab) + test2 <- list(tokens = ng20$bow_ts_h2$tokens, counts = ng20$bow_ts_h2$counts, vocab = vocab) + + out <- model$fit_original(data = traindata, test1 = test1, test2 = test2, epoch = 4, + optimizer = optimizer, batch_size = 1000, + lr_anneal_factor = 4, lr_anneal_nonmono = 10) + test <- subset(out$loss, out$loss$batch_is_last == TRUE) + plot(test$epoch, test$loss) + + topic.centers <- as.matrix(model, type = "embedding", which = "topics") + word.embeddings <- as.matrix(model, type = "embedding", which = "words") + topic.terminology <- as.matrix(model, type = "beta") + + terminology <- predict(model, type = "terms", top_n = 4) + terminology + ## End(Don't show) + + } Flavor: r-devel-windows-x86_64

Version: 0.1.0
Check: whether package can be installed
Result: WARN Found the following significant warnings: Warning: Torch libraries are installed but loading them caused a segfault. Flavor: r-release-windows-x86_64

Version: 0.1.0
Check: examples
Result: ERROR Running examples in 'topicmodels.etm-Ex.R' failed The error most likely occurred in: > ### Name: ETM > ### Title: Topic Modelling in Semantic Embedding Spaces > ### Aliases: ETM > > ### ** Examples > > library(torch) > library(topicmodels.etm) > library(word2vec) > library(udpipe) > data(brussels_reviews_anno, package = "udpipe") > ## > ## Toy example with pretrained embeddings > ## > > ## a. build word2vec model > x <- subset(brussels_reviews_anno, language %in% "nl") > x <- paste.data.frame(x, term = "lemma", group = "doc_id") > set.seed(4321) > w2v <- word2vec(x = x$lemma, dim = 15, iter = 20, type = "cbow", min_count = 5) > embeddings <- as.matrix(w2v) > > ## b. build document term matrix on nouns + adjectives, align with the embedding terms > dtm <- subset(brussels_reviews_anno, language %in% "nl" & upos %in% c("NOUN", "ADJ")) > dtm <- document_term_frequencies(dtm, document = "doc_id", term = "lemma") > dtm <- document_term_matrix(dtm) > dtm <- dtm_conform(dtm, columns = rownames(embeddings)) > dtm <- dtm[dtm_rowsums(dtm) > 0, ] > > ## create and fit an embedding topic model - 8 topics, theta 100-dimensional > if (torch::torch_is_installed()) { + + set.seed(4321) + torch_manual_seed(4321) + model <- ETM(k = 8, dim = 100, embeddings = embeddings, dropout = 0.5) + optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012) + overview <- model$fit(data = dtm, optimizer = optimizer, epoch = 40, batch_size = 1000) + scores <- predict(model, dtm, type = "topics") + + lastbatch <- subset(overview$loss, overview$loss$batch_is_last == TRUE) + plot(lastbatch$epoch, lastbatch$loss) + plot(overview$loss_test) + + ## show top words in each topic + terminology <- predict(model, type = "terms", top_n = 7) + terminology + + ## + ## Toy example without pretrained word embeddings + ## + set.seed(4321) + torch_manual_seed(4321) + model <- ETM(k = 8, dim = 100, embeddings = 15, dropout = 0.5, vocab = colnames(dtm)) + optimizer <- 
optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012) + overview <- model$fit(data = dtm, optimizer = optimizer, epoch = 40, batch_size = 1000) + terminology <- predict(model, type = "terms", top_n = 7) + terminology + + + + ## Don't show: + ## + ## Another example using fit_original + ## + data(ng20, package = "topicmodels.etm") + vocab <- ng20$vocab + tokens <- ng20$bow_tr$tokens + counts <- ng20$bow_tr$counts + + torch_manual_seed(123456789) + model <- ETM(k = 4, vocab = vocab, dim = 5, embeddings = 25) + model + optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012) + + traindata <- list(tokens = tokens, counts = counts, vocab = vocab) + test1 <- list(tokens = ng20$bow_ts_h1$tokens, counts = ng20$bow_ts_h1$counts, vocab = vocab) + test2 <- list(tokens = ng20$bow_ts_h2$tokens, counts = ng20$bow_ts_h2$counts, vocab = vocab) + + out <- model$fit_original(data = traindata, test1 = test1, test2 = test2, epoch = 4, + optimizer = optimizer, batch_size = 1000, + lr_anneal_factor = 4, lr_anneal_nonmono = 10) + test <- subset(out$loss, out$loss$batch_is_last == TRUE) + plot(test$epoch, test$loss) + + topic.centers <- as.matrix(model, type = "embedding", which = "topics") + word.embeddings <- as.matrix(model, type = "embedding", which = "words") + topic.terminology <- as.matrix(model, type = "beta") + + terminology <- predict(model, type = "terms", top_n = 4) + terminology + ## End(Don't show) + + } Error in cpp_torch_manual_seed(as.character(seed)) : Lantern is not loaded. Please use `install_torch()` to install additional dependencies. Calls: torch_manual_seed -> cpp_torch_manual_seed Execution halted Flavors: r-release-windows-x86_64, r-oldrel-windows-x86_64

These binaries (installable software packages) are under active development.
They may not be fully stable and should be used with caution; no guarantees are made about their behavior.
Health statistics for this service are available on the Monitor page.