Supervised learning

M. Benesty

2017-11-09

library(fastrtext)

data("train_sentences")
data("test_sentences")

# prepare data
tmp_file_model <- tempfile()

train_labels <- paste0("__label__", train_sentences[,"class.text"])
train_texts <- tolower(train_sentences[,"text"])
train_to_write <- paste(train_labels, train_texts)
train_tmp_file_txt <- tempfile()
writeLines(text = train_to_write, con = train_tmp_file_txt)

test_labels <- paste0("__label__", test_sentences[,"class.text"])
test_labels_without_prefix <- test_sentences[,"class.text"]
test_texts <- tolower(test_sentences[,"text"])
test_to_write <- paste(test_labels, test_texts)
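
fastText expects one example per line, with each label prefixed by __label__ and separated from the (lowercased) text by a space. A minimal illustration of the format built above, using a made-up sentence:

# illustration only: the supervised format is "__label__CLASS text", one example per line
toy_label <- paste0("__label__", "MISC")
toy_line <- paste(toy_label, tolower("This sentence would be written to the training file."))
# toy_line is "__label__MISC this sentence would be written to the training file."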

# learn model
execute(commands = c("supervised", "-input", train_tmp_file_txt, "-output", tmp_file_model, "-dim", 20, "-lr", 1, "-epoch", 20, "-wordNgrams", 2, "-verbose", 1))
## Read 0M words
## Number of words:  5060
## Number of labels: 15
## Progress: 100.0%  words/sec/thread: 2364298  lr: 0.000000  loss: 0.306825  eta: 0h0m
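
As a sketch only, not part of the original vignette, the bundled fastText binary can also score a held-out file directly through execute. This assumes it accepts fastText's standard test sub-command and that the model was written to the path with a .bin extension; test_tmp_file_txt is an illustrative name.

# sketch: evaluate with fastText's own test command (reports precision and recall at k = 1)
test_tmp_file_txt <- tempfile()
writeLines(text = test_to_write, con = test_tmp_file_txt)
execute(commands = c("test", paste0(tmp_file_model, ".bin"), test_tmp_file_txt, 1))
unlink(test_tmp_file_txt)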
# load model
model <- load_model(tmp_file_model)
## add .bin extension to the path
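
To check what the model learned, fastrtext exposes accessors for the label set and the vocabulary; a quick sketch, output omitted:

# sketch: inspect the loaded model
print(get_labels(model))           # the 15 class labels seen during training
print(head(get_dictionary(model))) # first entries of the 5060-word vocabulary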
# predictions are returned as a list with labels and probabilities
predictions <- predict(model, sentences = test_to_write)
print(head(predictions, 5))
## [[1]]
##      OWNX 
## 0.9980469 
## 
## [[2]]
##      MISC 
## 0.9921875 
## 
## [[3]]
##      MISC 
## 0.9921875 
## 
## [[4]]
##      OWNX 
## 0.9238281 
## 
## [[5]]
##     AIMX 
## 0.984375
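predict also takes a k argument to return the k most probable labels per sentence. A quick sketch, stored in a separate variable so the accuracy computation below is unchanged:

# sketch: top 2 candidate labels for the first test sentence
top2 <- predict(model, sentences = test_to_write[1], k = 2)
print(top2)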
# compute accuracy
mean(names(unlist(predictions)) == test_labels_without_prefix)
## [1] 0.8366667
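Beyond the overall accuracy, a confusion matrix shows which classes get mixed up. Base R's table is enough here, assuming exactly one prediction per sentence:

# sketch: cross-tabulate predicted vs. true classes
print(table(predicted = names(unlist(predictions)),
            actual = test_labels_without_prefix))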
# because there is only one category per observation, the hamming loss equals the accuracy
get_hamming_loss(as.list(test_labels_without_prefix), predictions)
## [1] 0.8366667
# you can get a flat named vector of results when retrieving only one label per observation
print(head(predict(model, sentences = test_to_write, simplify = TRUE)))
##      OWNX      MISC      MISC      OWNX      AIMX      CONT 
## 0.9980469 0.9921875 0.9921875 0.9238281 0.9843750 0.3945313
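The simplified vector pairs naturally with the true labels in a data frame, which makes it easy to look at misclassified sentences; a sketch using base R only:

# sketch: gather predictions, probabilities and true labels side by side
flat_predictions <- predict(model, sentences = test_to_write, simplify = TRUE)
results <- data.frame(predicted = names(flat_predictions),
                      probability = as.numeric(flat_predictions),
                      actual = test_labels_without_prefix,
                      stringsAsFactors = FALSE)
print(head(results[results$predicted != results$actual, ]))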
# remove temporary files and free memory
unlink(train_tmp_file_txt)
unlink(tmp_file_model)
rm(model)
gc()
##           used (Mb) gc trigger (Mb) max used (Mb)
## Ncells  551382 29.5     940480 50.3   750400 40.1
## Vcells 1138243  8.7    1943012 14.9  1543057 11.8