Part I: Find the language

library(httr)
library(jsonlite)
library(tidyverse)

# Step 1: get the key
cogapikey<-"79d64b40a4b443bfae930c29e998aaab"

# Step 2: find the URL and endpoint path
api_url <- "https://westcentralus.api.cognitive.microsoft.com"
language_endpoint <- "/text/analytics/v2.1/languages"

# Step 3: convert the input into JSON
text=c("is this english?"
       ,"tak er der mere kage"
       ,"merci beaucoup"
       ,"guten morgen"
       ,"bonjour"
       ,"merde"
       ,"tēnā rawa atu koe"
       ,"That's terrible"
       ,"R is awesome"
       ,"ये क्या हुआ"
       ,"发生了什么事")


# Put data in an object that converts to the expected schema for the API
textdf <- tibble(text = text, id = seq_along(text))

mydata <- list(documents = textdf)
  
myJSONdata <- toJSON(mydata)

myJSONdata
## {"documents":[{"text":"is this english?","id":1},{"text":"tak er der mere kage","id":2},{"text":"merci beaucoup","id":3},{"text":"guten morgen","id":4},{"text":"bonjour","id":5},{"text":"merde","id":6},{"text":"tēnā rawa atu koe","id":7},{"text":"That's terrible","id":8},{"text":"R is awesome","id":9},{"text":"ये क्या हुआ","id":10},{"text":"发生了什么事","id":11}]}
# Step 4: send request
response <- POST(url = api_url,
                 path = language_endpoint,
                 config = add_headers(`Ocp-Apim-Subscription-Key` = cogapikey),
                 body = myJSONdata)
  
response
## Response [https://westcentralus.api.cognitive.microsoft.com/text/analytics/v2.1/languages]
##   Date: 2019-07-12 13:20
##   Status: 200
##   Content-Type: application/json; charset=utf-8
##   Size: 952 B
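
Before parsing, it's worth confirming the call actually succeeded; httr's built-in helpers make this a one-liner (a minimal defensive sketch):

# Error out early on a 4xx/5xx response (e.g. 401 for a bad key, 429 for rate limiting)
stop_for_status(response)
# Confirm the body is JSON before trying to parse it
http_type(response)
## [1] "application/json"
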
# Step 5: extract results from response
respframe <- response %>% 
  content() %>%      # parse the JSON body into nested lists
  flatten_df() %>%   # flatten to one row per document
  mutate(detectedLanguages = invoke_map(tibble, detectedLanguages), # nested lists -> tibble column
         id = as.numeric(id)) %>%  # ids come back as character; convert for joining
  unnest() %>%       # expand the detected-language fields into columns
  select(id, language = iso6391Name)
  
respframe
# Next steps: use the data in analysis, e.g. plotting, merging, modelling
textdf <- textdf %>% 
  left_join(respframe, by="id")

textdf
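
As a quick sanity check on the join, tallying the language column shows how many phrases landed in each detected language (a minimal sketch):

# Count how many phrases were assigned to each language
textdf %>% count(language, sort = TRUE)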

Part II: Find the sentiment

# Step 1: get the key: reuse the same key from Part I

# Step 2: find the new endpoint
sentiment_endpoint <- "/text/analytics/v2.1/sentiment"

# Step 3: convert the input into JSON
# textdf now carries the language column from Part I, so each document
# tells the sentiment endpoint which language it is in
mydata <- list(documents = textdf)

myJSONdata <- toJSON(mydata)

myJSONdata
## {"documents":[{"text":"is this english?","id":1,"language":"en"},{"text":"tak er der mere kage","id":2,"language":"da"},{"text":"merci beaucoup","id":3,"language":"fr"},{"text":"guten morgen","id":4,"language":"de"},{"text":"bonjour","id":5,"language":"en"},{"text":"merde","id":6,"language":"en"},{"text":"tēnā rawa atu koe","id":7,"language":"en"},{"text":"That's terrible","id":8,"language":"en"},{"text":"R is awesome","id":9,"language":"en"},{"text":"ये क्या हुआ","id":10,"language":"hi"},{"text":"发生了什么事","id":11,"language":"zh_chs"}]}
# Step 4: send the request
response <- POST(url = api_url,
                 path = sentiment_endpoint,
                 config = add_headers(`Ocp-Apim-Subscription-Key` = cogapikey),
                 body = myJSONdata)
response
## Response [https://westcentralus.api.cognitive.microsoft.com/text/analytics/v2.1/sentiment]
##   Date: 2019-07-12 13:20
##   Status: 200
##   Content-Type: application/json; charset=utf-8
##   Size: 667 B
# Step 5: extract results from response
respframe <- response %>% 
  content() %>%                # parse the JSON body
  flatten_df() %>%             # one row per document: id plus sentiment score
  mutate(id = as.numeric(id))  # convert id for joining back to textdf
  
respframe
# Next steps: use the data in analysis, e.g. plotting, merging, modelling
textdf <- textdf %>%
  left_join(respframe, by="id")
  
textdf
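
With language and sentiment joined on, the data is ready for ordinary tidyverse work. As a minimal sketch (assuming the v2.1 response's score column is named score, where 0 is most negative and 1 most positive):

# Plot each phrase by its sentiment score
ggplot(textdf, aes(x = reorder(text, score), y = score)) +
  geom_col() +
  coord_flip() +
  labs(x = NULL, y = "Sentiment score (0 = negative, 1 = positive)")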