import os
import sys
import random
import json
import numpy as np
import pandas as pd
import api_blueLib as bk

Url = 'http://services.bluekai.com/Services/WS/SegmentInventory?countries=ALL'
uDom = 'http://services.bluekai.com'
uPath = '/Services/WS/'
uServ = 'audiences'
data = '{}'
method = 'GET'


newUrl = bk.signatureInputBuilder(uDom+uPath+uServ,'GET', None)
audL = json.loads(bk.doRequest(newUrl,'GET', None))
aReach = []
aCamp = []
# aud = audL['audiences'][15]  ## leftover single-audience test, the loop below covers all audiences
for aud in audL['audiences']:
    print aud['name']
    newUrl = bk.signatureInputBuilder(uDom+uPath+uServ+'/'+str(aud['id']),'GET', None)
    audComp = json.loads(bk.doRequest(newUrl,'GET', None))
    aCamp.append(json.dumps(audComp['campaigns']))
    data = json.dumps(audComp['segments'],separators=(',', ':'))
    newUrl = bk.signatureInputBuilder(Url,'POST',data)
    resp = json.loads(bk.doRequest(newUrl,'POST',data))
    sCat = ''
    sReach = ''
    for cat in resp['AND'][0]['AND'][0]['OR']:
        sCat += str(cat['AND'][0]['cat']) + ', '
        sReach += str(cat['AND'][0]['reach']) + ', '
    aLine = {'name':aud['name'],'id':aud['id'],'reach':resp['reach'],'camp':json.dumps(audComp['campaigns']),"cat":sCat,"catReach":sReach}
    aReach.append(aLine)

# for col in aLine:
#     aLine[col] = data[col].str.decode('iso-8859-1').str.encode('utf-8')

audState = pd.DataFrame(aReach)
##audState.to_csv("../../raw/audReach.csv",encoding='utf-8')
audState.to_csv("../../raw/audReach.csv",encoding='cp1252')
# aList = namedtuple('name', 'id')
# aName = [aList(**k) for k in resp["audiences"]]
			

BlueKai API

Self-programmed usage of the BlueKai DMP (campaign sync, reach calculation, ...). Modules: CampSync, AudienceReach, Libraries, UserData.
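A minimal sketch of what the two api_blueLib helpers used above are assumed to do: BlueKai web-service calls are authenticated by appending a bkuid/bksig query-string pair, where the signature is an HMAC-SHA256 keyed with the web-service private key. The key values and the exact string-to-sign below are placeholders and assumptions, not the library's verified implementation.

import hmac, hashlib, base64, urllib, urlparse, requests

BK_USER_KEY = 'user-key-placeholder'        ## bkuid (placeholder)
BK_PRIVATE_KEY = 'private-key-placeholder'  ## signing key (placeholder)

def signatureInputBuilder(url, method, data):
    ## assumed string to sign: method + path + query-string values + body
    parts = urlparse.urlparse(url)
    values = ''.join([v for k, v in urlparse.parse_qsl(parts.query)])
    toSign = method + parts.path + values + (data or '')
    sig = base64.urlsafe_b64encode(hmac.new(BK_PRIVATE_KEY, toSign, hashlib.sha256).digest())
    sep = '&' if parts.query else '?'
    return url + sep + 'bkuid=' + BK_USER_KEY + '&bksig=' + urllib.quote_plus(sig)

def doRequest(url, method, data):
    ## plain HTTP call against the signed URL
    if method == 'POST':
        return requests.post(url, data=data).text
    return requests.get(url).text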

import api_dfpLib as dfp
import tempfile
import urlparse,urllib,urllib2,requests,cookielib
import base64
import json,gzip,zlib,sys,csv
import time
import StringIO
import os
from googleads import dfp
from googleads import errors


class dfpAuth():
    def __init__(self):
        credFile = os.environ['LAV_DIR'] + '/credenza/googleads.yaml'
        dfp_client = dfp.DfpClient.LoadFromStorage(credFile)
        network_service = dfp_client.GetService('NetworkService', version='v201702')
        self.report_downloader = dfp_client.GetDataDownloader(version='v201702')
        self.root_ad_unit_id = (network_service.getCurrentNetwork()['effectiveRootAdUnitId'])
        
    def runRep(self,report_job):
        try:
            report_job_id = self.report_downloader.WaitForReport(report_job)
        except errors.DfpReportError as e:
            print 'Failed to generate report. Error was: %s' % e
            raise

        report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
        self.report_downloader.DownloadReportToFile(report_job_id,'CSV_DUMP',report_file)
        report_file.close()
        content = gzip.GzipFile(report_file.name).read()
        cr = csv.reader(content.splitlines(), delimiter=',')
        cr_list = list(cr)
        campL = []
        for row in cr_list:
            campL.append(row)
        return campL
			

DFP/DoubleClick API

Retrieve inventory and forecast data from DoubleClick for Publishers. Modules: Inventory, SetCredentials.
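A hypothetical usage of the dfpAuth wrapper above: build a ReportQuery, let WaitForReport run it, and read back the gunzipped CSV rows. The dimensions, columns and date range are examples only, not the report this project actually pulls.

auth = dfpAuth()
report_job = {
    'reportQuery': {
        'dimensions': ['ORDER_NAME', 'LINE_ITEM_NAME'],
        'columns': ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CLICKS'],
        'dateRangeType': 'LAST_WEEK',
    }
}
rows = auth.runRep(report_job)   ## header row + one row per order/line item
print rows[0]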


import urlparse,urllib,urllib2,requests,cookielib
import base64
import json,gzip,zlib,sys,csv
import time
import StringIO
import os

reload(sys)
sys.setdefaultencoding('utf8')

headers = {"Accept":"application/json","Content-type":"application/x-www-form-urlencoded; charset=UTF-8","User_Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5"}
baseUrl = 'http://api.dashboard.ad.dotandad.com:9190'
credUrl = '/api/v1/token'
repUrl = '/api/v1/reports'
flightUrl = '/api/v1/flights'
posUrl = '/api/v1/sizesPositions'
revUrl = '/api/v1/reports/monetization/'
key_file = os.environ['LAV_DIR'] + '/credenza/dotandmedia.json'
cred = []
with open(key_file) as f:
    cred = json.load(f)
def getToken():
    resp = requests.post(baseUrl+credUrl,headers=headers,data=json.dumps(cred))
    return resp.json()['resource']['token']

def waitRep(token,query,headers):
    r = requests.post(baseUrl+repUrl,headers=headers,data=json.dumps(query,separators=(',', ':')))
    print(r.status_code,r.reason,r.text)
    headers = {"Content-type":"application/x-www-form-urlencoded"}
    query = {"token":token}
    url = baseUrl+repUrl+'/'+r.json()['resource']['reportUuid']
    counter = 1
    stat = 404
    while stat not in [200,401,500]:
        rep = requests.post(url,data=json.dumps(query,separators=(',', ':')),headers=headers)
        stat = int(rep.status_code)
        time.sleep(2)
        counter += 1
        if(counter > 100):
            break
        print(rep.status_code,rep.reason,counter)
    return rep
    
def formRep(zipped):
    content = gzip.GzipFile(fileobj=StringIO.StringIO(zipped)).read()
    cr = csv.reader(content.splitlines(), delimiter=',')
    cr_list = list(cr)
    campL = []
    for row in cr_list:
        campL.append(row)
    return campL
    ##content = zlib.decompress(zipped)##decode('utf-8')
    # fo = open("ciccia.tar.gz","w")
    # fo.write(rep.content)


def flightList(query,headers):
    r = requests.post(baseUrl+flightUrl,headers=headers,data=json.dumps(query))
    print(r.status_code,r.reason)
    return r


def wrBuffer(df):
    buff = StringIO.StringIO()
    df.to_csv(buff)
    buff.seek(0)
    output = buff.getvalue()
    txt = str(output)
    buff.close()
    return txt
    ##clipboard.copy(output)

def wrMatrix(df):
    buff = StringIO.StringIO()
    df.to_csv(buff,header=False, index=False)
    buff.seek(0)
    output = buff.getvalue()
    txt = str(output)
    buff.close()
    return txt
##clipboard.copy(output)
			

dot&media API

Partition of the inventory into classes with the same statistical properties. Modules: Daily inventory, Daily inventory flight list.
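A sketch of how the three helpers above chain together. The report body below is a placeholder: the script only shows that the token travels inside the JSON payload, so the remaining field names are assumptions.

token = getToken()
query = {"token": token, "startDate": "2017-02-01", "endDate": "2017-02-20"}  ## hypothetical report spec
rep = waitRep(token, query, headers)
if rep.status_code == 200:
    rows = formRep(rep.content)   ## gunzip + CSV parse
    print len(rows), 'rows'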

##https://developer.yahoo.com/brightroll/dsp/api/docs/authentication/
##https://developer.yahoo.com/apps/0TH8345e/

import urllib
import urllib2
import cookielib
import base64
import requests
import json
import csv
import re
import time
import datetime
import numpy as np
import pandas as pd
import os

print '-----------------------api-brightroll--------------------------------'

headers = {"Accept":"application/json","Content-type":"application/x-www-form-urlencoded; charset=UTF-8","User_Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5"}
today = time.strftime("%Y-%m-%d")
dataQ = [(datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),(datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")]

sData = {"params": {},"version": "1.1","method": "getConnectionTest"}
key_file = os.environ['HOME'] + '/lav/media/credenza/gemini.json'
cred = []
with open(key_file) as f:
    cred = json.load(f)

sData = {"grant_type":"authorization_code","redirect_uri":"oob","code":cred['app_code']}
sData = {"grant_type":"refresh_token","redirect_uri":"oob","refresh_token":cred['refresh_token']}
headers = {"Authorization":"Basic "+base64.standard_b64encode(cred['client_id']+":"+cred['client_secret']),"Content-Type":"application/json"}
sData = {"grant_type":"refresh_token","redirect_uri":"oob","refresh_token":cred['refresh_token']}
baseUrl = 'https://api.login.yahoo.com/oauth2/'
resq = requests.post(baseUrl+'get_token',headers=headers, data=json.dumps(sData))
token = resq.json()['access_token']
headers = {"Content-Type":"application/json","X-Auth-Method":"OAUTH","X-Auth-Token":token}
sData = {"reportOption": {
    "timezone": "Europe/Paris"
    ,"currency": 3
    ,"dimensionTypeIds": [4,5]
    ##,"dimensionTypeIds": [5,39]
    ,"metricTypeIds": [1,2,23,44,46]
    #,"metricTypeIds": [1,2]
},
         "intervalTypeId": 1
         ,"dateTypeId": 2
        # ,"startDate": dataQ[0] + "T00:00:00-05:00"
         # ,"endDate": dataQ[1] + "T11:59:59-05:00"
         ,"startDate": "2017-02-01" + "T00:00:00-05:00"
         ,"endDate": "2017-02-20" + "T11:59:59-05:00"
}
baseUrl = "http://api-sched-v3.admanagerplus.yahoo.com/yamplus_api/extreport/"
resq = requests.post(baseUrl,headers=headers, data=json.dumps(sData))
time.sleep(.5)
repId = resq.json()['customerReportId']

print resq.json()['status']
stat = ""
counter = 1
while stat not in ["Success",401]:
    resq = requests.get("http://api-sched-v3.admanagerplus.yahoo.com/yamplus_api/extreport/"+repId,headers=headers)
    time.sleep(2)
    stat = resq.json()['status']
    time.sleep(2)
    print counter
    counter += 1
    if counter > 100:
        break


    
print stat
repLink = resq.json()['url']
repFile = '/var/www/webdav/report_brightroll/' + 'camp' + today + 'Brightroll' + '.csv'
urllib.urlretrieve(repLink,filename=repFile)


resq = requests.get(repLink,headers=headers,verify=False)
## normalize to utf-8 bytes so Python 2's csv module can parse the payload
content = resq.content.decode('utf-8','ignore').encode('utf-8').strip()
cr = csv.reader(content.splitlines(), delimiter=',')
campL = [row for row in cr]

campDf = pd.DataFrame(campL)
##campDf.to_csv(repFile,encoding='utf-8')
##campDf.to_csv("../../raw/audUseBright.csv",encoding='utf-8')
# aList = namedtuple('name', 'id')
			

Yahoo! API

API scripting for BrightRoll and Gemini.
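The same flow can be wrapped into two reusable helpers; the endpoints, headers and payload fields are exactly the ones already used in the script above, nothing new is assumed beyond the packaging.

def refreshToken(cred):
    ## exchange the stored refresh_token for a fresh access token
    hdr = {"Authorization":"Basic "+base64.standard_b64encode(cred['client_id']+":"+cred['client_secret']),
           "Content-Type":"application/json"}
    body = {"grant_type":"refresh_token","redirect_uri":"oob","refresh_token":cred['refresh_token']}
    r = requests.post('https://api.login.yahoo.com/oauth2/get_token', headers=hdr, data=json.dumps(body))
    return r.json()['access_token']

def waitForReport(repId, hdr, max_tries=100):
    ## poll the extreport endpoint until the report is ready, then return its download URL
    url = "http://api-sched-v3.admanagerplus.yahoo.com/yamplus_api/extreport/" + repId
    for i in range(max_tries):
        r = requests.get(url, headers=hdr)
        if r.json().get('status') == "Success":
            return r.json()['url']
        time.sleep(2)
    return None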

import os
import json
import signal
import urllib
import urlparse
from multiprocessing import Process
from wsgiref.simple_server import make_server
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import OAuth2WebServerFlow
import oauth2
import oauth2.grant
import oauth2.error
import oauth2.store.memory
import oauth2.tokengenerator
import oauth2.web.wsgi

key_file = os.environ['HOME'] + '/lav/media/credenza/gemini.json'
cred = []
with open(key_file) as f:
    cred = json.load(f)

nuggad_api = 'https://api.nuggad.net'
## Ruby example from the nugg.ad documentation (a Python translation follows below):
## oauth_client = OAuth2::Client.new(cred['client_id'], cred['client_secret'], :site => nuggad_api)
## token = oauth_client.password.get_token(cred['user_name'], cred['password'])
## response = token.get('/networks', :format => 'json')
## puts response.body
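A hedged Python translation of the Ruby snippet above: a resource-owner password grant against the nugg.ad API via requests. The /oauth/token endpoint and the bearer-token header are assumptions inferred from the Ruby OAuth2 client defaults, not verified nugg.ad documentation.

import requests

tok = requests.post(nuggad_api + '/oauth/token',   ## assumed token endpoint
                    data={'grant_type': 'password',
                          'client_id': cred['client_id'],
                          'client_secret': cred['client_secret'],
                          'username': cred['user_name'],
                          'password': cred['password']}).json()
resp = requests.get(nuggad_api + '/networks', params={'format': 'json'},
                    headers={'Authorization': 'Bearer ' + tok['access_token']})  ## assumed auth header
print resp.text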

class ExampleSiteAdapter(oauth2.web.AuthorizationCodeGrantSiteAdapter,
                         oauth2.web.ImplicitGrantSiteAdapter):
    def authenticate(self, request, environ, scopes, client):
        if request.post_param("confirm") == "confirm":
            return {}
        raise oauth2.error.UserNotAuthenticated
    def render_auth_page(self, request, response, environ, scopes, client):
        response.body = '''
        <!-- auth page HTML (stripped in this extract) -->
        '''
        return response

    def user_has_denied_access(self, request):
        if request.post_param("deny") == "deny":
            return True
        return False


client_store = oauth2.store.memory.ClientStore()
client_store.add_client(client_id="abc", client_secret="xyz",
                        redirect_uris=["http://localhost/callback"])
site_adapter = ExampleSiteAdapter()
token_store = oauth2.store.memory.TokenStore()
provider = oauth2.Provider(
    access_token_store=token_store,
    auth_code_store=token_store,
    client_store=client_store,
    token_generator=oauth2.tokengenerator.Uuid4()
)
# Add Grants you want to support
provider.add_grant(oauth2.grant.AuthorizationCodeGrant(site_adapter=site_adapter))
provider.add_grant(oauth2.grant.ImplicitGrant(site_adapter=site_adapter))
# Add refresh token capability and set expiration time of access tokens to 30 days
provider.add_grant(oauth2.grant.RefreshToken(expires_in=2592000))
# Wrap the controller with the Wsgi adapter
app = oauth2.web.wsgi.Application(provider=provider)

if __name__ == "__main__":
    httpd = make_server('', 8080, app)
    httpd.serve_forever()

# oauth2client (Google) flow objects
flow = OAuth2WebServerFlow(client_id='your_client_id',
                           client_secret='your_client_secret',
                           scope='https://www.googleapis.com/auth/calendar',
                           redirect_uri='http://example.com/auth_return')
flow = flow_from_clientsecrets('path_to_directory/client_secrets.json',
                               scope='https://www.googleapis.com/auth/calendar',
                               redirect_uri='http://example.com/auth_return')


# Demo client application. The class header is reconstructed from the
# ClientApplication() call in run_app_server below; client_id and callback_url
# are inferred from the add_client call in run_auth_server.
class ClientApplication(object):
    client_id = "abc"
    client_secret = "xyz"
    callback_url = "http://localhost:8081/callback"
    api_server_url = "http://localhost:8080"

    def __init__(self):
        self.access_token = None
        self.auth_token = None
        self.token_type = ""

    def __call__(self, env, start_response):
        if env["PATH_INFO"] == "/app":
            status, body, headers = self._serve_application(env)
        elif env["PATH_INFO"] == "/callback":
            status, body, headers = self._read_auth_token(env)
        else:
            status = "301 Moved"
            body = ""
            headers = {"Location": "/app"}
        start_response(status, [(header, val) for header, val in headers.iteritems()])
        return body

    def _request_access_token(self):
        print("Requesting access token...")
        post_params = {"client_id": self.client_id,
                       "client_secret": self.client_secret,
                       "code": self.auth_token,
                       "grant_type": "authorization_code",
                       "redirect_uri": self.callback_url}
        token_endpoint = self.api_server_url + "/token"
        result = urllib.urlopen(token_endpoint, urllib.urlencode(post_params))
        content = ""
        for line in result:
            content += line
        result = json.loads(content)
        self.access_token = result["access_token"]
        self.token_type = result["token_type"]
        confirmation = "Received access token '%s' of type '%s'" % (self.access_token, self.token_type)
        print(confirmation)
        return "302 Found", "", {"Location": "/app"}

    def _read_auth_token(self, env):
        print("Receiving authorization token...")
        query_params = urlparse.parse_qs(env["QUERY_STRING"])
        if "error" in query_params:
            location = "/app?error=" + query_params["error"][0]
            return "302 Found", "", {"Location": location}
        self.auth_token = query_params["code"][0]
        print("Received temporary authorization token '%s'" % (self.auth_token,))
        return "302 Found", "", {"Location": "/app"}

    def _request_auth_token(self):
        print("Requesting authorization token...")
        auth_endpoint = self.api_server_url + "/authorize"
        query = urllib.urlencode({"client_id": "abc",
                                  "redirect_uri": self.callback_url,
                                  "response_type": "code"})
        location = "%s?%s" % (auth_endpoint, query)
        return "302 Found", "", {"Location": location}

    def _serve_application(self, env):
        query_params = urlparse.parse_qs(env["QUERY_STRING"])
        if ("error" in query_params and query_params["error"][0] == "access_denied"):
            return "200 OK", "User has denied access", {}
        if self.access_token is None:
            if self.auth_token is None:
                return self._request_auth_token()
            else:
                return self._request_access_token()
        else:
            confirmation = "Current access token '%s' of type '%s'" % (self.access_token, self.token_type)
            return "200 OK", str(confirmation), {}


# ClientRequestHandler, ClientStore, TokenStore, Provider, Uuid4,
# AuthorizationCodeGrant, TestSiteAdapter, Application, url, OAuth2Handler and
# IOLoop come from the upstream python-oauth2/tornado example and are not
# imported in this extract.
def run_app_server():
    app = ClientApplication()
    try:
        httpd = make_server('', 8081, app, handler_class=ClientRequestHandler)
        print("Starting Client app on http://localhost:8081/...")
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.server_close()


def run_auth_server():
    client_store = ClientStore()
    client_store.add_client(client_id="abc", client_secret="xyz",
                            redirect_uris=["http://localhost:8081/callback"])
    token_store = TokenStore()
    provider = Provider(access_token_store=token_store,
                        auth_code_store=token_store,
                        client_store=client_store,
                        token_generator=Uuid4())
    provider.add_grant(AuthorizationCodeGrant(site_adapter=TestSiteAdapter()))
    try:
        app = Application([
            url(provider.authorize_path, OAuth2Handler, dict(provider=provider)),
            url(provider.token_path, OAuth2Handler, dict(provider=provider)),
        ])
        app.listen(8080)
        print("Starting OAuth2 server on http://localhost:8080/...")
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.close()


def main():
    auth_server = Process(target=run_auth_server)
    auth_server.start()
    app_server = Process(target=run_app_server)
    app_server.start()
    print("Access http://localhost:8081/app in your browser")

    def sigint_handler(signal, frame):
        print("Terminating servers...")
        auth_server.terminate()
        auth_server.join()
        app_server.terminate()
        app_server.join()

    signal.signal(signal.SIGINT, sigint_handler)


if __name__ == "__main__":
    main()

Nugg.ad/Krux API

API scripting for the Krux and nugg.ad DMPs.

import urlparse
from urllib import quote_plus as urlquote
import urllib2
import requests
import json
import re
import time
import MySQLdb
from pandas.io import sql
import sqlalchemy
import os
import pandas as pd
import datetime


headers = {"Accept":"application/json","Content-type":"application/x-www-form-urlencoded; charset=UTF-8","User_Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5"}
baseUrl = 'https://report2.webtrekk.de/cgi-bin/wt/JSONRPC.cgi'

sData = {"params": {},"version": "1.1","method": "getConnectionTest"}
resq = requests.post(baseUrl,data=json.dumps(sData))
key_file = os.environ['HOME'] + '/lav/media/credenza/webtrekk.json'
cred = []
with open(key_file) as f:
    cred = json.load(f)
resq = requests.post(baseUrl,data=json.dumps(cred))
token = resq.json()['result']

query = {"params": {"token": token,"report_name": "Player"},"version": "1.1","method": "getReportData"}
resq = requests.post(baseUrl,data=json.dumps(query))
videoD = resq.json()['result']['analyses'][1]['analysisData']

query = {"params": {"token": token,"report_name": "preroll"},"version": "1.1","method": "getReportData"}
resq = requests.post(baseUrl,data=json.dumps(query))
prerollD = resq.json()['result']['analyses'][0]['analysisData']

videoL = []
roll = 0
partner = 0
for d in range(0,len(videoD)):
    if re.search("roll",videoD[d][1]):
        roll = roll + int(videoD[d][2])
    if re.search("embed",videoD[d][1]):
        partner = partner + int(videoD[d][2])



today = str(resq.json()['result']['analyses'][0]['timeStart'])[0:10]
todayD = datetime.datetime.strptime(today,"%Y-%m-%d")
videoL = pd.DataFrame(
    {"date":today,videoD[0][1]:[videoD[0][2]],videoD[1][1]:[videoD[1][2]],videoD[2][1]:[videoD[2][2]],"preroll":[roll],"embed":[partner],"week":todayD.strftime("%y-%W"),"weekD":todayD.weekday()}
##    ,index=[today]
    ,columns=["date",videoD[0][1],videoD[1][1],videoD[2][1],"preroll","embed","week","weekD"])
videoL.index.name = "idx"

key_file = os.environ['HOME'] + '/lav/media/credenza/intertino.json'
cred = []
with open(key_file) as f:
    cred = json.load(f)

cred = cred['mysql']['intertino']
engine = sqlalchemy.create_engine('mysql://'+cred['user']+':'+cred['pass']+'@'+cred['host']+'/'+cred['db'],echo=False)
conn = engine.connect()
videoL.to_sql('inventory_webtrekk_preroll',conn,if_exists='append',chunksize=100)
conn.close()
			

Webtrekk API

Retrieve and cache data from Webtrekk.
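Since the script calls getReportData twice with the same shape, a small helper makes the JSON-RPC pattern explicit; it reuses the baseUrl, token and report names already present in the script above, nothing else is assumed.

def wtReport(token, report_name):
    ## one JSON-RPC call per Webtrekk report
    body = {"params": {"token": token, "report_name": report_name}, "version": "1.1", "method": "getReportData"}
    return requests.post(baseUrl, data=json.dumps(body)).json()['result']

playerRep = wtReport(token, "Player")
prerollRep = wtReport(token, "preroll")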

#!/usr/bin/env Rscript
setwd('/home/sabeiro/lav/media')
source('script/graphEnv.R')
library(twitteR)
library(RSentiment)
library(wordcloud)
library(XML)
pal2 <- brewer.pal(8,"Dark2")
## require('devtools')
## install_github('mananshah99/sentR')
require('sentR')
lapply(c('twitteR','lubridate','network','sna','qdap','tm','rjson'),library, character.only = TRUE)
set.seed(95616)

cred <- fromJSON(paste(readLines("credenza/twitter.json"),collapse=""))

download.file(url="http://curl.haxx.se/ca/cacert.pem", destfile="cacert.pem")
options(httr_oauth_cache=T) #This will enable the use of a local file to cache OAuth access credentials between R sessions.
setup_twitter_oauth(cred$consumer_key,cred$consumer_secret,cred$access_token,cred$access_secret)
##r_stats <- searchTwitter("#Rstats", n=1500, cainfo="cacert.pem")
startDate <- '2016-12-13'
r_stats <- NULL
r_stats <- rbind(r_stats,searchTwitter("@startup_italia ", n=1500,since = startDate))

r_stats <- NULL
r_stats <- rbind(r_stats,searchTwitter("@MakerFaireRome", n=3000,since = startDate))
## saveRDS(r_stats, 'raw/Tweets.RDS')
## r_stats= readRDS('raw/Tweets.RDS')
twitD = twListToDF(r_stats)
twitD$text <- sapply(r_stats, function(x) x$getText())
twitD$text <- iconv(twitD$text, to = "ascii",sub="") 
twitD <- twitD[!is.na(twitD$text),]
twitD$text <- tryTolower(twitD$text)
twitD$text <- gsub('(http.*\\s*)[^[:blank:]]+', '',twitD$text)
##twitD$text <- gsub('(@|#)[^[:blank:]]+', '',twitD$text)
##gsub(" ?(f|ht)tp(s?)://(.*)[.][a-z]+", "", x)
twitD$text <- gsub('(\\.|!|\\?)\\s+|(\\++)',' ',twitD$text) 
twitD$text <- gsub("[[:punct:]]","",twitD$text)
twitD$text <- gsub("[[:digit:]]","",twitD$text)
twitD$text <- gsub("\\n","",twitD$text)
write.csv(twitD,"raw/twitter12.csv")

stopwordsIt <- read.csv("out/train/stopwords_it.csv",header=F)$V1
twitD$text = removeWords(twitD$text, stopwordsIt)
sentIt <- read.csv("out/train/sentiment_it.csv")

# Zoom in on conference day
p = ggplot(twitD, aes(created)) + 
    geom_density(aes(fill = isRetweet), alpha = .5) +
    scale_fill_discrete(guide = 'none') +
    xlab('All tweets')
p
if(TRUE){##polarity
# Split into retweets and original tweets
sp = split(twitD, twitD$isRetweet)
orig = sp[['FALSE']]
# Extract the retweets and pull the original author's screenname
rt = mutate(sp[['TRUE']], sender = substr(text, 5, regexpr(':', text) - 1))

pol = polarity(orig$text,negators=c("no","non"),amplifiers=unique(sentIt$positive),deamplifiers=unique(sentIt$negative))

head(pol$all$neg.words,200)

out <- classify.aggregate(twitD$text, unique(sentIt$positive), unique(sentIt$negative))
out <- classify.naivebayes(twitD$text)
write.csv(out,"out/twitterUD.csv")

orig$emotionalValence = sapply(pol, function(x) x$all$polarity)
# As reality check, what are the most and least positive tweets
orig$text[which.max(orig$emotionalValence)]
## [1] "Hey, this Open Science Framework sounds like a great way to  collaborate openly! Where do I sign up? Here: https://t.co/9oAClb0hCP #MSST2016"
orig$text[which.min(orig$emotionalValence)]
## [1] "1 Replications are boring 2 replications are attack 3 reputations will suffer 4 only easy ones will be done 5 bad studies are bad #MSST2016"
# How does emotionalValence change over the day?
ggplot(orig,aes(created, emotionalValence)) +
    geom_point() + 
    geom_smooth(span = .5)


ggplot(orig, aes(x = emotionalValence, y = retweetCount)) +
    geom_point(position = 'jitter') +
    geom_smooth()

polWordTables = 
    sapply(pol, function(p) {
        words = c(positiveWords = paste(p[[1]]$pos.words[[1]], collapse = ' '), 
                  negativeWords = paste(p[[1]]$neg.words[[1]], collapse = ' '))
        gsub('-', '', words)  # Get rid of nothing found's "-"
    }) %>%
    apply(1, paste, collapse = ' ') %>% 
    stripWhitespace() %>% 
    strsplit(' ') %>%
    sapply(table)

par(mfrow = c(1, 2))
invisible(
    lapply(1:2, function(i) {
    dotchart(sort(polWordTables[[i]]), cex = .8)
    mtext(names(polWordTables)[i])
    }))

polSplit = split(orig, sign(orig$emotionalValence))
polText = sapply(polSplit, function(df) {
    paste(tolower(df$text), collapse = ' ') %>%
        gsub(' (http|@)[^[:blank:]]+', '', .) %>%
        gsub('[[:punct:]]', '', .)
    }) %>%
    structure(names = c('negative', 'neutral', 'positive'))
head(polSplit)


# remove emotive words
polText['negative'] = removeWords(polText['negative'], names(polWordTables$negativeWords))
polText['positive'] = removeWords(polText['positive'], names(polWordTables$positiveWords))

# Make a corpus by valence and a wordcloud from it
corp = make_corpus(polText)
col3 = RColorBrewer::brewer.pal(3, 'Paired') # Define some pretty colors, mostly for later
wordcloud::comparison.cloud(as.matrix(TermDocumentMatrix(corp)), 
                            max.words = 100, min.freq = 2, random.order=FALSE, 
                            rot.per = 0, colors = col3, vfont = c("sans serif", "plain"))
}



if(TRUE){##wordcloud
text_corpus <- Corpus(VectorSource(twitD$text))
##text_corpus <- tm_map(text_corpus, removeWords, stopwordsIt)
text_corpus <- tm_map(text_corpus,stemDocument)
##twitD$text_corpus = tm_map(twitD$text_corpus, str_replace_all,"[^[:alnum:]]", " ")
dtm <- DocumentTermMatrix(text_corpus)
##Terms(dtm)
twitW = data.frame(words=colnames((dtm)),freq=colSums(as.matrix(dtm)))
lim = quantile(twitW$freq,0.70)
twitW <- twitW[twitW$freq > lim,]
wordcloud(twitW$words,twitW$freq,max.words=300,colors=brewer.pal(10,"Dark2"),scale=c(3,0.5),random.order=F)
}

## classify_emotion/classify_polarity come from the 'sentiment' package (archived, not on CRAN)
class_emo = classify_emotion(twitD$text, algorithm="bayes", prior=1.0)
emotion = class_emo[,7]
emotion[is.na(emotion)] = "unknown"

# classify polarity
class_pol = classify_polarity(twitD$text, algorithm="bayes")
# get polarity best fit
polarity = class_pol[,4]
# data frame with results
sent_df = data.frame(text=twitD$text, emotion=emotion,polarity=polarity, stringsAsFactors=FALSE)
# sort data frame
sent_df = within(sent_df, emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))


# plot distribution of emotions
ggplot(sent_df, aes(x=emotion)) +
geom_bar(aes(y=..count.., fill=emotion)) +
scale_fill_brewer(palette="Dark2") +
labs(x="emotion categories", y="number of tweets",
     title="Sentiment analysis of tweets (classification by emotion)") +
theme(plot.title = element_text(size=12))

# plot distribution of polarity
ggplot(sent_df, aes(x=polarity)) +
geom_bar(aes(y=..count.., fill=polarity)) +
scale_fill_brewer(palette="RdGy") +
labs(x="polarity categories", y="number of tweets",
     title="Sentiment analysis of tweets (classification by polarity)") +
theme(plot.title = element_text(size=12))

# separating text by emotion
emos = levels(factor(sent_df$emotion))
nemo = length(emos)
emo.docs = rep("", nemo)
for (i in 1:nemo){
   tmp = twitD$text[emotion == emos[i]]
   emo.docs[i] = paste(tmp, collapse=" ")
}

# remove stopwords
emo.docs = removeWords(emo.docs, stopwords("english"))
# create corpus
corpus = Corpus(VectorSource(emo.docs))
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
colnames(tdm) = emos

# comparison word cloud
comparison.cloud(tdm, colors = brewer.pal(nemo, "Dark2"),
   scale = c(3,.5), random.order = FALSE, title.size = 1.5)
twitD_corpus <- tm_map(text_corpus, function(x) removeWords(x, stopwords("italian")))
inspect(twitD_corpus)
wordcloud(twitD_corpus, min.freq=2, max.words=100, random.order=T, colors=pal2)
			

Twitter API

Retrieve and analyze data from Twitter.