import pickle
import nltk

# project modules (import paths assumed from the repository layout)
import Classification
import dataProcess
from RelationAnalyzer import RelationAnalyzer

# get an instance of the classification class
classification = Classification.Classification()
# get the data after preprocessing
processedData = classification.getProcessedData()
# prepare training data from processedData: this yields the Arguments and Links lists of
# (sentence, label) pairs, e.g. [('this is a test sentence', 'Claim')], where the label
# can be 'Claim', 'MajorClaim' or 'Premise'
preTrainingData = classification.prepareTrainingData(processedData)
Arguments, Links = preTrainingData  # assumed: prepareTrainingData returns both lists
# get word features from the training data
Arg_word_features = classification.getWordFeatures(Arguments)
Link_word_features = classification.getWordFeatures(Links)
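# Arg_Extract_features and Link_Extract_features (used below) are defined in the repo's
# "InitiateProcess" method and are not shown in this gist.  A minimal sketch, assuming the
# usual bag-of-words "contains(word)" features over the vocabularies built above
# (illustrative only, not the repo's implementation):
def Arg_Extract_features(sentence):
    # simple whitespace tokenisation for illustration
    tokens = set(sentence.lower().split())
    return {'contains(%s)' % word: (word in tokens) for word in Arg_word_features}

def Link_Extract_features(sentence):
    tokens = set(sentence.lower().split())
    return {'contains(%s)' % word: (word in tokens) for word in Link_word_features}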
# build the training sets by applying the feature extractors to the labelled sentences
ArgumentTraining_set = nltk.classify.apply_features(Arg_Extract_features, Arguments)
LinksTraining_set = nltk.classify.apply_features(Link_Extract_features, Links)
# load a test essay by key: "essay" + the essay index (the essays are in the "bratessays" folder);
# getTestData is expected to return the essay's sentences
test_data = dataProcess.getTestData('essay81')
# load trained classifiers from pickle files
# (the model pickle files are in the "picklefiles" folder; select from the available models)
Arg_naiveclassifier_f = open('picklefiles/Argument_Naivebayes.pickle', "rb")
NaiveArgumentSentimentClassifier = pickle.load(Arg_naiveclassifier_f)
Arg_naiveclassifier_f.close()
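# NaiveLinksSentimentClassifier is used below for the supports/attacks step but is not
# loaded in the original gist.  A minimal sketch, assuming a links model was pickled
# alongside the argument model (the file name "Links_Naivebayes.pickle" is an assumption):
Link_naiveclassifier_f = open('picklefiles/Links_Naivebayes.pickle', "rb")
NaiveLinksSentimentClassifier = pickle.load(Link_naiveclassifier_f)
Link_naiveclassifier_f.close()
# if the pickles are missing, equivalent models could be retrained from the training sets
# built above with standard NLTK calls, e.g. nltk.NaiveBayesClassifier.train(ArgumentTraining_set),
# and saved again with pickle.dump()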
# perform classification and predict a label for each sentence of the essay
# (the other pickled classifiers can be used in the same way)
predictedArgData = []
for sentence in test_data:
    NaivePrediction = classification.getClassifierPrediction(NaiveArgumentSentimentClassifier, sentence, Arg_word_features)
    predictedArg = (sentence, NaivePrediction)
    predictedArgData.append(predictedArg)
    print("Naive Bayes prediction: ", predictedArg)
# separate the claim and premise sentences in the predicted data
claims = []
premises = []
for item in predictedArgData:
    if item[1] == 'Premise':
        premises.append([item[0], item[1]])
    elif item[1] == 'Claim':
        claims.append([item[0], item[1]])
# take the predicted claims and premises and run the sentiment similarity scoring:
# get an instance of RelationAnalyzer and pass in the claims and premises
# (performRelationAnalysis is assumed here to return the scored claim and premise lists used below)
relationAnalyzer = RelationAnalyzer()
scoredClaimsList, scoredPremiseList = relationAnalyzer.performRelationAnalysis(claims, premises)
# finally, perform the supports/attacks relation classification on the scored set,
# comparing each claim against every premise
# NaiveLinksSentimentClassifier classifies "supports"/"attacks" labels and predicts a label
# on the sentence-similarity-scored data
predictedLinkData = []
for claim in scoredClaimsList:
    for premise in scoredPremiseList:
        ClaimNaivePrediction = classification.getClassifierPrediction(NaiveLinksSentimentClassifier, claim, Link_word_features)
        PremiseNaivePrediction = classification.getClassifierPrediction(NaiveLinksSentimentClassifier, premise, Link_word_features)
        if ClaimNaivePrediction == 'supports' and PremiseNaivePrediction == 'supports':
            print('%s:Supports:%s' % (claim, premise))
        if ClaimNaivePrediction == 'attacks' and PremiseNaivePrediction == 'attacks':
            print('%s:Attacks:%s' % (claim, premise))
        # record the claim/premise pair with its predicted link labels
        predictedLink = (claim, premise, ClaimNaivePrediction, PremiseNaivePrediction)
        predictedLinkData.append(predictedLink)