import type {
	IDataObject,
	IExecuteFunctions,
	INodeExecutionData,
	INodeParameters,
	INodeType,
	INodeTypeDescription,
} from 'n8n-workflow';
import { NodeConnectionType, NodeOperationError } from 'n8n-workflow';

import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { HumanMessage } from '@langchain/core/messages';
import { SystemMessagePromptTemplate, ChatPromptTemplate } from '@langchain/core/prompts';
import { OutputFixingParser, StructuredOutputParser } from 'langchain/output_parsers';
import { z } from 'zod';

import { getTracingConfig } from '../../../utils/tracing';
const DEFAULT_SYSTEM_PROMPT_TEMPLATE =
	'You are highly intelligent and accurate sentiment analyzer. Analyze the sentiment of the provided text. Categorize it into one of the following: {categories}. Use the provided formatting instructions. Only output the JSON.';

const DEFAULT_CATEGORIES = 'Positive, Neutral, Negative';
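
// Builds the node's output ports dynamically: one Main output per configured sentiment category.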
const configuredOutputs = (parameters: INodeParameters, defaultCategories: string) => {
	const options = (parameters?.options ?? {}) as IDataObject;
	const categories = (options?.categories as string) ?? defaultCategories;
	const categoriesArray = categories.split(',').map((cat) => cat.trim());

	const ret = categoriesArray.map((cat) => ({ type: NodeConnectionType.Main, displayName: cat }));
	return ret;
};
export class SentimentAnalysis implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Sentiment Analysis',
		name: 'sentimentAnalysis',
		icon: 'fa:balance-scale-left',
		iconColor: 'black',
		group: ['transform'],
		version: 1,
		description: 'Analyze the sentiment of your text',
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Chains', 'Root Nodes'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.sentimentanalysis/',
					},
				],
			},
		},
		defaults: {
			name: 'Sentiment Analysis',
		},
		inputs: [
			{ displayName: '', type: NodeConnectionType.Main },
			{
				displayName: 'Model',
				maxConnections: 1,
				type: NodeConnectionType.AiLanguageModel,
				required: true,
			},
		],
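		// The outputs expression inlines configuredOutputs and evaluates it against the node's
		// parameters at runtime, so the output branches track the configured categories.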
		outputs: `={{(${configuredOutputs})($parameter, "${DEFAULT_CATEGORIES}")}}`,
		properties: [
			{
				displayName: 'Text to Analyze',
				name: 'inputText',
				type: 'string',
				required: true,
				default: '',
				description: 'Use an expression to reference data in previous nodes or enter static text',
				typeOptions: {
					rows: 2,
				},
			},
			{
				displayName:
					'Sentiment scores are LLM-generated estimates, not statistically rigorous measurements. They may be inconsistent across runs and should be used as rough indicators only.',
				name: 'detailedResultsNotice',
				type: 'notice',
				default: '',
				displayOptions: {
					show: {
						'/options.includeDetailedResults': [true],
					},
				},
			},
			{
				displayName: 'Options',
				name: 'options',
				type: 'collection',
				default: {},
				placeholder: 'Add Option',
				options: [
					{
						displayName: 'Sentiment Categories',
						name: 'categories',
						type: 'string',
						default: DEFAULT_CATEGORIES,
						description: 'A comma-separated list of categories to analyze',
						noDataExpression: true,
						typeOptions: {
							rows: 2,
						},
					},
					{
						displayName: 'System Prompt Template',
						name: 'systemPromptTemplate',
						type: 'string',
						default: DEFAULT_SYSTEM_PROMPT_TEMPLATE,
						description: 'String to use directly as the system prompt template',
						typeOptions: {
							rows: 6,
						},
					},
					{
						displayName: 'Include Detailed Results',
						name: 'includeDetailedResults',
						type: 'boolean',
						default: false,
						description:
							'Whether to include sentiment strength and confidence scores in the output',
					},
					{
						displayName: 'Enable Auto-Fixing',
						name: 'enableAutoFixing',
						type: 'boolean',
						default: true,
						description:
							'Whether to enable auto-fixing (may trigger an additional LLM call if output is broken)',
					},
				],
			},
		],
	};
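
	/**
	 * Classifies each item's text with the connected language model and routes the item
	 * to the output branch that matches the detected sentiment category.
	 */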
	async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
		const items = this.getInputData();

		const llm = (await this.getInputConnectionData(
			NodeConnectionType.AiLanguageModel,
			0,
		)) as BaseLanguageModel;

		const returnData: INodeExecutionData[][] = [];
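
		// Process items one at a time so per-item errors can be handled without aborting the whole run.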
		for (let i = 0; i < items.length; i++) {
			try {
				const sentimentCategories = this.getNodeParameter(
					'options.categories',
					i,
					DEFAULT_CATEGORIES,
				) as string;

				const categories = sentimentCategories
					.split(',')
					.map((cat) => cat.trim())
					.filter(Boolean);

				if (categories.length === 0) {
					throw new NodeOperationError(this.getNode(), 'No sentiment categories provided', {
						itemIndex: i,
					});
				}
				// Initialize returnData with empty arrays for each category
				if (returnData.length === 0) {
					returnData.push(...Array.from({ length: categories.length }, () => []));
				}
				const options = this.getNodeParameter('options', i, {}) as {
					systemPromptTemplate?: string;
					includeDetailedResults?: boolean;
					enableAutoFixing?: boolean;
				};
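
				// Expected structured output: one of the configured categories plus
				// strength and confidence scores between 0 and 1.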
				const schema = z.object({
					sentiment: z.enum(categories as [string, ...string[]]),
					strength: z
						.number()
						.min(0)
						.max(1)
						.describe('Strength score for sentiment in relation to the category'),
					confidence: z.number().min(0).max(1),
				});
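
				// When auto-fixing is enabled, malformed output triggers an additional LLM call
				// that tries to repair it before parsing fails.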
				const structuredParser = StructuredOutputParser.fromZodSchema(schema);
				const parser = options.enableAutoFixing
					? OutputFixingParser.fromLLM(llm, structuredParser)
					: structuredParser;
				const systemPromptTemplate = SystemMessagePromptTemplate.fromTemplate(
					`${options.systemPromptTemplate ?? DEFAULT_SYSTEM_PROMPT_TEMPLATE}
{format_instructions}`,
				);
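
				// The system message carries the category list and the parser's format
				// instructions; the raw input text is sent as a human message.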
				const input = this.getNodeParameter('inputText', i) as string;
				const inputPrompt = new HumanMessage(input);

				const messages = [
					await systemPromptTemplate.format({
						categories: sentimentCategories,
						format_instructions: parser.getFormatInstructions(),
					}),
					inputPrompt,
				];

				const prompt = ChatPromptTemplate.fromMessages(messages);
				const chain = prompt.pipe(llm).pipe(parser).withConfig(getTracingConfig(this));
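
				// Run the chain and push the item to the output branch of the matched category.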
				try {
					const output = await chain.invoke(messages);

					const sentimentIndex = categories.findIndex(
						(s) => s.toLowerCase() === output.sentiment.toLowerCase(),
					);

					if (sentimentIndex !== -1) {
						const resultItem = { ...items[i] };
						const sentimentAnalysis: IDataObject = {
							category: output.sentiment,
						};
						if (options.includeDetailedResults) {
							sentimentAnalysis.strength = output.strength;
							sentimentAnalysis.confidence = output.confidence;
						}
						resultItem.json = {
							...resultItem.json,
							sentimentAnalysis,
						};
						returnData[sentimentIndex].push(resultItem);
					}
				} catch (error) {
					throw new NodeOperationError(
						this.getNode(),
						'Error during parsing of LLM output, please check your LLM model and configuration',
						{
							itemIndex: i,
						},
					);
				}
			} catch (error) {
				if (this.continueOnFail()) {
					const executionErrorData = this.helpers.constructExecutionMetaData(
						this.helpers.returnJsonArray({ error: error.message }),
						{ itemData: { item: i } },
					);
					returnData[0].push(...executionErrorData);
					continue;
				}
				throw error;
			}
		}

		return returnData;
	}
}