JavaScript SDK

The HelpingAI JavaScript SDK provides a convenient way to interact with the HelpingAI API from JavaScript and Node.js applications. It includes full TypeScript support and all the features you need to build emotionally intelligent applications.

Installation

Install the HelpingAI JavaScript SDK using npm:

Bash
npm install helpingai

Or using yarn:

Bash
yarn add helpingai

Requirements

  • Node.js 14+
  • Modern browsers with fetch support

Quick Start

Basic Setup (Node.js)

JavaScript
import { HelpingAI } from 'helpingai';

// Initialize the client
const client = new HelpingAI({
  apiKey: 'your-api-key-here'
});

// Make your first request
async function main() {
  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [
      {role: 'user', content: "Hello! I'm excited to try HelpingAI."}
    ]
  });

  console.log(response.choices[0].message.content);
}

// Attach a rejection handler so an API failure doesn't become an
// unhandled promise rejection (which terminates modern Node versions).
main().catch(console.error);

Environment Variables

You can also set your API key as an environment variable:

Bash
export HELPINGAI_API_KEY="your-api-key-here"
JavaScript
import { HelpingAI } from 'helpingai';

// Client will automatically use HELPINGAI_API_KEY
const client = new HelpingAI();

// Top-level await requires an ES module (".mjs" or "type": "module")
const response = await client.chat.completions.create({
  model: 'Dhanishtha-2.0-preview',
  messages: [
    {role: 'user', content: 'Hello!'}
  ]
});

Browser Usage

HTML
<!DOCTYPE html>
<html>
<head>
    <title>HelpingAI Browser Example</title>
</head>
<body>
    <script type="module">
        import { HelpingAI } from 'https://cdn.skypack.dev/helpingai';

        // WARNING: an API key embedded in browser code is visible to every
        // visitor. For production, proxy requests through your backend.
        const client = new HelpingAI({
          apiKey: 'your-api-key-here'
        });

        async function chat() {
          const response = await client.chat.completions.create({
            model: 'Dhanishtha-2.0-preview',
            messages: [
              {role: 'user', content: 'Hello from the browser!'}
            ]
          });

          // Use textContent, not innerHTML: model output is untrusted
          // text, and assigning it to innerHTML is an XSS vector.
          document.body.textContent = response.choices[0].message.content;
        }

        chat().catch(console.error);
    </script>
</body>
</html>

Core Features

Chat Completions

Generate conversational responses with emotional intelligence:

JavaScript
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key'
});

// Sends one chat completion request and reports the reply plus token usage.
async function basicChat() {
  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [
      {role: 'system', content: 'You are a helpful assistant.'},
      {role: 'user', content: "I'm feeling overwhelmed with work today."}
    ],
    temperature: 0.7,
    max_tokens: 200
  });

  console.log(response.choices[0].message.content);
  console.log(`Tokens used: ${response.usage.total_tokens}`);
}

// Surface API failures instead of leaving a floating promise
basicChat().catch(console.error);

Streaming Responses

Get real-time responses as they're generated:

JavaScript
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key'
});

// Streams a completion and prints tokens to stdout as they arrive.
async function streamingChat() {
  const stream = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [
      {role: 'user', content: 'Tell me a story about courage'}
    ],
    stream: true,
    temperature: 0.8,
    max_tokens: 500
  });

  process.stdout.write('AI Response: ');
  for await (const chunk of stream) {
    if (chunk.choices[0]?.delta?.content) {
      process.stdout.write(chunk.choices[0].delta.content);
    }
  }
  console.log(); // New line
}

// Catch network/API errors instead of leaving a floating promise
streamingChat().catch(console.error);

Chain of Recursive Thoughts

Reveal the model's step-by-step reasoning by setting the hideThink parameter to false:

JavaScript
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key'
});

// Requests a completion with the model's reasoning trace left visible.
async function showReasoning() {
  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [
      {role: 'user', content: "What's 15 * 24? Show your work."}
    ],
    hideThink: false,  // Show reasoning process
    temperature: 0.3,
    max_tokens: 400
  });

  console.log(response.choices[0].message.content);
}

// Catch errors instead of leaving a floating promise
showReasoning().catch(console.error);

Tool Calling

Execute functions during conversations:

JavaScript
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key'
});

function getWeather(location) {
  // Mock weather function
  return `The weather in ${location} is sunny and 72°F`;
}

// Demonstrates the full tool-calling round trip: request -> tool call ->
// tool result -> final model answer.
async function toolCalling() {
  const tools = [{
    type: "function",
    function: {
      name: "get_weather",
      description: "Get current weather for a location",
      parameters: {
        type: "object",
        properties: {
          location: {type: "string", description: "City name"}
        },
        required: ["location"]
      }
    }
  }];

  const messages = [
    {role: 'user', content: "What's the weather like in Tokyo?"}
  ];

  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: messages,
    tools: tools,
    tool_choice: "auto"
  });

  // Handle tool calls
  const message = response.choices[0].message;
  if (message.tool_calls) {
    messages.push(message);

    for (const toolCall of message.tool_calls) {
      const functionName = toolCall.function.name;
      const functionArgs = JSON.parse(toolCall.function.arguments);

      // Always produce a string result: pushing `undefined` as tool
      // content (when the model names an unknown function) is invalid.
      let result;
      if (functionName === "get_weather") {
        result = getWeather(functionArgs.location);
      } else {
        result = `Error: unknown function ${functionName}`;
      }

      messages.push({
        role: "tool",
        tool_call_id: toolCall.id,
        content: result
      });
    }

    // Get final response
    const finalResponse = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: messages
    });

    console.log(finalResponse.choices[0].message.content);
  } else {
    // The model answered directly without requesting a tool
    console.log(message.content);
  }
}

toolCalling().catch(console.error);

Models

List and retrieve information about available models:

JavaScript
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key'
});

// Lists every available model, then fetches details for one model.
async function listModels() {
  // List all models
  const models = await client.models.list();
  for (const model of models.data) {
    console.log(`Model: ${model.id}`);
    console.log(`Created: ${model.created}`);
    console.log(`Owned by: ${model.owned_by}`);
    console.log('---');
  }

  // Get specific model info
  const model = await client.models.retrieve('Dhanishtha-2.0-preview');
  console.log(`Model ID: ${model.id}`);
  console.log(`Created: ${model.created}`);
}

// Report failures instead of leaving a floating promise
listModels().catch(console.error);

Advanced Usage

Custom Configuration

Configure the client with custom settings:

JavaScript
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key',
  baseURL: 'https://api.helpingai.co/v1',  // Custom base URL
  timeout: 30000,  // Request timeout in milliseconds
  maxRetries: 3,   // Number of retries on failure
  defaultHeaders: {
    'User-Agent': 'MyApp/1.0'  // Extra headers sent with every request
  }
});

Error Handling

Handle API errors gracefully:

JavaScript
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key'
});

// Maps HTTP status codes surfaced by the SDK to user-friendly log output.
async function handleErrors() {
  try {
    const completion = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [
        {role: 'user', content: 'Hello!'}
      ]
    });
    console.log(completion.choices[0].message.content);

  } catch (err) {
    const status = err.status;
    if (status === 401) {
      console.error('Invalid API key');
    } else if (status === 429) {
      console.error('Rate limit exceeded');
    } else if (status >= 500) {
      console.error('Server error');
    } else {
      console.error('API error:', err.message);
    }
  }
}

handleErrors();

Streaming in Browser

JavaScript
import { HelpingAI } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key'
});

// Streams a completion into the #response element, updating it per chunk.
async function browserStreaming() {
  const responseDiv = document.getElementById('response');
  // Fail fast with a clear message rather than a TypeError mid-stream
  if (!responseDiv) {
    throw new Error('Missing #response element in the page');
  }

  const stream = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [
      {role: 'user', content: 'Write a poem about the ocean'}
    ],
    stream: true,
    temperature: 0.8
  });

  let fullResponse = '';
  for await (const chunk of stream) {
    if (chunk.choices[0]?.delta?.content) {
      fullResponse += chunk.choices[0].delta.content;
      // textContent keeps untrusted model output inert (no HTML parsing)
      responseDiv.textContent = fullResponse;
    }
  }
}

React Integration

JSX
import React, { useState } from 'react';
import { HelpingAI } from 'helpingai';

// NOTE(review): bundling an API key into client-side code exposes it to
// every user; proxy these calls through your backend in production.
const client = new HelpingAI({
  apiKey: process.env.REACT_APP_HELPINGAI_API_KEY
});

// Minimal chat UI: keeps the transcript in state and appends the
// assistant's reply after each successful send.
function ChatComponent() {
  const [messages, setMessages] = useState([]);
  const [input, setInput] = useState('');
  const [loading, setLoading] = useState(false);

  const sendMessage = async () => {
    if (!input.trim()) return;

    const userMessage = { role: 'user', content: input };
    const newMessages = [...messages, userMessage];
    setMessages(newMessages);
    setInput('');
    setLoading(true);

    try {
      const response = await client.chat.completions.create({
        model: 'Dhanishtha-2.0-preview',
        messages: newMessages
      });

      const assistantMessage = {
        role: 'assistant',
        content: response.choices[0].message.content
      };

      setMessages([...newMessages, assistantMessage]);
    } catch (error) {
      console.error('Error:', error);
    } finally {
      setLoading(false);
    }
  };

  return (
    <div>
      <div>
        {messages.map((msg, index) => (
          <div key={index}>
            <strong>{msg.role}:</strong> {msg.content}
          </div>
        ))}
      </div>
      {/* onKeyDown replaces onKeyPress, which is deprecated in React */}
      <input
        value={input}
        onChange={(e) => setInput(e.target.value)}
        onKeyDown={(e) => e.key === 'Enter' && sendMessage()}
        disabled={loading}
      />
      <button onClick={sendMessage} disabled={loading}>
        {loading ? 'Sending...' : 'Send'}
      </button>
    </div>
  );
}

export default ChatComponent;

TypeScript Support

The SDK includes full TypeScript support:

TypeScript
import { HelpingAI, ChatCompletion, ChatCompletionMessage } from 'helpingai';

const client = new HelpingAI({
  apiKey: 'your-api-key'
});

/** Sends a fixed greeting and returns the assistant's reply text. */
async function typedChat(): Promise<string> {
  const completion: ChatCompletion = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [
      { role: 'user', content: 'Hello!' }
    ]
  });

  const reply: ChatCompletionMessage = completion.choices[0].message;
  return reply.content;
}

Type Definitions

TypeScript
/** A complete chat completion response. */
interface ChatCompletion {
  id: string;          // Unique completion identifier
  object: string;      // Object type tag
  created: number;     // Creation timestamp
  model: string;       // Model that produced the completion
  choices: Choice[];   // One entry per generated completion
  usage: Usage;        // Token accounting for this request
}

/** One generated alternative within a completion. */
interface Choice {
  index: number;                    // Position within `choices`
  message: ChatCompletionMessage;   // The generated message
  finish_reason: string;            // Why generation stopped
}

/** A single message in a conversation. */
interface ChatCompletionMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string;
  tool_calls?: ToolCall[];   // Present when the model requested tools
}

/** Token usage for a request/response pair. */
interface Usage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

Best Practices

1. Connection Pooling

JavaScript
import { HelpingAI } from 'helpingai';

// Create a single client instance and reuse it
const client = new HelpingAI({
  apiKey: 'your-api-key'
});

// Use the same client for multiple requests
async function multipleRequests() {
  const promises = [];
  
  for (let i = 0; i < 5; i++) {
    promises.push(
      client.chat.completions.create({
        model: 'Dhanishtha-2.0-preview',
        messages: [{role: 'user', content: `Request ${i}`}]
      })
    );
  }
  
  const responses = await Promise.all(promises);
  responses.forEach((response, index) => {
    console.log(`Response ${index}: ${response.choices[0].message.content}`);
  });
}

2. Rate Limiting

JavaScript
// Sliding-window rate limiter: tracks request timestamps from the last
// 60 seconds and delays callers once the per-minute limit is reached.
class RateLimiter {
  constructor(requestsPerMinute = 60) {
    this.requests = [];
    this.limit = requestsPerMinute;
  }

  // Resolves immediately when under the limit; otherwise sleeps until the
  // oldest tracked request ages out of the window, then re-checks.
  async throttle() {
    const now = Date.now();
    this.requests = this.requests.filter((stamp) => now - stamp < 60000);

    if (this.requests.length < this.limit) {
      this.requests.push(now);
      return;
    }

    const delay = 60000 - (now - this.requests[0]);
    await new Promise((resolve) => setTimeout(resolve, delay));
    return this.throttle();
  }
}

// Shared limiter instance used by the request helper below.
const rateLimiter = new RateLimiter(60);

// Waits for a rate-limit slot, then performs a single chat request.
async function rateLimitedRequest() {
  await rateLimiter.throttle();

  const reply = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: [{role: 'user', content: 'Hello!'}]
  });

  return reply;
}

3. Token Management

JavaScript
// Approximate token count using the ~4-characters-per-token heuristic.
function estimateTokens(text) {
  return Math.ceil(text.length / 4);
}

// Trims conversation history to an estimated token budget, keeping the
// most recent messages, then sends the trimmed list to the API.
async function managedChat(messages, maxTokens = 4000) {
  let totalTokens = 0;
  const managedMessages = [];

  // Walk the history newest-first. Iterate over a copy: the original code
  // called messages.reverse(), which mutates the caller's array in place.
  for (const message of [...messages].reverse()) {
    const messageTokens = estimateTokens(message.content);
    if (totalTokens + messageTokens > maxTokens) break;

    managedMessages.unshift(message);
    totalTokens += messageTokens;
  }

  const response = await client.chat.completions.create({
    model: 'Dhanishtha-2.0-preview',
    messages: managedMessages
  });

  return response;
}

Migration from OpenAI

If you're migrating from the OpenAI JavaScript library:

JavaScript
// Before (OpenAI)
import OpenAI from 'openai';
const openai = new OpenAI({ apiKey: 'openai-key' });

// After (HelpingAI)
import { HelpingAI } from 'helpingai';
const client = new HelpingAI({ apiKey: 'helpingai-key' });

// The API calls remain the same!
const response = await client.chat.completions.create({
  model: 'Dhanishtha-2.0-preview',  // Just change the model
  messages: [{role: 'user', content: 'Hello!'}]
});

Examples Repository

Find more examples in our GitHub repository.

Support

Next Steps