
Complete Guide to Background Removal Tools | The Latest in AI Person Cutout and Transparency Processing

A roughly 4,500-character guide to professional-grade background removal: AI background removal, U-Net and DeepLab, precise hair cutout, transparent PNG generation, batch processing, edge refinement, and chroma key compositing.


Complete Guide to Background Removal Tools

Introduction: How AI Transformed Background Removal

Thanks to advances in AI and computer vision, background removal has evolved dramatically from manual cutout work to fully automated processing, and it is now used across e-commerce, social media, design production, and many other fields. This article is a comprehensive walkthrough, from the latest AI background removal techniques down to practical implementation.

Chapter 1: Technical Foundations of AI Background Removal

1.1 Semantic Segmentation

A U-Net-style encoder-decoder network predicts, for every pixel, the probability that it belongs to the subject; thresholding that probability map yields the cutout mask. The class below wraps such a model behind a simple API.

Implementing the U-Net Architecture

const tf = require('@tensorflow/tfjs-node');

class BackgroundRemover {
  constructor() {
    this.model = null;
    this.inputSize = 512;
  }

  async loadModel(modelPath) {
    // Load the pre-trained U-Net segmentation model
    this.model = await tf.loadLayersModel(modelPath);

    return { success: true, inputSize: this.inputSize };
  }

  async removeBackground(imagePath, options = {}) {
    const {
      outputFormat = 'png',
      quality = 0.9,
      smoothEdges = true,
      refineHair = true,
      addShadow = false
    } = options;

    // Preprocess the image into a normalized tensor
    const inputTensor = await this.preprocessImage(imagePath);

    // Run model inference to get the raw mask
    const maskTensor = await this.model.predict(inputTensor);

    // Post-process the raw mask
    const refinedMask = await this.postprocessMask(
      maskTensor,
      { smoothEdges, refineHair }
    );

    // Apply the mask to cut out the background
    const result = await this.applyMask(imagePath, refinedMask, options);

    // Free tensor resources
    inputTensor.dispose();
    maskTensor.dispose();
    refinedMask.dispose();

    return result;
  }

  async preprocessImage(imagePath) {
    const sharp = require('sharp');

    // Load, resize, and force 3 RGB channels (drop any alpha)
    const imageBuffer = await sharp(imagePath)
      .resize(this.inputSize, this.inputSize)
      .removeAlpha()
      .raw()
      .toBuffer();

    // Convert the raw buffer to a tensor
    const imageTensor = tf.tensor3d(
      new Uint8Array(imageBuffer),
      [this.inputSize, this.inputSize, 3]
    );

    // Normalize to [0, 1]
    const normalized = imageTensor.div(255.0);

    // Add the batch dimension
    const batched = normalized.expandDims(0);

    imageTensor.dispose();
    normalized.dispose();

    return batched;
  }

  async postprocessMask(maskTensor, options) {
    let processedMask = maskTensor;

    if (options.smoothEdges) {
      // Smooth the mask edges with a Gaussian blur
      processedMask = tf.depthwiseConv2d(
        processedMask,
        this.createGaussianKernel(5, 1.0),
        [1, 1],
        'same'
      );
    }

    if (options.refineHair) {
      // Refine fine hair detail
      processedMask = await this.refineHairDetails(processedMask);
    }

    // Binarize the mask
    const binaryMask = tf.greater(processedMask, tf.scalar(0.5));

    return binaryMask.cast('float32');
  }

  createGaussianKernel(size, sigma) {
    const kernel = tf.buffer([size, size, 1, 1]);
    const center = Math.floor(size / 2);

    for (let x = 0; x < size; x++) {
      for (let y = 0; y < size; y++) {
        const distance = Math.pow(x - center, 2) + Math.pow(y - center, 2);
        const value = Math.exp(-distance / (2 * sigma * sigma));
        kernel.set(value, x, y, 0, 0);
      }
    }

    // Normalize so the kernel weights sum to 1
    const sum = kernel.values.reduce((a, b) => a + b, 0);
    for (let i = 0; i < kernel.values.length; i++) {
      kernel.values[i] /= sum;
    }

    return kernel.toTensor();
  }

  async refineHairDetails(maskTensor) {
    // Detect and refine the hair region
    const hairMask = await this.detectHairRegion(maskTensor);

    // Apply an edge-preserving filter
    const edgePreserved = await this.applyEdgePreservingFilter(
      maskTensor,
      hairMask
    );

    return edgePreserved;
  }

  async detectHairRegion(maskTensor) {
    // Locate hair-like regions via gradient (texture) analysis;
    // tfjs expects flat values when an explicit shape is given
    const sobelX = tf.conv2d(
      maskTensor,
      tf.tensor4d([-1, 0, 1, -2, 0, 2, -1, 0, 1], [3, 3, 1, 1]),
      1,
      'same'
    );

    const sobelY = tf.conv2d(
      maskTensor,
      tf.tensor4d([-1, -2, -1, 0, 0, 0, 1, 2, 1], [3, 3, 1, 1]),
      1,
      'same'
    );

    const magnitude = tf.sqrt(tf.add(tf.square(sobelX), tf.square(sobelY)));

    // High gradient magnitude marks hair-like texture
    const hairMask = tf.greater(magnitude, tf.scalar(0.1));

    sobelX.dispose();
    sobelY.dispose();
    magnitude.dispose();

    return hairMask.cast('float32');
  }
}
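
For reference, here is a minimal usage sketch of the class above. The model path and input file are hypothetical placeholders; a pre-trained segmentation model in TensorFlow.js layers format is assumed.

// Usage sketch — model path and input image are placeholders
const remover = new BackgroundRemover();

(async () => {
  await remover.loadModel('file://./models/u-net/model.json');

  const result = await remover.removeBackground('./input/portrait.jpg', {
    outputFormat: 'png',
    smoothEdges: true,
    refineHair: true
  });

  console.log('Background removed:', result);
})();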

1.2 The GrabCut Algorithm

Combining AI with a Classical Method

class GrabCutProcessor {
  constructor() {
    this.iterations = 5;
    this.gamma = 50;
  }

  async grabCut(imagePath, maskPath, options = {}) {
    const {
      foregroundColor = [255, 255, 255],
      backgroundColor = [0, 0, 0],
      iterations = this.iterations
    } = options;

    const cv = require('opencv4nodejs');

    // Load the image and the rough mask
    const image = cv.imread(imagePath);
    const initialMask = cv.imread(maskPath, cv.IMREAD_GRAYSCALE);

    // Initialize the GrabCut label mask (probable background everywhere)
    const mask = new cv.Mat(image.rows, image.cols, cv.CV_8UC1, cv.GC_PR_BGD);

    // Mark certain foreground (bright in the rough mask) and certain background (dark)
    mask.setTo(cv.GC_FGD, initialMask.threshold(200, 255, cv.THRESH_BINARY));
    mask.setTo(cv.GC_BGD, initialMask.threshold(50, 255, cv.THRESH_BINARY_INV));

    // Compute a bounding box around the subject
    const bbox = this.calculateBoundingBox(initialMask);

    // Run GrabCut, initializing from the seeded mask rather than the rectangle
    // (in opencv4nodejs, grabCut is a Mat method)
    const bgdModel = new cv.Mat(1, 65, cv.CV_64FC1);
    const fgdModel = new cv.Mat(1, 65, cv.CV_64FC1);

    image.grabCut(
      mask,
      bbox,
      bgdModel,
      fgdModel,
      iterations,
      cv.GC_INIT_WITH_MASK
    );

    // Keep definite and probable foreground labels (GC_FGD = 1, GC_PR_FGD = 3),
    // excluding probable background (GC_PR_BGD = 2)
    const result = mask
      .inRange(cv.GC_FGD, cv.GC_FGD)
      .bitwiseOr(mask.inRange(cv.GC_PR_FGD, cv.GC_PR_FGD));

    return result;
  }

  calculateBoundingBox(mask) {
    const cv = require('opencv4nodejs');
    const contours = mask.findContours(cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE);

    if (contours.length === 0) {
      return new cv.Rect(0, 0, mask.cols, mask.rows);
    }

    // Pick the largest contour
    const maxContour = contours.reduce((max, contour) =>
      contour.area > max.area ? contour : max
    );

    return maxContour.boundingRect();
  }

  // Interactive refinement from user scribbles
  async interactiveRefinement(imagePath, scribbles) {
    const cv = require('opencv4nodejs');
    const image = cv.imread(imagePath);

    // Build a seed mask from the scribbles
    const mask = this.createMaskFromScribbles(image, scribbles);

    // Grow the seed regions with random-walker segmentation
    const refinedMask = await this.randomWalkerSegmentation(image, mask);

    return refinedMask;
  }

  createMaskFromScribbles(image, scribbles) {
    const cv = require('opencv4nodejs');
    const mask = new cv.Mat(image.rows, image.cols, cv.CV_8UC1, 0);

    scribbles.forEach(scribble => {
      const color = scribble.type === 'foreground' ? 255 : 128;
      const points = scribble.points.map(p => new cv.Point2(p.x, p.y));

      for (let i = 0; i < points.length - 1; i++) {
        mask.drawLine(points[i], points[i + 1], new cv.Vec3(color, color, color), 3);
      }
    });

    return mask;
  }

  async randomWalkerSegmentation(image, seedMask) {
    // Random-walker segmentation (matrix helpers elided for brevity)
    const height = image.rows;
    const width = image.cols;

    // Build the graph Laplacian from pixel affinities
    const laplacian = this.buildLaplacianMatrix(image);

    // Boundary conditions from the seed regions
    const seeds = this.extractSeeds(seedMask);

    // Solve the resulting linear system
    const probabilities = await this.solveLaplacianSystem(
      laplacian,
      seeds,
      width,
      height
    );

    // Turn the probability map into a segmentation
    const result = this.generateSegmentationResult(probabilities);

    return result;
  }
}
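
A usage sketch for the class above; the file paths are hypothetical placeholders, and the rough mask is assumed to be a grayscale image in which bright pixels mark likely foreground.

// Usage sketch — paths are placeholders
const processor = new GrabCutProcessor();

(async () => {
  const cv = require('opencv4nodejs');

  // rough-mask.png: >200 = certain foreground, <50 = certain background
  const resultMask = await processor.grabCut('./photo.jpg', './rough-mask.png', {
    iterations: 5
  });

  cv.imwrite('./grabcut-mask.png', resultMask);
})();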

Chapter 2: Advanced Background Removal Techniques

2.1 Precise Hair Processing

Alpha Matting
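
Alpha matting models every observed pixel I as a blend of an unknown foreground color F and an unknown background color B, weighted by an unknown opacity α:

I_p = α_p · F_p + (1 − α_p) · B_p,  with α_p ∈ [0, 1]

The trimap constrains this otherwise underdetermined problem: α is fixed to 1 in the certain-foreground region and 0 in the certain-background region, leaving only the narrow unknown band to be solved. This is what lets semi-transparent structures such as hair strands receive fractional opacity instead of a hard 0/1 mask.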

class AlphaMattingProcessor {
  constructor() {
    this.epsilon = 1e-7;
    this.lambda = 100;
  }

  async generateTrimap(maskPath, dilationSize = 10) {
    const cv = require('opencv4nodejs');
    const mask = cv.imread(maskPath, cv.IMREAD_GRAYSCALE);

    // Build the trimap, initialized to 128 (= unknown) everywhere
    const trimap = new cv.Mat(mask.rows, mask.cols, cv.CV_8UC1, 128);

    // Certain foreground (opencv4nodejs threshold() returns the Mat directly)
    const foreground = mask.threshold(200, 255, cv.THRESH_BINARY);

    // Certain background
    const background = mask.threshold(50, 255, cv.THRESH_BINARY_INV);

    // Erode the certain regions so an unknown band of roughly
    // dilationSize pixels remains around the subject boundary
    const kernel = cv.getStructuringElement(
      cv.MORPH_ELLIPSE,
      new cv.Size(dilationSize * 2 + 1, dilationSize * 2 + 1)
    );

    const erodedForeground = foreground.erode(kernel);
    const erodedBackground = background.erode(kernel);

    // Label the trimap: 255 = foreground, 0 = background, 128 = unknown
    trimap.setTo(255, erodedForeground);
    trimap.setTo(0, erodedBackground);

    return trimap;
  }

  async closedFormMatting(imagePath, trimapPath) {
    const cv = require('opencv4nodejs');

    const image = cv.imread(imagePath);
    const trimap = cv.imread(trimapPath, cv.IMREAD_GRAYSCALE);

    // Convert to double precision in [0, 1]
    // (opencv4nodejs convertTo returns a new Mat rather than writing to an output argument)
    const imageDouble = image.convertTo(cv.CV_64FC3, 1.0 / 255.0);

    // Build the matting Laplacian
    const laplacianMatrix = this.buildMattingLaplacian(imageDouble);

    // Boundary conditions from the trimap
    const constraints = this.extractConstraints(trimap);

    // Closed-form solve for the alpha matte
    const alpha = await this.solveClosedForm(
      laplacianMatrix,
      constraints,
      image.rows,
      image.cols
    );

    return alpha;
  }

  buildMattingLaplacian(image) {
    const height = image.rows;
    const width = image.cols;
    const windowSize = 3;
    const windowRadius = Math.floor(windowSize / 2);

    const laplacian = [];

    // Accumulate per-window Laplacians across the image
    for (let y = windowRadius; y < height - windowRadius; y++) {
      for (let x = windowRadius; x < width - windowRadius; x++) {
        const window = this.extractWindow(image, x, y, windowSize);
        const windowLaplacian = this.computeWindowLaplacian(window);

        this.addToGlobalLaplacian(
          laplacian,
          windowLaplacian,
          x - windowRadius,
          y - windowRadius,
          windowSize,
          width
        );
      }
    }

    return laplacian;
  }

  extractWindow(image, centerX, centerY, windowSize) {
    const radius = Math.floor(windowSize / 2);
    const window = [];

    for (let dy = -radius; dy <= radius; dy++) {
      for (let dx = -radius; dx <= radius; dx++) {
        const y = centerY + dy;
        const x = centerX + dx;

        if (x >= 0 && x < image.cols && y >= 0 && y < image.rows) {
          const pixel = image.at(y, x);
          window.push([pixel.x, pixel.y, pixel.z]); // BGR channel values
        }
      }
    }

    return window;
  }

  computeWindowLaplacian(window) {
    const n = window.length;
    const identity = this.createIdentityMatrix(n);

    // Mean color of the window
    const mean = [0, 0, 0];
    for (const pixel of window) {
      mean[0] += pixel[0] / n;
      mean[1] += pixel[1] / n;
      mean[2] += pixel[2] / n;
    }

    // Covariance of the window colors
    const covariance = this.computeCovariance(window, mean);

    // Regularized inverse (epsilon keeps the matrix well-conditioned)
    const invCovariance = this.invertMatrix3x3(
      this.addEpsilon(covariance, this.epsilon)
    );

    // Per-window matting Laplacian entries
    const laplacian = [];
    for (let i = 0; i < n; i++) {
      laplacian[i] = [];
      for (let j = 0; j < n; j++) {
        const diff_i = [
          window[i][0] - mean[0],
          window[i][1] - mean[1],
          window[i][2] - mean[2]
        ];
        const diff_j = [
          window[j][0] - mean[0],
          window[j][1] - mean[1],
          window[j][2] - mean[2]
        ];

        const dot = this.dotProduct(
          this.matrixVectorMultiply(invCovariance, diff_i),
          diff_j
        );

        laplacian[i][j] = identity[i][j] - (1 + dot) / n;
      }
    }

    return laplacian;
  }

  // Background compositing via Poisson (seamless) blending
  async poissonBlending(foregroundPath, backgroundPath, maskPath) {
    const cv = require('opencv4nodejs');

    const foreground = cv.imread(foregroundPath);
    const background = cv.imread(backgroundPath);
    const mask = cv.imread(maskPath, cv.IMREAD_GRAYSCALE);

    // Center of mass of the mask (moments is a Mat method in opencv4nodejs)
    const moments = mask.moments();
    const centerX = Math.floor(moments.m10 / moments.m00);
    const centerY = Math.floor(moments.m01 / moments.m00);

    // Poisson seamless cloning (bound as a Mat method in opencv4nodejs)
    const result = foreground.seamlessClone(
      background,
      mask,
      new cv.Point2(centerX, centerY),
      cv.NORMAL_CLONE
    );

    return result;
  }
}
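
A sketch of how these pieces chain together; all file paths are hypothetical placeholders.

// Pipeline sketch — paths are placeholders
const matting = new AlphaMattingProcessor();

(async () => {
  const cv = require('opencv4nodejs');

  // 1. Build a trimap from a rough binary mask
  const trimap = await matting.generateTrimap('./rough-mask.png', 10);
  cv.imwrite('./trimap.png', trimap);

  // 2. Solve for the alpha matte in the unknown band
  const alpha = await matting.closedFormMatting('./photo.jpg', './trimap.png');

  // 3. Optionally blend the extracted subject onto a new background
  const composite = await matting.poissonBlending(
    './photo.jpg',
    './new-background.jpg',
    './trimap.png'
  );
  cv.imwrite('./composite.png', composite);
})();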

2.2 Batch Processing and Workflow Automation

Bulk Processing of Large Image Sets

const { Worker } = require('worker_threads');
const path = require('path');
const fs = require('fs').promises;

class BatchBackgroundRemover {
  constructor(options = {}) {
    this.workerCount = options.workerCount || 4;
    this.modelPath = options.modelPath;
    this.workers = [];
    this.jobQueue = [];
    this.results = [];
  }

  async processBatch(imageFiles, options = {}) {
    const {
      outputDir = './output',
      outputFormat = 'png',
      quality = 0.9,
      preserveOriginalSize = true,
      addBorder = false,
      backgroundReplacement = null
    } = options;

    // Create the output directory
    await fs.mkdir(outputDir, { recursive: true });

    // Spin up the worker threads
    await this.initializeWorkers();

    // Build the job queue
    this.jobQueue = imageFiles.map((file, index) => ({
      id: index,
      inputPath: file,
      outputPath: path.join(
        outputDir,
        path.basename(file, path.extname(file)) + '_no_bg.' + outputFormat
      ),
      options: {
        outputFormat,
        quality,
        preserveOriginalSize,
        addBorder,
        backgroundReplacement
      }
    }));

    // Run the batch
    const startTime = Date.now();
    const results = await this.executeJobs();
    const duration = (Date.now() - startTime) / 1000;

    // Shut down the workers
    await this.terminateWorkers();

    // Produce a summary report
    return this.generateBatchReport(results, duration);
  }

  async initializeWorkers() {
    for (let i = 0; i < this.workerCount; i++) {
      const worker = new Worker(`
        const { parentPort } = require('worker_threads');
        const { BackgroundRemover } = require('./background-remover');

        let bgRemover = null;

        parentPort.on('message', async (job) => {
          try {
            if (!bgRemover) {
              bgRemover = new BackgroundRemover();
              await bgRemover.loadModel('${this.modelPath}');
            }

            const result = await bgRemover.removeBackground(
              job.inputPath,
              job.options
            );

            parentPort.postMessage({
              id: job.id,
              success: true,
              result: result,
              inputPath: job.inputPath,
              outputPath: job.outputPath
            });
          } catch (error) {
            parentPort.postMessage({
              id: job.id,
              success: false,
              error: error.message,
              inputPath: job.inputPath
            });
          }
        });
      `, { eval: true });

      worker.on('message', (result) => {
        this.results.push(result);
        this.assignNextJob(worker);
      });

      this.workers.push(worker);
    }
  }

  assignNextJob(worker) {
    if (this.jobQueue.length > 0) {
      const job = this.jobQueue.shift();
      worker.postMessage(job);
    }
  }

  async executeJobs() {
    const totalJobs = this.jobQueue.length;
    this.results = [];

    return new Promise((resolve) => {
      const checkCompletion = setInterval(() => {
        if (this.results.length === totalJobs) {
          clearInterval(checkCompletion);
          resolve(this.results);
        }
      }, 100);

      // Hand each worker its first job
      this.workers.forEach(worker => this.assignNextJob(worker));
    });
  }

  generateBatchReport(results, duration) {
    const successful = results.filter(r => r.success);
    const failed = results.filter(r => !r.success);

    const totalInputSize = successful.reduce(
      (sum, r) => sum + (r.result.originalSize || 0), 0
    );
    const totalOutputSize = successful.reduce(
      (sum, r) => sum + (r.result.processedSize || 0), 0
    );

    return {
      summary: {
        total: results.length,
        successful: successful.length,
        failed: failed.length,
        duration: `${duration.toFixed(2)}s`,
        averageTime: `${(duration / results.length).toFixed(2)}s`,
        processingSpeed: `${(results.length / duration).toFixed(1)} images/sec`
      },
      statistics: {
        totalInputSize: this.formatFileSize(totalInputSize),
        totalOutputSize: this.formatFileSize(totalOutputSize),
        compressionRatio: totalInputSize > 0 ?
          `${((1 - totalOutputSize / totalInputSize) * 100).toFixed(1)}%` : 'N/A'
      },
      failures: failed.map(f => ({
        file: f.inputPath,
        error: f.error
      }))
    };
  }

  formatFileSize(bytes) {
    const units = ['B', 'KB', 'MB', 'GB'];
    let size = bytes;
    let unitIndex = 0;

    while (size >= 1024 && unitIndex < units.length - 1) {
      size /= 1024;
      unitIndex++;
    }

    return `${size.toFixed(2)} ${units[unitIndex]}`;
  }
}
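
A usage sketch; the paths and worker count are hypothetical placeholders, and ./background-remover is assumed to export the BackgroundRemover class from Chapter 1.

// Batch usage sketch — paths are placeholders
const batch = new BatchBackgroundRemover({
  workerCount: 4,
  modelPath: 'file://./models/u-net/model.json'
});

(async () => {
  const report = await batch.processBatch(
    ['./images/a.jpg', './images/b.jpg', './images/c.jpg'],
    { outputDir: './output', outputFormat: 'png' }
  );

  console.log(report.summary);   // totals, duration, images/sec
  console.log(report.failures);  // per-file errors, if any
})();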

Chapter 3: Specialized Background Processing

3.1 Green Screen / Chroma Key Processing

Background Removal by Color Range

class ChromaKeyProcessor {
  constructor() {
    this.defaultChromaColor = { h: 120, s: 100, v: 100 }; // green (hue in degrees, S/V in percent)
    this.tolerance = 0.3;
    this.smoothness = 0.1;
  }

  async chromaKeyRemoval(imagePath, options = {}) {
    const {
      chromaColor = this.defaultChromaColor,
      tolerance = this.tolerance,
      smoothness = this.smoothness,
      spillSuppress = true,
      edgeFeather = 2
    } = options;

    const cv = require('opencv4nodejs');
    const image = cv.imread(imagePath);

    // Convert to the HSV color space
    const hsvImage = image.cvtColor(cv.COLOR_BGR2HSV);

    // Build the chroma-key mask
    const mask = this.createChromaKeyMask(
      hsvImage,
      chromaColor,
      tolerance,
      smoothness
    );

    // Feather the mask edges
    const featheredMask = this.applyFeathering(mask, edgeFeather);

    // Suppress color spill onto the subject
    let processedImage = image;
    if (spillSuppress) {
      processedImage = this.suppressColorSpill(image, chromaColor, mask);
    }

    // Apply the mask as an alpha channel
    const result = this.applyAlphaMask(processedImage, featheredMask);

    return result;
  }

  createChromaKeyMask(hsvImage, chromaColor, tolerance, smoothness) {
    const cv = require('opencv4nodejs');

    // OpenCV stores hue as 0-180, so halve the 0-360 degree value;
    // S/V arrive as percentages and are scaled to 0-255
    const hue = chromaColor.h / 2;
    const hueRange = tolerance * 90;
    const satRange = tolerance * 255;
    const valRange = tolerance * 255;

    // Lower/upper HSV bounds
    const lowerBound = new cv.Vec3(
      Math.max(0, hue - hueRange),
      Math.max(0, chromaColor.s * 2.55 - satRange),
      Math.max(0, chromaColor.v * 2.55 - valRange)
    );

    const upperBound = new cv.Vec3(
      Math.min(180, hue + hueRange),
      Math.min(255, chromaColor.s * 2.55 + satRange),
      Math.min(255, chromaColor.v * 2.55 + valRange)
    );

    // Base in-range mask
    let mask = hsvImage.inRange(lowerBound, upperBound);

    // Morphological close/open to remove noise
    const kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(3, 3));
    mask = mask.morphologyEx(cv.MORPH_CLOSE, kernel);
    mask = mask.morphologyEx(cv.MORPH_OPEN, kernel);

    // Optional smoothing (Gaussian kernel size must be odd)
    if (smoothness > 0) {
      const blurSize = Math.max(1, Math.floor(smoothness * 21)) | 1;
      mask = mask.gaussianBlur(new cv.Size(blurSize, blurSize), 0);
    }

    return mask;
  }

  suppressColorSpill(image, chromaColor, mask) {
    const cv = require('opencv4nodejs');

    // Work on a copy; detect where the chroma color has spilled onto the subject
    const bgrImage = image.copy();
    const spillMask = this.createSpillMask(bgrImage, chromaColor);

    // Desaturate the spill regions only
    const hsvImage = bgrImage.cvtColor(cv.COLOR_BGR2HSV);
    const channels = hsvImage.splitChannels();

    // Halve saturation where spill is detected (convertTo scales by its alpha factor)
    const saturationChannel = channels[1];
    const desaturated = saturationChannel.convertTo(cv.CV_8U, 0.5);
    const adjustedSaturation = saturationChannel.copy();
    desaturated.copyTo(adjustedSaturation, spillMask);

    // Recombine the channels (the Mat constructor accepts an array of single-channel Mats)
    const processedHsv = new cv.Mat([channels[0], adjustedSaturation, channels[2]]);

    const result = processedHsv.cvtColor(cv.COLOR_HSV2BGR);

    return result;
  }

  createSpillMask(image, chromaColor) {
    const cv = require('opencv4nodejs');

    // Detect regions whose hue is close to the chroma color
    // (hue halved to match OpenCV's 0-180 range)
    const hsv = image.cvtColor(cv.COLOR_BGR2HSV);
    const hue = chromaColor.h / 2;
    const spillTolerance = 0.15;

    const lowerSpill = new cv.Vec3(
      Math.max(0, hue - spillTolerance * 90),
      0,
      0
    );

    const upperSpill = new cv.Vec3(
      Math.min(180, hue + spillTolerance * 90),
      255,
      255
    );

    return hsv.inRange(lowerSpill, upperSpill);
  }

  applyFeathering(mask, featherRadius) {
    if (featherRadius <= 0) return mask;

    const cv = require('opencv4nodejs');

    // A Gaussian blur creates the soft edge
    const size = featherRadius * 2 + 1;
    return mask.gaussianBlur(new cv.Size(size, size), featherRadius / 2);
  }

  // Advanced chroma key: auto-detect the screen color
  async autoDetectChromaKey(imagePath, sampleRegions) {
    const cv = require('opencv4nodejs');
    const image = cv.imread(imagePath);
    const hsv = image.cvtColor(cv.COLOR_BGR2HSV);

    const colorSamples = [];

    // Extract the average color of each sample region
    for (const region of sampleRegions) {
      const roi = hsv.getRegion(
        new cv.Rect(region.x, region.y, region.width, region.height)
      );

      const mean = roi.mean();
      // opencv4nodejs mean() returns a Vec4 ordered (w, x, y, z), so w = H, x = S, y = V
      colorSamples.push({
        h: mean.w,
        s: mean.x,
        v: mean.y
      });
    }

    // Pick the dominant color among the samples
    const dominantColor = this.findDominantColor(colorSamples);

    return dominantColor;
  }

  findDominantColor(samples) {
    // A full implementation would cluster with k-means;
    // simplified here to the mean of the samples
    const avgColor = {
      h: samples.reduce((sum, s) => sum + s.h, 0) / samples.length,
      s: samples.reduce((sum, s) => sum + s.s, 0) / samples.length,
      v: samples.reduce((sum, s) => sum + s.v, 0) / samples.length
    };

    return avgColor;
  }
}
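
A usage sketch for a still frame shot against a green screen; the input path is a hypothetical placeholder.

// Usage sketch — input path is a placeholder
const keyer = new ChromaKeyProcessor();

(async () => {
  const keyed = await keyer.chromaKeyRemoval('./greenscreen-frame.png', {
    chromaColor: { h: 120, s: 100, v: 100 }, // green: hue in degrees, S/V in percent
    tolerance: 0.25,
    spillSuppress: true,
    edgeFeather: 2
  });
  // `keyed` holds the subject with the green background keyed out to alpha
})();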

3.2 Background Replacement and Effects

Compositing Backgrounds and Applying Effects

class BackgroundComposer {
  async replaceBackground(foregroundPath, backgroundPath, maskPath, options = {}) {
    const {
      blendMode = 'normal',
      opacity = 1.0,
      scale = 'fill',
      position = 'center',
      addShadow = true,
      shadowOptions = {
        blur: 10,
        offset: { x: 5, y: 5 },
        opacity: 0.3
      }
    } = options;

    const sharp = require('sharp');

    // Load the foreground
    const foreground = sharp(foregroundPath);
    const foregroundMeta = await foreground.metadata();

    // Prepare the background
    let background = sharp(backgroundPath);

    // Resize the background to match the foreground
    background = await this.resizeBackground(
      background,
      foregroundMeta.width,
      foregroundMeta.height,
      scale
    );

    // Apply the cutout mask to the foreground
    const maskedForeground = await this.applyMask(foregroundPath, maskPath);

    // Optional drop shadow
    let shadowLayer = null;
    if (addShadow) {
      shadowLayer = await this.createShadow(maskedForeground, shadowOptions);
    }

    // Composite the layers
    const result = await this.compositeImages(
      background,
      maskedForeground,
      shadowLayer,
      {
        blendMode,
        opacity,
        position
      }
    );

    return result;
  }

  async resizeBackground(background, targetWidth, targetHeight, scaleMode) {

    switch (scaleMode) {
      case 'fill':
        return background.resize(targetWidth, targetHeight, {
          fit: 'cover',
          position: 'center'
        });

      case 'fit':
        return background.resize(targetWidth, targetHeight, {
          fit: 'inside',
          background: { r: 0, g: 0, b: 0, alpha: 0 }
        });

      case 'stretch':
        return background.resize(targetWidth, targetHeight, {
          fit: 'fill'
        });

      case 'tile':
        return this.tileBackground(background, targetWidth, targetHeight);

      default:
        return background.resize(targetWidth, targetHeight);
    }
  }

  async tileBackground(background, targetWidth, targetHeight) {
    const sharp = require('sharp');
    const backgroundMeta = await background.metadata();
    const bgBuffer = await background.toBuffer();

    const tilesX = Math.ceil(targetWidth / backgroundMeta.width);
    const tilesY = Math.ceil(targetHeight / backgroundMeta.height);

    const tiles = [];

    for (let y = 0; y < tilesY; y++) {
      const row = [];
      for (let x = 0; x < tilesX; x++) {
        row.push({ input: bgBuffer, left: x * backgroundMeta.width, top: y * backgroundMeta.height });
      }
      tiles.push(row);
    }

    // Lay out the tiles on a blank canvas
    return sharp({
      create: {
        width: targetWidth,
        height: targetHeight,
        channels: 3,
        background: { r: 0, g: 0, b: 0 }
      }
    }).composite(tiles.flat());
  }

  async createShadow(imagePath, shadowOptions) {
    const sharp = require('sharp');

    const shadowImage = await sharp(imagePath)
      // Flatten the subject to black for the shadow
      .modulate({
        brightness: 0,
        saturation: 0
      })
      // Soften with a blur
      .blur(shadowOptions.blur)
      // Set the shadow opacity via the alpha channel
      .ensureAlpha(shadowOptions.opacity)
      .toBuffer();

    return {
      input: shadowImage,
      left: shadowOptions.offset.x,
      top: shadowOptions.offset.y,
      blend: 'multiply'
    };
  }

  async compositeImages(background, foreground, shadow, options) {
    const sharp = require('sharp');

    // sharp applies only the most recent composite() call,
    // so collect every layer and composite once
    const layers = [];

    // Shadow layer sits underneath the foreground
    if (shadow) {
      layers.push(shadow);
    }

    // Foreground layer
    const foregroundBuffer = await sharp(foreground).toBuffer();

    const compositeOptions = {
      input: foregroundBuffer,
      blend: this.getSharpBlendMode(options.blendMode)
    };

    // Optional positioning
    if (options.position !== 'center') {
      const position = this.calculatePosition(options.position);
      compositeOptions.left = position.x;
      compositeOptions.top = position.y;
    }

    layers.push(compositeOptions);

    return background.composite(layers);
  }

  getSharpBlendMode(blendMode) {
    const modeMap = {
      'normal': 'over',
      'multiply': 'multiply',
      'screen': 'screen',
      'overlay': 'overlay',
      'soft-light': 'soft-light',
      'hard-light': 'hard-light',
      'color-dodge': 'colour-dodge',
      'color-burn': 'colour-burn',
      'darken': 'darken',
      'lighten': 'lighten',
      'difference': 'difference',
      'exclusion': 'exclusion'
    };

    return modeMap[blendMode] || 'over';
  }

  // Virtual background effects
  async addVirtualBackground(foregroundPath, backgroundType, options = {}) {
    const backgrounds = {
      'blur': () => this.createBlurredBackground(foregroundPath, options.blurRadius || 20),
      'gradient': () => this.createGradientBackground(options.colors || ['#4facfe', '#00f2fe']),
      'solid': () => this.createSolidBackground(options.color || '#ffffff'),
      'pattern': () => this.createPatternBackground(options.pattern || 'dots'),
      'particles': () => this.createParticleBackground(options)
    };

    const backgroundCreator = backgrounds[backgroundType];
    if (!backgroundCreator) {
      throw new Error(`Unsupported background type: ${backgroundType}`);
    }

    const virtualBackground = await backgroundCreator();

    return this.replaceBackground(
      foregroundPath,
      virtualBackground,
      options.maskPath,
      options
    );
  }

  async createBlurredBackground(originalPath, blurRadius) {
    const sharp = require('sharp');

    return sharp(originalPath)
      .blur(blurRadius)
      .modulate({
        brightness: 0.7, // slightly darken so the subject stands out
        saturation: 0.5
      })
      .toBuffer();
  }

  async createGradientBackground(colors) {
    const sharp = require('sharp');

    // Generate an SVG gradient
    const gradient = `
      <svg width="1920" height="1080">
        <defs>
          <linearGradient id="grad" x1="0%" y1="0%" x2="100%" y2="100%">
            <stop offset="0%" style="stop-color:${colors[0]};stop-opacity:1" />
            <stop offset="100%" style="stop-color:${colors[1]};stop-opacity:1" />
          </linearGradient>
        </defs>
        <rect width="1920" height="1080" fill="url(#grad)" />
      </svg>
    `;

    return sharp(Buffer.from(gradient)).png().toBuffer();
  }
}
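
A usage sketch covering both entry points; all paths are hypothetical placeholders.

// Usage sketch — paths are placeholders
const composer = new BackgroundComposer();

(async () => {
  // Replace the background with a supplied image, adding a soft drop shadow
  const replaced = await composer.replaceBackground(
    './person.png',
    './studio-backdrop.jpg',
    './mask.png',
    { scale: 'fill', addShadow: true }
  );

  // Or synthesize a virtual background (blur, gradient, solid, pattern)
  const virtual = await composer.addVirtualBackground('./person.png', 'gradient', {
    colors: ['#4facfe', '#00f2fe'],
    maskPath: './mask.png'
  });
})();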

Security and Privacy

All processing runs inside your browser; no data is sent to external servers, so you can safely work with personal or confidential material.

Troubleshooting

Common Issues

  • The tool won't run: clear your browser cache and reload
  • Processing is slow: check the file size (20 MB or less is recommended)
  • Results don't match expectations: verify the input format and settings

If the problem persists, update your browser to the latest version or try a different one.

Conclusion: Putting AI Background Removal into Practice

AI background removal achieves high-accuracy results by combining a range of techniques, including semantic segmentation, alpha matting, and chroma keying. Keeping the following points in mind will help you build an effective background removal system:

  1. Choose the right method: combine AI models with classical techniques to suit the task
  2. Handle hair precisely: use alpha matting and edge-preserving filters
  3. Support batch processing: automate efficient handling of large image sets
  4. Raise output quality: polish results with post-processing and effects
  5. Integrate with your workflow: connect to design tools and CMS platforms

With i4u's background removal tool, you can produce high-quality cutouts with ease.
