{
  "jsonVersion": "1.0.0",
  "version": "2",
  "minAppVersion": "2.3.0",
  "enabled": 1,
  "shortName": "prob",
  "preflight": 3,
  "postflight": 0,
  "interlacedFrames": 0,
  "noiseLevel": 0.4,
  "keepColor": 1,
  "gui": {
    "name": "Proteus 6-Parameter",
    "displayPri": 31,
    "showScale": 1,
    "desc": "Upscale video with multiple parameters for fine-tuning. Parameters include Compression reversion, detail recovery, sharpening, noise reduction, dehaloing, and antialiasing / deblurring. Use \"Comparison View\" when trying out the model to hone in on the best results.",    "minScale": 0.25,
    "maxScale": 6,
	  "parameters": {
      "compression": {
        "name": "Revert Compression",
        "guiType": "int",
        "default": 0.0,
        "guiMax": 100,
        "guiMin": 0,
        "description": "Reduces compression artifacts from codec encoding, such as blockiness or mosquito noise. Higher values are best for low bitrate videos.<br/><br/>Note that the value should be relative to the amount of compression artifacts in the input video - higher values on a video with few compression artifacts will introduce more artifacts into the output.",
        "precision": 1,
        "modelMax": 1,
        "modelMin": 0,
        "modelType": "float",
        "displayPri": 8
      },
      "details": {
        "name": "Recover Details",
        "guiType": "int",
        "default": 0.0,
        "guiMax": 100,
        "guiMin": 0,
        "description": "Used to recover fine texture and detail lost due to in-camera noise suppression.<br/><br/>This value is relative to the amount of noise suppression in the camera used for the input video, and higher values may introduce artifacts if the input video has little to no in-camera noise suppression.",
        "precision": 1,
        "modelMax": 1,
        "modelMin": 0,
        "modelType": "float",
        "displayPri": 7
      },
      "blur": {
        "name": "Sharpen",
        "guiType": "int",
        "default": 0.0,
        "guiMax": 100,
        "guiMin": 0,
        "description": "Additional sharpening of the video. Use this if the input video looks too soft.<br/><br/>The value set should be relative to the amount of softness in the input video - if the input video is already sharp, higher values will introduce more artifacts.",
        "precision": 1,
        "modelMax": 1,
        "modelMin": 0,
        "modelType": "float",
        "displayPri": 6
      },
      "noise": {
        "name": "Reduce Noise",
        "guiType": "int",
        "default": 0.0,
        "guiMax": 100,
        "guiMin": 0,
        "description": "Removes ISO noise from the input video. Higher values remove more noise but may also remove fine details.<br/><br/>Note that this value is relative to the amount of noise found in the input video - higher values on videos with low amounts of ISO noise may introduce more artifacts.",
        "precision": 1,
        "modelMax": 1,
        "modelMin": 0,
        "modelType": "float",
        "displayPri": 5
      },
      "halo": {
        "name": "Dehalo",
        "guiType": "int",
        "default": 0.0,
        "guiMax": 100,
        "guiMin": 0,
        "description": "Increase this if the input video has halo or ring artifacts around strong edges caused by oversharpening.<br/><br/>This value is relative to the amount of haloing artifacts in the input video, and has a \"sweet spot\". Values that are too high for the input video may cause additional artifacts to appear.",
        "precision": 1,
        "modelMax": 1,
        "modelMin": 0,
        "modelType": "float",
        "displayPri": 4
      },
      "preBlur": {
        "name": "Antialias / DeBlur",
        "guiType": "int",
        "default": 0.0,
        "guiMax": 100,
        "guiMin": -100,
        "description": "Adjusts both the antialiasing and deblurring strength relative to the amount of aliasing and blurring in the input video. <br/><br/>Negative values are better if the input video has aliasing artifacts such as moire patterns or staircasing. Positive values are better if the input video has more lens blurring than aliasing artifacts. ",
        "precision": 1,
        "modelMax": 1,
        "modelMin": -1,
        "modelType": "float",
        "displayPri": 3,
        "sliderAccentShowsChange": true
      },
      "resetParams": {
        "name": "Reset to Default",
        "guiType": "resetBtn",
        "default": "none",
        "description": "Reset all parameters back to their default values",
        "displayPri": 2
      },
      "autoDetect": {
        "name": "Auto",
        "guiType": "autoBtn",
        "default": "none",
        "description": "The AI model will attempt to find the best values for the current frame",
        "autoModelKey": "prap-1",
        "displayPri": 1
      }
    },
    "modelPickerParams": {
      "vidQuality": [0, 1, 2],
      "vidType": ["P", "CG"],
      "vidArtifactType": [ "Compression", "Noise", "Blurry", "None"]
    },
    "searchableKeywords": ["Proteus", "Antialias"],
    "hiddenKeywords": ["Pr"],
    "beforeImg": "/tldb/images/model-thumbnails/PROB_Original.png",
    "afterImg": "/tldb/images/model-thumbnails/PROB_Enhanced.png"
  },
  "inputs": {
    "fnetInput": "fnet/input",
    "gnetInput": "generator/input"
  },
  "backends": {
    "openvino": {
      "parallel": 2,
      "model": "pr",
      "scales": {
        "1": {
          "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ov.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ov.tz"],
          "outputs": [
            "fnet/autoencode_unit/output_stage/mul",
            "generator/generator_unit/output_stage/add"
          ]
        },
        "2": {
          "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ov.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ov.tz"],
          "outputs": [
            "fnet/autoencode_unit/output_stage/mul",
            "generator/generator_unit/output_stage/add"
          ]
        },
        "4": {
          "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ov.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ov.tz"],
          "outputs": [
            "fnet/autoencode_unit/output_stage/mul",
            "generator/generator_unit/output_stage/add"
          ]
        }
      }
    },
    "coreml": {
        "parallel": 1,
        "model": "pr",
        "scales": {
          "1": {
        "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
            "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ml.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ml.tz"],
            "outputs": ["fnet/output", "generator/output"]
          },
      "2": {
        "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
            "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ml.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ml.tz"],
            "outputs": ["fnet/output", "generator/output"]
          },
      "4": {
        "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
            "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ml.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ml.tz"],
            "outputs": ["fnet/output", "generator/output"]
          }
        }
      },
    "onnx": {
      "parallel": 1,
      "model": "pr",
      "scales": {
        "1": {
      "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp32-[H]x[W]-[S]x-ox.tz","[N]-v[V]-gnet-fp32-[H]x[W]-[S]x-ox.tz"],
          "outputs": ["fnet/output", "generator/output"]
        },
    "2": {
      "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp32-[H]x[W]-[S]x-ox.tz","[N]-v[V]-gnet-fp32-[H]x[W]-[S]x-ox.tz"],
          "outputs": ["fnet/output", "generator/output"]
        },
    "4": {
      "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp32-[H]x[W]-[S]x-ox.tz","[N]-v[V]-gnet-fp32-[H]x[W]-[S]x-ox.tz"],
          "outputs": ["fnet/output", "generator/output"]
        }
      }
    },
    "onnx16": {
      "parallel": 1,
      "model": "pr",
      "scales": {
        "1": {
      "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ox.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ox.tz"],
          "outputs": ["fnet/output", "generator/output"]
        },
    "2": {
      "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ox.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ox.tz"],
          "outputs": ["fnet/output", "generator/output"]
        },
    "4": {
      "blocks": [288,288, 352,256, 480,384, 384,480, 384,576, 416,576, 576,448, 672,384, 672,576],
          "nets": ["[N]-v[V]-fnet-fp16-[H]x[W]-[S]x-ox.tz","[N]-v[V]-gnet-fp16-[H]x[W]-[S]x-ox.tz"],
          "outputs": ["fnet/output", "generator/output"]
        }
      }
    }
  }
}