﻿<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0">
  <channel>
    <title>VTrak E5000 Articles</title>
    <description>Latest articles happening in the VTrak E5000 category</description>
    <link>https://kb.promise.com</link>
    <item>
      <title>How to save a SMART log of HDD/SSD for further analysis?</title>
      <description>&lt;p&gt;&amp;nbsp;&lt;span lang="EN-US"&gt;A&amp;nbsp;&lt;strong&gt;SMART log&lt;/strong&gt;&amp;nbsp;(Self-Monitoring, Analysis, and Reporting Technology) is&amp;nbsp;built into PROMISE Vtrak and Vess RAID storage systems that monitors the health and performance of the drive and SSD and then can predict potential failures.&amp;nbsp;&lt;span data-huuid="6264469258383618483"&gt;It logs various attributes like read error rates, seek error rates, spin-up time, and temperature, providing insights into the drive's condition.&amp;nbsp;&lt;/span&gt;&lt;span data-huuid="6264469258383619706"&gt;By analyzing these logs, technician can identify potential problems before they lead to data loss or drive failure&lt;/span&gt;.&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;Please follow these procedures to collect the SMART log.&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;1]. Log in to WebPAM PROe&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;1.1] Open a browser and enter the management&amp;nbsp;portal IP&amp;nbsp;&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;1.2] Enter the administrator account&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;1.3] The default password is "password"&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;&lt;img src="/content/uploads/f1cc940d-c171-43e5-ae64-a5b6008a9e84/137776cf-5c4d-4831-8fc9-b38c007b0ea3_01.jpg" alt=""&gt;&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;2]. Enable SMART log.&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;Go to Devices &amp;gt; Component List &amp;gt; Controller &amp;gt; Settings &amp;gt; Controller settings&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;&lt;img src="/content/uploads/f1cc940d-c171-43e5-ae64-a5b6008a9e84/5903694b-25e8-4d51-9d00-b38c007b4d01_02.jpg" alt=""&gt;&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;&lt;img src="/content/uploads/f1cc940d-c171-43e5-ae64-a5b6008a9e84/fa3877c7-6387-467e-9a72-b38c007b85a8_03.jpg" alt=""&gt;&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;3]. Export SMART log.&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;Go to Administration &amp;gt; Import/Export &amp;gt; Export &amp;gt; Select "SMART log" from&amp;nbsp;Drop-down menu&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;&lt;img src="/content/uploads/f1cc940d-c171-43e5-ae64-a5b6008a9e84/c0b69287-235c-41dd-9315-b38c007bc80d_04.jpg" alt=""&gt;&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;span lang="EN-US"&gt;&lt;img src="/content/uploads/f1cc940d-c171-43e5-ae64-a5b6008a9e84/8e48aca8-b57f-44e4-9d75-b38c007c396e_05.jpg" alt=""&gt;&lt;/span&gt;&lt;/p&gt;</description>
      <pubDate>2025-11-05T07:32:31.5630000</pubDate>
      <link>https://kb.promise.com/thread/how-to-save-a-smart-log-of-hdd-ssd-for-further-analysis-1/</link>
    </item>
    <item>
      <title>VTrak E5000 Series Tutorial Videos : Intro, Cabling, Initial Setup, Mount and Access and Support and Component Replacement</title>
      <description>&lt;h1 class="style-scope ytd-watch-metadata"&gt;&lt;span style="text-decoration: underline;"&gt;Introduction&lt;/span&gt;&lt;/h1&gt;&#xD;
&lt;p&gt;&lt;iframe title="YouTube video player" src="https://www.youtube.com/embed/FYBWz4U5k-4?si=6KOiUEy7lYYZOcvK" width="560" height="315" frameborder="0" allowfullscreen=""&gt;&lt;/iframe&gt;&lt;/p&gt;&#xD;
&lt;h1 class="style-scope ytd-watch-metadata"&gt;&lt;span style="text-decoration: underline;"&gt;Cabling&lt;/span&gt;&lt;/h1&gt;&#xD;
&lt;p&gt;&lt;iframe title="YouTube video player" src="https://www.youtube.com/embed/IrDFM6KjCg8?si=4ZYCX2F5JP5OtKZ9" width="560" height="315" frameborder="0" allowfullscreen=""&gt;&lt;/iframe&gt;&lt;/p&gt;&#xD;
&lt;h1 class="style-scope ytd-watch-metadata"&gt;&lt;span style="text-decoration: underline;"&gt;Initial Setup&lt;/span&gt;&lt;/h1&gt;&#xD;
&lt;p&gt;&lt;iframe title="YouTube video player" src="https://www.youtube.com/embed/FpR7FkQ35T4?si=GCgznfb8eZR9JYA0" width="560" height="315" frameborder="0" allowfullscreen=""&gt;&lt;/iframe&gt;&lt;/p&gt;&#xD;
&lt;h1 class="style-scope ytd-watch-metadata"&gt;&lt;span style="text-decoration: underline;"&gt;Mount and Access&lt;/span&gt;&lt;/h1&gt;&#xD;
&lt;p&gt;&lt;iframe title="YouTube video player" src="https://www.youtube.com/embed/3hvAVxgz8us?si=pXrtakVkD0wZGVcB" width="560" height="315" frameborder="0" allowfullscreen=""&gt;&lt;/iframe&gt;&lt;/p&gt;&#xD;
&lt;h1 class="style-scope ytd-watch-metadata"&gt;&lt;span style="text-decoration: underline;"&gt;Support and Component Replacement&lt;/span&gt;&lt;/h1&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;iframe title="YouTube video player" src="https://www.youtube.com/embed/axxct3Zr_4U?si=HuqUcHHuXolW4sHf" width="560" height="315" frameborder="0" allowfullscreen=""&gt;&lt;/iframe&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;</description>
      <pubDate>2024-11-20T17:58:25.0070000</pubDate>
      <link>https://kb.promise.com/thread/vtrak-e5000-series-tutorial-videos/</link>
    </item>
    <item>
      <title>VTrak E5000 Series Power Consumption</title>
      <description>&lt;p&gt;These numbers are derived from fully populated&amp;nbsp;VTrak E5x00 chassis utilizing SAS drives under full load:&lt;/p&gt;&#xD;
&lt;table width="710"&gt;&#xD;
&lt;tbody&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;&lt;strong&gt;Product&lt;/strong&gt;&lt;/td&gt;&#xD;
&lt;td&gt;&lt;strong&gt;Input Voltage (VAC)&lt;/strong&gt;&lt;/td&gt;&#xD;
&lt;td&gt;&lt;strong&gt;Power (Watts)&lt;/strong&gt;&lt;/td&gt;&#xD;
&lt;td&gt;&lt;strong&gt;Thermal Output (BTU/hour)&lt;/strong&gt;&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;E5300fD&lt;/td&gt;&#xD;
&lt;td&gt;120&lt;/td&gt;&#xD;
&lt;td&gt;308&lt;/td&gt;&#xD;
&lt;td&gt;1051&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;E5320fD&lt;/td&gt;&#xD;
&lt;td&gt;120&lt;/td&gt;&#xD;
&lt;td&gt;357&lt;/td&gt;&#xD;
&lt;td&gt;1218&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;E5600fD&lt;/td&gt;&#xD;
&lt;td&gt;120&lt;/td&gt;&#xD;
&lt;td&gt;335&lt;/td&gt;&#xD;
&lt;td&gt;1143&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;E5800fD&lt;/td&gt;&#xD;
&lt;td&gt;120&lt;/td&gt;&#xD;
&lt;td&gt;421&lt;/td&gt;&#xD;
&lt;td&gt;1437&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;E5300fD&lt;/td&gt;&#xD;
&lt;td&gt;230&lt;/td&gt;&#xD;
&lt;td&gt;296&lt;/td&gt;&#xD;
&lt;td&gt;1011&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;E5320fD&lt;/td&gt;&#xD;
&lt;td&gt;230&lt;/td&gt;&#xD;
&lt;td&gt;348&lt;/td&gt;&#xD;
&lt;td&gt;1188&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;E5600fD&lt;/td&gt;&#xD;
&lt;td&gt;230&lt;/td&gt;&#xD;
&lt;td&gt;334&lt;/td&gt;&#xD;
&lt;td&gt;1140&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td&gt;E5800fD&lt;/td&gt;&#xD;
&lt;td&gt;230&lt;/td&gt;&#xD;
&lt;td&gt;417&lt;/td&gt;&#xD;
&lt;td&gt;1423&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;/tbody&gt;&#xD;
&lt;/table&gt;</description>
      <pubDate>2024-11-15T14:49:45.6370000</pubDate>
      <link>https://kb.promise.com/thread/vtrak-e5000-series-power-consumption/</link>
    </item>
    <item>
      <title>How to save the Service Report in VTrak E5000 Series</title>
      <description>&lt;p&gt;In most troubleshooting scenarios, Promise Support will require the Service report to diagnose the problem effectively. This report contains event logs, configuration details, and the status of all hardware components.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;em&gt;&lt;strong&gt;Saving from the Home Page :&lt;/strong&gt;&lt;/em&gt;&lt;/p&gt;&#xD;
&lt;ul&gt;&#xD;
&lt;li&gt;Log in to the Web Interface of the unit.&lt;/li&gt;&#xD;
&lt;li&gt;Click the &amp;ldquo;&lt;strong&gt;Save Service Report&lt;/strong&gt;&amp;rdquo; icon located at the top right corner of the screen to generate the report.&lt;/li&gt;&#xD;
&lt;/ul&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240613071448im_/https://kb.promise.com/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/afd87a7e-2dde-405c-a451-b14f00c73682_screenshot-2024-04-11-at-7.57.17%E2%80%AFam.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;ul&gt;&#xD;
&lt;li&gt;The report will be saved as a compressed zip folder.&lt;/li&gt;&#xD;
&lt;li&gt;Unzip the folder to view the generated report.&lt;/li&gt;&#xD;
&lt;/ul&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Note: Generating the report may take a few minutes depending on the size of the array configuration.&lt;/strong&gt;&lt;/p&gt;</description>
      <pubDate>2024-11-05T19:51:41.7000000</pubDate>
      <link>https://kb.promise.com/thread/how-to-save-the-service-report-in-vtrak-e5000-series/</link>
    </item>
    <item>
      <title>Enabling multipathing (MPIO) on Windows Server 2019</title>
      <description>&lt;h3&gt;&lt;strong&gt;Introduction&lt;/strong&gt;&lt;/h3&gt;&#xD;
&lt;p&gt;In some storage configurations, it is possible to have multiple paths from the server to the storage. This is often done for redundant paths or higher bandwidth. Typically multipathing is used in Fibre Channel SANs, in SAS attached storage and in some iSCSI connections.&lt;/p&gt;&#xD;
&lt;p&gt;When there is a multipath configuration, the Windows Disk Management tool will show the same device multiple times. In the example below, the same LUN is seen 4 times by Windows Server.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/915621d7-8b3f-437f-880f-b00d017cbd16_01.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/ac730f19-8897-4b8c-b4bf-b21a00ef93c3_01.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;But there is only (1) LUN. It will be necessary to enable Windows MPIO to see only that one LUN.&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;h3&gt;&lt;strong&gt;Enabling Windows MPIO&lt;/strong&gt;&lt;/h3&gt;&#xD;
&lt;p&gt;To enable Windows MPIO&amp;nbsp;, from the Server Manager select&amp;nbsp;&lt;strong&gt;Add Roles and Features&lt;/strong&gt;.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/548c4802-4a58-4e30-9de8-b00d017df0fd_02.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/f293249b-6d39-4c75-bba8-b21a00efb7f2_02.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;MPIO is a feature, click Next until you get to the features. Scroll down and enable MPIO.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/d5479cae-2b91-445c-bf60-b00d017fe8a3_03.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/6b4483d2-edd3-4c9b-a211-b21a00efc13f_03.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;Click Next and click the Install button.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/0a4d795c-4484-4afb-8b8a-b00d017fcaed_04.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/5fe1bc48-c456-4bde-b8cd-b21a00efce2c_04.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;Once the install is complete, close the Wizard and reboot the server.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/601f7af2-b698-4d0d-bc8a-b21a00efe174_05.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/4407c184-485f-449f-91e6-b00e01239a3d_05.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;Once the server is booted, click the Start menu and type in MPIO&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/4a9ece97-7211-4f7d-b4ad-b00d01819c75_06.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/8c3d5153-8965-4bb9-a32d-b21a00f000b6_06.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;Click MPIO to open the MPIO applet. Select the Discover Multi-Paths tab on top, select the Promise device in Others and click Add.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/b83f9720-065a-4aa7-8e9a-b00d01828e58_07.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/3dab9215-8030-405b-90bc-b21a00f010ce_07.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;Another restart will be required. After the reboot the Disk Management tool shows only one disk.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/a56b58b8-0b62-4a07-8fba-b00d01842f27_08.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/1f985327-b45e-4f07-8dd6-b21a00f02123_08.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;To change the pathing policy open the device manager and select properties for the multipath promise LUN.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/1efda0e8-827d-455d-9a7f-b21a00f02c04_09.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/895bf622-f1b9-40f1-8ba7-b00d0184c711_09.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;Select the MPIO tab at the top and the drop-down menu shows the pathing policies Windows supports.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/f906d9de-b75f-43b5-b297-b00d0185793c_10.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/42de6b20-8abf-425f-bd74-b21a00f038de_10.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;For best bandwidth Least Queue Depth is suggested.&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;h3&gt;&lt;strong&gt;For iSCSI&lt;/strong&gt;&lt;/h3&gt;&#xD;
&lt;p&gt;iSCSI multipathing is an option in the MPIO applet. Select the&amp;nbsp;&lt;strong&gt;Discover Multi-Paths&lt;/strong&gt;&amp;nbsp;tab and click&amp;nbsp;&lt;strong&gt;Add Support for iSCSI devices&lt;/strong&gt;. Another reboot will be required.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="https://web.archive.org/web/20240617205536im_/https://kb.promise.com/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/596fcde1-caf5-43fb-ac8d-b00e01248a48_11.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;img src="/content/uploads/df16c4c6-7669-4a16-9ed5-aee500c4f594/93a490d3-7e8f-42be-b3d5-b21a00f059fe_11.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;Once Multipathing has been enabled the LUN can be formatted NTFS from the Disk Management console.&lt;/p&gt;</description>
      <pubDate>2024-10-31T14:35:37.2000000</pubDate>
      <link>https://kb.promise.com/thread/enabling-multipathing-mpio-on-windows-server-2019/</link>
    </item>
    <item>
      <title>Updating Firmware on the VTrak E5000 Subsystem</title>
      <description>&lt;p&gt;To update the firmware on the VTrak E5000, follow these steps:&lt;/p&gt;&#xD;
&lt;p&gt;1. Download the latest firmware image file from PROMISE support by visiting the website at&amp;nbsp;&lt;a href="http://www.promise.com/support/" target="_blank" rel="noreferrer, noopener"&gt;http://www.promise.com/support/&lt;/a&gt;. Save the file to your Host PC or TFTP server.&lt;/p&gt;&#xD;
&lt;p&gt;2. Before proceeding,&amp;nbsp;&lt;strong&gt;ensure that no background activities are running on the RAID subsystem&lt;/strong&gt;.&lt;/p&gt;&#xD;
&lt;p&gt;3. Access the Administration tab and navigate to the Firmware Update section.&lt;/p&gt;&#xD;
&lt;p&gt;4. Choose a download option:&lt;br&gt;-&amp;nbsp;&lt;strong&gt;Local File through HTTP&lt;/strong&gt;: Click the "Choose File" button, locate the firmware image file on your system, select it, and click "Open."&lt;br&gt;-&amp;nbsp;&lt;strong&gt;TFTP Server&lt;/strong&gt;: Enter the TFTP Server host name or IP address, port number, and file name.&lt;/p&gt;&#xD;
&lt;p&gt;5. Optionally, you can check the "Non-disruptive Image Update (NDIU)" box. This feature updates the RAID controllers and I/O modules one at a time, allowing I/O operations to continue during the firmware update. Note that updates with this option take longer to complete and are only supported by dual controller models.&lt;/p&gt;&#xD;
&lt;p&gt;6. Click the "Download" button. The subsequent screen will display the Flash Image (firmware image file) Version Number and Build Date.&lt;/p&gt;&#xD;
&lt;p&gt;7. Click "Submit" to initiate the firmware update. The progress of the update will be shown.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Important: Do not power off the subsystem during the update and refrain from navigating to other screens until the firmware update operation is completed.&lt;/strong&gt;&lt;/p&gt;&#xD;
&lt;p&gt;Once the update is finished, a message will prompt you to reboot the subsystem. Click "OK."&lt;/p&gt;&#xD;
&lt;p&gt;- If you selected the&amp;nbsp;&lt;strong&gt;Disruptive Flash Method&lt;/strong&gt;, the RAID subsystem and JBOD expansion units will automatically restart.&lt;br&gt;- If you chose the&amp;nbsp;&lt;strong&gt;Non-Disruptive Flash Method&lt;/strong&gt;, the system will automatically flash and restart the RAID controllers one at a time.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Automatic Restart&lt;/strong&gt;: If the NDIU box was not checked, the RAID subsystem and JBOD expansion units will automatically restart. This action temporarily disrupts I/O operations and may cause your WebPAM PROe connection to drop. To reestablish the connection, follow these steps:&lt;/p&gt;&#xD;
&lt;p&gt;1. Wait for a minimum of two minutes.&lt;br&gt;2. Click "Logout" in the WebPAM PROe Header and then log in again. If you encounter login issues, wait for 30 seconds and try again.&lt;br&gt;3. In your browser, click "Logout" in the WebPAM PROe Header and then log in again. If you cannot log in immediately, wait for 30 seconds and try again.&lt;/p&gt;</description>
      <pubDate>2024-10-31T13:07:17.6630000</pubDate>
      <link>https://kb.promise.com/thread/updating-firmware-on-the-vtrak-e5000-subsystem/</link>
    </item>
    <item>
      <title>Battery Reconditioning Process on VTrak E5000 Subsystem</title>
      <description>&lt;p&gt;Battery reconditioning plays a crucial role in maintaining the power supply to the controller cache, ensuring data integrity in the event of a power failure, particularly for unwritten data residing in the cache. The process of reconditioning involves discharging and subsequently recharging the battery to optimize its overall capacity and performance. It is important to note that reconditioning is performed in the background, potentially impacting the input/output (I/O) performance. Once the reconditioning process is completed, the cycle count of the battery increases by one.Battery reconditioning is enabled and scheduled by default. It is set to run on the first Saturday of January, March, May, July, September, and November. However, it is possible to modify the reconditioning status and schedule according to specific requirements&lt;/p&gt;&#xD;
&lt;p&gt;For an immediate battery reconditioning, follow these steps:&lt;/p&gt;&#xD;
&lt;p&gt;1. Navigate to the Device tab.&lt;br&gt;2. Click on the Component List icon.&lt;br&gt;3. Select the desired battery, then click on the Recondition button.&lt;br&gt;4. If a battery reconditioning is already scheduled, a dialog menu will appear, providing an explanation. To proceed with immediate reconditioning, enter "confirm" in the designated field and click on the Confirm button.&lt;br&gt;- Please note that during the reconditioning process, the battery's remaining capacity and estimated hold time will fluctuate as a result of the discharge and recharge cycles. This behavior is normal.&lt;/p&gt;&#xD;
&lt;p&gt;To modify the scheduled battery reconditioning, follow these steps:&lt;/p&gt;&#xD;
&lt;p&gt;1. Access the Administration tab.&lt;br&gt;2. Click on the Background Activities icon, displaying the list of Background Activities.&lt;br&gt;3. Click on the Scheduler button.&lt;br&gt;4. Hover over Battery Reconditioning and click on the Settings button.&lt;br&gt;5. Make the necessary adjustments to the settings, such as:&lt;br&gt;- Start Time&lt;br&gt;- Uncheck the Enable This Schedule box to disable the activity.&lt;br&gt;- Recurrence Pattern&lt;br&gt;- Start From&lt;br&gt;- End On&lt;/p&gt;&#xD;
&lt;p&gt;Remember to save the changes by clicking on the Save button to apply the new settings.&lt;/p&gt;</description>
      <pubDate>2024-10-31T13:05:51.6930000</pubDate>
      <link>https://kb.promise.com/thread/battery-reconditioning-process-on-vtrak-e5000-subsystem/</link>
    </item>
    <item>
      <title>E5000: Drive Error Reporting</title>
      <description>&lt;h2 style="text-align: center;"&gt;&lt;strong&gt;E5000 Hard Drive Error Reporting&lt;/strong&gt;&lt;/h2&gt;&#xD;
&lt;p style="text-align: center;"&gt;&lt;strong&gt;5/18/2018&lt;/strong&gt;&lt;/p&gt;&#xD;
&lt;p style="text-align: left;"&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p style="text-align: left;"&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p style="text-align: left;"&gt;&lt;span style="font-family: Verdana, sans-serif;"&gt;This article describes&lt;span class="bg-info"&gt; error threshold&lt;/span&gt; settings and actions for the E5000 family of controllers with firmware version 11.05.0000.00 or later.&amp;nbsp; The contents of this article also apply to the Ex30 family of controllers.&lt;br&gt;&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;p style="text-align: left;"&gt;&lt;span style="font-family: Verdana, sans-serif;"&gt;This table lists the settings and their default values:&lt;/span&gt;&lt;/p&gt;&#xD;
&lt;table width="600"&gt;&#xD;
&lt;tbody&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td width="367"&gt;&amp;nbsp;&lt;/td&gt;&#xD;
&lt;td style="border-color: #000000; width: 233px;" width="233"&gt;Default Value&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td style="border-color: #000000; width: 367px;" width="367"&gt;Reassigned Block Threshold (BBM Threshold)&lt;/td&gt;&#xD;
&lt;td style="border-color: #000000; width: 233px;" width="233"&gt;1024&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td style="border-color: #000000; width: 367px;" width="367"&gt;Error Block Threshold (Media Patrol Threshold)&lt;/td&gt;&#xD;
&lt;td style="border-color: #000000; width: 233px;" width="233"&gt;128&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td style="border-color: #000000; width: 367px;" width="367"&gt;Medium Error Threshold&lt;/td&gt;&#xD;
&lt;td style="border-color: #000000; width: 233px;" width="233"&gt;64&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;tr&gt;&#xD;
&lt;td style="border-color: #000000; width: 367px;" width="367"&gt;SMART&lt;/td&gt;&#xD;
&lt;td style="border-color: #000000; width: 233px;" width="233"&gt;disabled&lt;/td&gt;&#xD;
&lt;/tr&gt;&#xD;
&lt;/tbody&gt;&#xD;
&lt;/table&gt;&#xD;
&lt;h3&gt;&lt;strong&gt;Definitions:&lt;/strong&gt;&lt;/h3&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Reassigned Block Threshold&lt;/strong&gt;, also called &lt;strong&gt;Bad Block Manager Threshold (BBM Threshold)&lt;/strong&gt;, reads the grown defect list on SAS drives (known as the &amp;ldquo;G List&amp;rdquo;).&amp;nbsp;&amp;nbsp; On SATA drives, the BBM reads the DDF area written to the drive by the VTrak firmware.&amp;nbsp; Please note that the BBM count can only be cleared on SATA drives, and the array information must be deleted first.&amp;nbsp;If the BBM threshold has been reached, &lt;strong&gt;PDM&lt;/strong&gt; will begin.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;strong&gt;PDM&lt;/strong&gt;-&amp;nbsp; Please refer to &lt;a href="http://kb.promise.com/thread/what-is-predictive-data-migration-pdm" target="_blank" rel="noreferrer, noopener"&gt;http://kb.promise.com/thread/what-is-predictive-data-migration-pdm&lt;/a&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Error Block Threshold&lt;/strong&gt; is also called the Media Patrol Threshold. If the threshold has been reached, PDM will begin. The Media Patrol Threshold is attributed to I/O errors occurring during read/write operations:&lt;/p&gt;&#xD;
&lt;p&gt;PdId: 3&lt;br&gt;OperationalStatus: OK&lt;br&gt;Alias: &lt;br&gt;PhysicalCapacity: 931.51GB&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; ConfigurableCapacity:931.32GB&lt;br&gt;UsedCapacity: 931.32GB&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; BlockSize:512Bytes&lt;br&gt;ConfigStatus: Array0 SeqNo1&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; Location:Encl1 Slot3&lt;br&gt;ModelNo: SEAGATE ST31000640SS&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp;&amp;nbsp; VisibleTo: AllControllers&lt;br&gt;SerialNo: 9QJ5AM78&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; FirmwareVersion:0003&lt;br&gt;DriveInterface: SAS 12Gb/s&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp;&amp;nbsp; Protocol:SPC-3&lt;br&gt;WWN: 5000-c500-1003-839f&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; NumOfPorts:2&lt;br&gt;Port1Speed: 12Gb/s&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; Port2Speed:12Gb/s&lt;br&gt;Port1SASAddress: 
50-00-c5-00-10-03-83-9d&lt;br&gt;Port2SASAddress: 50-00-c5-00-10-03-83-9e&lt;br&gt;WriteCacheSupport: Yes&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; WriteCache:Enabled&lt;br&gt;RLACacheSupport: Yes&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; RLACache:Enabled&lt;br&gt;ReadCacheSupport: Yes&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; ReadCache:Enabled&lt;br&gt;CmdQueuingSupport: Yes&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; CmdQueuing:Enabled&lt;br&gt;MediumErrorThreshold: 0&lt;br&gt;&lt;strong&gt;Errors: 0&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; &amp;nbsp; NonRWErrors:0&lt;br&gt;ReadErrors: 0 
&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; WriteErrors:0&lt;br&gt;&lt;/strong&gt;DriveTemperature: 34C/93F&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; ReferenceDriveTemperature:68C/154F&lt;br&gt;&lt;br&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Medium Error Threshold&lt;/strong&gt; is a global physical drive setting. The value is the number of bad blocks tolerated before the controller marks the drive as dead. &amp;nbsp;The value can be set from 0 to 4294967294. The threshold only functions when the array is in an OK or Degraded state. It will not function if the array is critical or if the array is not a redundant RAID type such as Raid 0.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;strong&gt;SMART&lt;/strong&gt; stands for &amp;nbsp;Self-Monitoring, Analysis, and Reporting Technology.&amp;nbsp; If enabled, a polling interval can be assigned. SMART will report errors detected on the disk and PDM will start if the threshold has been reached.&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;h3&gt;&lt;strong&gt;Configuration:&lt;/strong&gt;&lt;/h3&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Setting thresholds from the CLI&lt;/strong&gt;&lt;/p&gt;&#xD;
&lt;p&gt;BBM Threshold&amp;ndash; (1~2048)&lt;/p&gt;&#xD;
&lt;p&gt;administrator@cli&amp;gt;bga -a mod -s "BBMThreshold=1024"&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;MediaPatrol Threshold &amp;ndash; (1~2048)&lt;/p&gt;&#xD;
&lt;p&gt;administrator@cli&amp;gt;bga -a mod -s "MediaPatrolThreahold=128"&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;MediumError Threshold &amp;ndash; (0~4294967294)&lt;/p&gt;&#xD;
&lt;p&gt;administrator@cli&amp;gt;phydrv -a mod -s "mediumerrorthreshold=1024"&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;SMART&lt;/p&gt;&#xD;
&lt;p&gt;administrator@cli&amp;gt;ctrl -a mod -s "smart=disable"&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Setting thresholds from WebPAMProE&lt;/strong&gt;&lt;/p&gt;&#xD;
&lt;p&gt;BBM Threshold&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/4441db6a-2733-473e-86de-a8e401638bc8_bbm.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;Media Patrol Threshold&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/8bf2be37-9f69-4f4d-910d-a8e40163fcc3_media-patrol.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;Smart&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="/content/uploads/8528c4a2-e9f1-4ae9-a732-a5b6006d4209/f220495b-aa12-4e4f-a753-a8e40164dbe7_smart.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;</description>
      <pubDate>2018-05-18T21:40:23.4830000</pubDate>
      <link>https://kb.promise.com/thread/e5000-drive-error-reporting/</link>
    </item>
    <item>
      <title>Perfect Rebuild (Vtrak E5000 Series)</title>
      <description>&lt;p&gt;The PerfectRebuild feature is an innovative approach to rebuilding a RAID array in order to significantly reduce the amount of time needed for completion.&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;This frees up CPU resources more quickly to be available for I/O and other demands.&lt;/p&gt;&#xD;
&lt;p&gt;PerfectRebuild ignores any portion of the logical drive where no write changes have occurred, focusing only on the parts that have changed.&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;The conventional approach has been to rebuild the entire logical drive, even sections with no write changes.&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;This reduction in the total time needed for a rebuild is especially significant for very large drives.&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;It does this by keeping metadata of the&amp;nbsp;logical drive to track writes to it.&lt;/p&gt;&#xD;
&lt;p&gt;In a normal rebuild, the entire&amp;nbsp;logical drive is rebuilt, even if there&amp;rsquo;s only 1 file in the logical drive.&amp;nbsp;This takes the same amount of time (hours) no matter how full or empty the Logical Drive is.&lt;/p&gt;&#xD;
&lt;p&gt;Perfect Rebuild keeps track of which blocks of the&amp;nbsp;Logical Drive have been written to and only rebuilds the parts of the Logical Drive that have been written to, reducing the rebuild time.&lt;/p&gt;</description>
      <pubDate>2017-12-20T23:56:55.8100000</pubDate>
      <link>https://kb.promise.com/thread/perfect-rebuild-vtrak-e5000-series/</link>
    </item>
    <item>
      <title>How does the VTrak E5000 series calculate the remaining lifetime of SSDs?</title>
      <description>&lt;p&gt;The VTrak E5000 series can monitor SSDs to evaluate their remaining useful life.&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;When using any &lt;strong&gt;SAS&lt;/strong&gt; Solid State Disks in the VTrak E5000, the VTrak controllers monitor the &amp;ldquo;Percentage Used Endurance Indicator&amp;rdquo; parameter of each SAS SSD to keep track of approximately how much time the SSD has been used.&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;When using &lt;strong&gt;SATA&lt;/strong&gt; Solid State Disks in the VTrak E5000, the VTrak controllers monitor the SMART attributes from each SATA SSD to keep track of approximately how much time the SSD has been used.&lt;/p&gt;&#xD;
&lt;p&gt;The SMART data attributes are different for the various SATA SSD vendors, so be sure to only use SATA SSDs that are on the VTrak E5000 Compatibility List if you want to have reliable lifetime data.&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;The default threshold for SSD Percent Lifetime Remaining is 10, and if this value goes below 10 the VTrak E5000 controllers will begin a Predictive Data Migration (PDM) for that drive to a configured spare drive.&lt;/p&gt;&#xD;
&lt;p&gt;The Percent Lifetime Remaining threshold can be changed by the user.&lt;/p&gt;&#xD;
&lt;p&gt;&lt;img src="/content/uploads/7e9c3c53-6af9-4c41-a2c8-a5b60079f1e6/1878945c-6f58-4db2-a429-a789012190ee_ssd-ctrl-settings.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;&#xD;
&lt;p&gt;If the Percent Lifetime Remaining value falls below the threshold, this will result in an entry in the Event Log of &amp;ldquo;SSD endurance indicator is lower than threshold&amp;rdquo; that has a Major severity level.&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;</description>
      <pubDate>2017-06-05T17:34:27.1070000</pubDate>
      <link>https://kb.promise.com/thread/how-does-the-vtrak-e5000-series-calculate-the-remaining-lifetime-of-ssds/</link>
    </item>
    <item>
      <title>4Gb SFPs are not Supported on the E5000 Series</title>
      <description>&lt;p&gt;The E5000 Series' FC ports do not support 4Gb SFPs; however, 4Gb speed negotiation is supported.&lt;/p&gt;&#xD;
&lt;p&gt;The E5000 Series requires a qualified 8Gb or 16Gb SFP to be installed on the controller's FC ports.&lt;br&gt;&lt;br&gt;&lt;a href="https://promise.com/DownloadFile.aspx?DownloadFileUID=4444" target="_blank" rel="noreferrer, noopener"&gt;VTrak E5000 Series Compatibility List&lt;/a&gt;&lt;br&gt;&lt;br&gt;&lt;a href="https://promise.com/DownloadFile.aspx?DownloadFileUID=4446" target="_blank" rel="noreferrer, noopener"&gt;VTrak E5000 Series Release Notes&lt;/a&gt;&lt;/p&gt;&#xD;
&lt;hr&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Contact Promise Technology Support&lt;br&gt;&lt;/strong&gt;Need more help? Save time by starting your support request online and a technical support agent will be assigned to your case.&lt;br&gt;&lt;br&gt;&lt;a href="https://support.promise.com/" target="_blank" rel="noreferrer, noopener"&gt;Promise Technology Technical Support &amp;gt;&lt;/a&gt;&lt;/p&gt;</description>
      <pubDate>2017-03-21T19:33:24.9470000</pubDate>
      <link>https://kb.promise.com/thread/4gb-sfps-are-not-supported-on-the-e5000-series/</link>
    </item>
    <item>
      <title>Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) Configuration Script - E5800f + (3) J5600s</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) configuration script - E5800f + (3) J5600s&lt;/strong&gt;&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise VTrak E5800f + (3) J5600s Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers and 3 SAS-connected Expansion Chassis: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 10 LUNs from 72 drives (head + 3 expansion units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext&lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 9 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;#array -a del -d 4 &lt;br&gt;#array -a del -d 5 &lt;br&gt;#array -a del -d 6 &lt;br&gt;#array -a del -d 7 &lt;br&gt;#array -a del -d 8 &lt;br&gt;#array -a del -d 9 &lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Audio/Scratch LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 21, 22, 23, 24 
&lt;br&gt;# Alias: Scratch &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 21,22,23,24 -s "alias=Scratch" -l "alias=Scratch, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 5, 6, 9, 10, 13, 14, 17, 18 &lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 5,6,9,10,13,14,17,18 -s "alias=Data1" -l "alias=Data1, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 7, 8, 11, 12, 15, 16, 19, 20&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 7,8,11,12,15,16,19,20 -s "alias=Data2" -l "alias=Data2, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data3 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 25, 26, 29, 30, 33, 34, 37, 38&lt;br&gt;# Alias: Data3 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 25,26,29,30,33,34,37,38 -s "alias=Data3" -l "alias=Data3, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data4 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 27, 28, 31, 32, 35, 36, 39, 
40&lt;br&gt;# Alias: Data4&lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 27,28,31,32,35,36,39,40 -s "alias=Data4" -l "alias=Data4, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data5 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 41, 42, 45, 46, 49, 50, 53, 54&lt;br&gt;# Alias: Data5 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 41,42,45,46,49,50,53,54 -s "alias=Data5" -l "alias=Data5, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data6 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 43, 44, 47, 48, 51, 52, 55, 56&lt;br&gt;# Alias: Data6&lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 43,44,47,48,51,52,55,56 -s "alias=Data6" -l "alias=Data6, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data7 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 57, 58, 61, 62, 65, 66, 69, 70&lt;br&gt;# Alias: Data7 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 57,58,61,62,65,66,69,70 -s "alias=Data7" -l "alias=Data7, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data8 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 59, 60, 63, 64, 
67, 68, 71, 72&lt;br&gt;# Alias: Data8&lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 59,60,63,64,67,68,71,72 -s "alias=Data8" -l "alias=Data8, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Set up global spares &lt;br&gt;# Physical Drives: 3, 4 &lt;br&gt;# Type: Global spares, Revertible &lt;br&gt;# &lt;br&gt;spare -a add -p 3 -t g -r y &lt;br&gt;spare -a add -p 4 -t g -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;init -a start -l 3 -q 100 &lt;br&gt;init -a start -l 4 -q 100 &lt;br&gt;init -a start -l 5 -q 100 &lt;br&gt;init -a start -l 6 -q 100 &lt;br&gt;init -a start -l 7 -q 100 &lt;br&gt;init -a start -l 8 -q 100 &lt;br&gt;init -a start -l 9 -q 100 &lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&amp;nbsp;&lt;/p&gt;</description>
      <pubDate>2016-10-19T06:39:00.4270000</pubDate>
      <link>https://kb.promise.com/thread/raid-controller-and-sas-connected-expansion-chassis-xsan-metadata-and-data-e5800f-3-j5600s/</link>
    </item>
    <item>
      <title>Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) Configuration Script - E5800f + J5600s</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) configuration script - E5800f + J5600s&lt;/strong&gt;&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise VTrak E5800f + J5600s Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers and 1 SAS-connected Expansion Chassis: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 6 LUNs from 40 drives (head + 1 expansion units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext&lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 5 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;#array -a del -d 4 &lt;br&gt;#array -a del -d 5 &lt;br&gt;#&lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Audio/Scratch LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 21, 22, 23, 24 &lt;br&gt;# Alias: Scratch &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 
64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 21,22,23,24 -s "alias=Scratch" -l "alias=Scratch, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 5, 6, 9, 10, 13, 14, 17, 18 &lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 5,6,9,10,13,14,17,18 -s "alias=Data1" -l "alias=Data1, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 7, 8, 11, 12, 15, 16, 19, 20&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 7,8,11,12,15,16,19,20 -s "alias=Data2" -l "alias=Data2, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data3 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 25, 26, 29, 30, 33, 34, 37, 38&lt;br&gt;# Alias: Data3 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 25,26,29,30,33,34,37,38 -s "alias=Data3" -l "alias=Data3, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data4 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 27, 28, 31, 32, 35, 36, 39, 40&lt;br&gt;# Alias: Data4&lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB 
stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 27,28,31,32,35,36,39,40 -s "alias=Data4" -l "alias=Data4, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;#&lt;br&gt;# Set up global spares &lt;br&gt;# Physical Drives: 3, 4 &lt;br&gt;# Type: Global spares, Revertible &lt;br&gt;# &lt;br&gt;spare -a add -p 3 -t g -r y &lt;br&gt;spare -a add -p 4 -t g -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;init -a start -l 3 -q 100 &lt;br&gt;init -a start -l 4 -q 100 &lt;br&gt;init -a start -l 5 -q 100 &lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&lt;/p&gt;</description>
      <pubDate>2016-10-19T06:31:06.7900000</pubDate>
      <link>https://kb.promise.com/thread/vtrak-family-vtrak-e5000-raid-controller-and-sas-connected-expansion-chassis-xsan-metadata-and-data-e5600f-3-j5600s-follow-raid-controller-and-sas-connected-expansion-chassis-xsan-metadata-and-data-e5800f-j5600s/</link>
    </item>
    <item>
      <title>Promise VTrak: RAID Head - Xsan (Metadata and Data) Configuration Script - E5800f</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak E5800f RAID Controllers for Xsan (Metadata and Data).&lt;/strong&gt;&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise VTrak E5800f Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers Only: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 4 LUNs from 24 drives (head units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext&lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 3 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Audio/Scratch LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 21, 22, 23, 24 &lt;br&gt;# Alias: Scratch &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add 
-p 21,22,23,24 -s "alias=Scratch" -l "alias=Scratch, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 5, 6, 9, 10, 13, 14, 17, 18 &lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 5,6,9,10,13,14,17,18 -s "alias=Data1" -l "alias=Data1, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 7, 8, 11, 12, 15, 16, 19, 20&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 7,8,11,12,15,16,19,20 -s "alias=Data2" -l "alias=Data2, raid=6, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Set up global spares &lt;br&gt;# Physical Drives: 3, 4 &lt;br&gt;# Type: Global spares, Revertible &lt;br&gt;# &lt;br&gt;spare -a add -p 3 -t g -r y &lt;br&gt;spare -a add -p 4 -t g -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;init -a start -l 3 -q 100 &lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&lt;/p&gt;&#xD;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;</description>
      <pubDate>2016-10-19T06:23:47.3300000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-raid-head-xsan-metadata-and-data-configuration-script-e5800f/</link>
    </item>
    <item>
      <title>Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) Configuration Script - E5600f + (3) J5600s</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) configuration script - E5600f + (3) J5600s&lt;/strong&gt;&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise VTrak E5600f + (3) J5600s Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers and 3 SAS-connected Expansion Chassis: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 12 LUNs from 64 drives (head + 3 expansion units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext&lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 11 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;#array -a del -d 4 &lt;br&gt;#array -a del -d 5 &lt;br&gt;#array -a del -d 6 &lt;br&gt;#array -a del -d 7 &lt;br&gt;#array -a del -d 8 &lt;br&gt;#array -a del -d 9 &lt;br&gt;#array -a del -d 10 &lt;br&gt;#array -a del -d 11 &lt;br&gt;#&lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 5 
&lt;br&gt;# Physical Drives: 5, 6, 9, 10, 13, 14&lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 2&lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 5,6,9,10,13,14 -s "alias=Data1" -l "alias=Data1, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 7, 8, 11, 12, 15, 16&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 7,8,11,12,15,16 -s "alias=Data2" -l "alias=Data2, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data3 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 17, 18, 21, 22, 25, 26&lt;br&gt;# Alias: Data3 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 17,18,21,22,25,26 -s "alias=Data3" -l "alias=Data3, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data4 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 19, 20, 23, 24, 27, 28&lt;br&gt;# Alias: Data4&lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 19,20,23,24,27,28 -s "alias=Data4" -l "alias=Data4, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data5 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 33, 34, 37, 38, 41, 
42&lt;br&gt;# Alias: Data5 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 33,34,37,38,41,42 -s "alias=Data5" -l "alias=Data5, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data6 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 35, 36, 39, 40, 43, 44&lt;br&gt;# Alias: Data6&lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 35,36,39,40,43,44 -s "alias=Data6" -l "alias=Data6, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data7 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 49, 50, 53, 54, 57, 58&lt;br&gt;# Alias: Data7 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 49,50,53,54,57,58 -s "alias=Data7" -l "alias=Data7, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data8 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 51, 52, 55, 56, 59, 60&lt;br&gt;# Alias: Data8&lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 51,52,55,56,59,60 -s "alias=Data8" -l "alias=Data8, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Audio/Scratch1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 29, 30, 31 &lt;br&gt;# Alias: Scratch1 &lt;br&gt;# 
Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 29,30,31 -s "alias=Scratch1" -l "alias=Scratch1, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Audio/Scratch2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 45, 46, 47&lt;br&gt;# Alias: Scratch2 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 45,46,47 -s "alias=Scratch2" -l "alias=Scratch2, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Audio/Scratch3 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 61, 62, 63 &lt;br&gt;# Alias: Scratch3 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 61,62,63 -s "alias=Scratch3" -l "alias=Scratch3, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Set up global spares &lt;br&gt;# Physical Drives: 3, 4, 32, 48, 64 &lt;br&gt;# Type: Global spares, Revertible &lt;br&gt;# &lt;br&gt;spare -a add -p 3 -t g -r y &lt;br&gt;spare -a add -p 4 -t g -r y &lt;br&gt;spare -a add -p 32 -t g -r y &lt;br&gt;spare -a add -p 48 -t g -r y &lt;br&gt;spare -a add -p 64 -t g -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;init -a start -l 3 -q 100 &lt;br&gt;init -a start -l 4 -q 100 &lt;br&gt;init -a start -l 5 -q 100 
&lt;br&gt;init -a start -l 6 -q 100 &lt;br&gt;init -a start -l 7 -q 100 &lt;br&gt;init -a start -l 8 -q 100 &lt;br&gt;init -a start -l 9 -q 100&lt;br&gt;init -a start -l 10 -q 100 &lt;br&gt;init -a start -l 11 -q 100&lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&lt;/p&gt;</description>
      <pubDate>2016-10-19T04:01:19.7470000</pubDate>
      <link>https://kb.promise.com/thread/raid-controller-and-sas-connected-expansion-chassis-xsan-metadata-and-data-e5600f-3-j5600s/</link>
    </item>
    <item>
      <title>Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) Configuration Script - E5600f &amp;#43; J5600s</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) configuration script - E5600f + J5600s&lt;/strong&gt;&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise VTrak E5600f + J5600s Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers and 1 SAS-connected Expansion Chassis: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 6 LUNs from 32 drives (head + 1 expansion units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext&lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 5&lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;#array -a del -d 4 &lt;br&gt;#array -a del -d 5 &lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 5, 6, 9, 10, 13, 14&lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 2&lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, 
&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 5,6,9,10,13,14 -s "alias=Data1" -l "alias=Data1, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 7, 8, 11, 12, 15, 16&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 7,8,11,12,15,16 -s "alias=Data2" -l "alias=Data2, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data3 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 17, 18, 21, 22, 25, 26&lt;br&gt;# Alias: Data3 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 17,18,21,22,25,26 -s "alias=Data3" -l "alias=Data3, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data4 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 19, 20, 23, 24, 27, 28&lt;br&gt;# Alias: Data4&lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 19,20,23,24,27,28 -s "alias=Data4" -l "alias=Data4, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Audio/Scratch1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 29, 30, 31 &lt;br&gt;# Alias: Scratch1 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), 
Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 29,30,31 -s "alias=Scratch1" -l "alias=Scratch1, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Set up global spares &lt;br&gt;# Physical Drives: 3, 4, 32&lt;br&gt;# Type: Global spares, Revertible &lt;br&gt;# &lt;br&gt;spare -a add -p 3 -t g -r y &lt;br&gt;spare -a add -p 4 -t g -r y &lt;br&gt;spare -a add -p 32 -t g -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;init -a start -l 3 -q 100 &lt;br&gt;init -a start -l 4 -q 100 &lt;br&gt;init -a start -l 5 -q 100 &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&lt;/p&gt;</description>
      <pubDate>2016-10-19T02:33:01.8430000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-raid-controllers-and-sas-connected-expansion-chassis-xsan-metadata-and-data-configuration-script-e5600f-j5600s/</link>
    </item>
    <item>
      <title>Promise VTrak: RAID Head - Xsan (Metadata and Data) Configuration Script - E5600f</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak E5600f RAID Controllers for Xsan (Metadata and Data)&lt;/strong&gt;.&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise VTrak E5600f Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 3 LUNs from 16 drives (head units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext&lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable" &lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 2&lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;&lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 5, 6, 9, 10, 13, 14&lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 2&lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 5,6,9,10,13,14 -s "alias=Data1" -l 
"alias=Data1, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 7, 8, 11, 12, 15, 16&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 1MB stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 7,8,11,12,15,16 -s "alias=Data2" -l "alias=Data2, raid=5, stripe=1mb, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;#&lt;br&gt;# Set up global spares &lt;br&gt;# Physical Drives: 3, 4&lt;br&gt;# Type: Global spares, Revertible &lt;br&gt;# &lt;br&gt;spare -a add -p 3 -t g -r y &lt;br&gt;spare -a add -p 4 -t g -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&lt;/p&gt;</description>
      <pubDate>2016-10-19T01:51:56.0700000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-raid-head-xsan-metadata-and-data-configuration-script-e5600f/</link>
    </item>
    <item>
      <title>Promise VTrak: RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) Configuration Script - E5320f &amp;#43; (2) J5600s</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) configuration script - E5320f + (2) J5600s&lt;/strong&gt;&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise E5320f + (2) J5600s Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers Only: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 7 LUNs from 56 drives (head units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext&lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 3 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 5,6,7,8,9,10,11,12,13,14 &lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a 
add -p 5,6,7,8,9,10,11,12,13,14 -s "alias=Data1" -l "alias=Data1, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 15,16,17,18,19,20,21,22,23,24&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 15,16,17,18,19,20,21,22,23,24 -s "alias=Data2" -l "alias=Data2, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data3 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 25,29,30,33,34,37,38&lt;br&gt;# Alias: Data3 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 25,29,30,33,34,37,38 -s "alias=Data3" -l "alias=Data3, raid=6, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data4 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 28,31,32,35,36,39,40&lt;br&gt;# Alias: Data4 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 28,31,32,35,36,39,40 -s "alias=Data4" -l "alias=Data4, raid=6, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data5 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 41,45,46,49,50,53,54&lt;br&gt;# Alias: Data5 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 41,45,46,49,50,53,54 
-s "alias=Data5" -l "alias=Data5, raid=6, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data6 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 44,47,48,51,52,55,56&lt;br&gt;# Alias: Data6 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 44,47,48,51,52,55,56 -s "alias=Data6" -l "alias=Data6, raid=6, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical Drives: 3, 4 &lt;br&gt;# Type: dedicated spares, Revertible to meta, data1, data2&lt;br&gt;# &lt;br&gt;spare -a add -p 3 -d 0,1,2 -t d -r y &lt;br&gt;spare -a add -p 4 -d 0,1,2 -t d -r y &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical Drives: 26, 27&lt;br&gt;# Type: dedicated spares, Revertible to data3, data4&lt;br&gt;# &lt;br&gt;spare -a add -p 26 -d 3,4 -t d -r y &lt;br&gt;spare -a add -p 27 -d 3,4 -t d -r y &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical Drives: 42, 43&lt;br&gt;# Type: dedicated spares, Revertible to data5, data6&lt;br&gt;# &lt;br&gt;spare -a add -p 42 -d 5,6 -t d -r y &lt;br&gt;spare -a add -p 43 -d 5,6 -t d -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;init -a start -l 3 -q 100 &lt;br&gt;init -a start -l 4 -q 100 &lt;br&gt;init -a start -l 5 -q 100 &lt;br&gt;init -a start -l 6 -q 100 &lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&lt;/p&gt;</description>
      <pubDate>2016-10-18T23:49:55.6970000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-raid-controllers-and-sas-connected-expansion-chassis-xsan-metadata-and-data-configuration-script-e5320f-2-j5600s/</link>
    </item>
    <item>
      <title>Promise VTrak: RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) Configuration Script - E5320f &amp;#43; J5600s</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) configuration script - E5320f + J5600s&lt;/strong&gt;&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise E5320 + J5600s configuration script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers Only: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 5 LUNs from 40 drives (head units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext &lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 3 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 5,6,7,8,9,10,11,12,13,14 &lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a 
add -p 5,6,7,8,9,10,11,12,13,14 -s "alias=Data1" -l "alias=Data1, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 15,16,17,18,19,20,21,22,23,24&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 15,16,17,18,19,20,21,22,23,24 -s "alias=Data2" -l "alias=Data2, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data3 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 25,29,30,33,34,37,38&lt;br&gt;# Alias: Data3 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 25,29,30,33,34,37,38 -s "alias=Data3" -l "alias=Data3, raid=6, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data4 LUN &lt;br&gt;# RAID level: 6 &lt;br&gt;# Physical Drives: 28,31,32,35,36,39,40&lt;br&gt;# Alias: Data4 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 28,31,32,35,36,39,40 -s "alias=Data4" -l "alias=Data4, raid=6, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical Drives: 3, 4 &lt;br&gt;# Type: dedicated spares, Revertible to meta, data1, data2&lt;br&gt;# &lt;br&gt;spare -a add -p 3 -d 0,1,2 -t d -r y &lt;br&gt;spare -a add -p 4 -d 0,1,2 -t d -r y &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical Drives: 26, 27&lt;br&gt;# Type: dedicated spares, 
Revertible to data3, data4&lt;br&gt;# &lt;br&gt;spare -a add -p 26 -d 3,4 -t d -r y &lt;br&gt;spare -a add -p 27 -d 3,4 -t d -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;init -a start -l 3 -q 100 &lt;br&gt;init -a start -l 4 -q 100 &lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&amp;nbsp;&lt;/p&gt;</description>
      <pubDate>2016-10-18T23:26:26.3670000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-raid-controllers-and-sas-connected-expansion-chassis-xsan-metadata-and-data-configuration-script-e5320f-j5600s/</link>
    </item>
    <item>
      <title>Promise VTrak: RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) Configuration Script - E5320f &amp;#43; J5320s</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) configuration script - E5320f + J5320s&lt;/strong&gt;&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise E5320f + J5320s Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers Only: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 5 LUNs from 48 drives (head units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext &lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 3 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 5,6,7,8,9,10,11,12,13,14 &lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# 
&lt;br&gt;array -a add -p 5,6,7,8,9,10,11,12,13,14 -s "alias=Data1" -l "alias=Data1, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 15,16,17,18,19,20,21,22,23,24&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 15,16,17,18,19,20,21,22,23,24 -s "alias=Data2" -l "alias=Data2, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data3 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 29,30,31,32,33,34,35,36,37,38&lt;br&gt;# Alias: Data3 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 29,30,31,32,33,34,35,36,37,38 -s "alias=Data3" -l "alias=Data3, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data4 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 39,40,41,42,43,44,45,46,47,48&lt;br&gt;# Alias: Data4 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 39,40,41,42,43,44,45,46,47,48 -s "alias=Data4" -l "alias=Data4, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical Drives: 3, 4 &lt;br&gt;# Type: dedicated spares, Revertible to meta, data1, data2&lt;br&gt;# &lt;br&gt;spare -a add -p 3 -d 0,1,2 -t d -r y &lt;br&gt;spare -a add -p 4 -d 0,1,2 -t d -r y &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical 
Drives: 25, 26, 27, 28 &lt;br&gt;# Type: dedicated spares, Revertible to data3, data4&lt;br&gt;# &lt;br&gt;spare -a add -p 25 -d 3,4 -t d -r y &lt;br&gt;spare -a add -p 26 -d 3,4 -t d -r y &lt;br&gt;spare -a add -p 27 -d 3,4 -t d -r y &lt;br&gt;spare -a add -p 28 -d 3,4 -t d -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;init -a start -l 3 -q 100 &lt;br&gt;init -a start -l 4 -q 100 &lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&amp;nbsp;&lt;/p&gt;</description>
      <pubDate>2016-10-18T22:01:48.8100000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-raid-controllers-and-sas-connected-expansion-chassis-xsan-metadata-and-data-configuration-script-e5320f-j5320s/</link>
    </item>
    <item>
      <title>Promise VTrak: RAID Head - Xsan (Metadata and Data) Configuration Script - E5320f</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak E5320f RAID Controllers for Xsan (Metadata and Data)&lt;/strong&gt;.&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise VTrak E5320f Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers Only: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 3 LUNs from 24 drives (head units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext&lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 3 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 5,6,7,8,9,10,11,12,13,14 &lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add 
-p 5,6,7,8,9,10,11,12,13,14 -s "alias=Data1" -l "alias=Data1, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 15,16,17,18,19,20,21,22,23,24&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default),&lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 15,16,17,18,19,20,21,22,23,24 -s "alias=Data2" -l "alias=Data2, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical Drives: 3, 4 &lt;br&gt;# Type: dedicated spares, Revertible to meta, data1, data2&lt;br&gt;# &lt;br&gt;spare -a add -p 3 -d 0,1,2 -t d -r y &lt;br&gt;spare -a add -p 4 -d 0,1,2 -t d -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&lt;/p&gt;</description>
      <pubDate>2016-10-18T21:50:34.4700000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-raid-head-xsan-metadata-and-data-configuration-script-e5320f/</link>
    </item>
    <item>
      <title>Promise VTrak: RAID Head - Xsan (Metadata and Data) Configuration Script - E5300f</title>
      <description>&lt;p&gt;This article contains a script used to configure &lt;strong&gt;Promise VTrak E5300f RAID Controllers for Xsan (Metadata and Data)&lt;/strong&gt;.&lt;br&gt;&lt;br&gt;To create the configuration script, copy the text below beginning with the line "#Begin Copy" to the line "#End Copy" and paste the text into TextEdit, using &lt;a href="http://support.apple.com/kb/TA20645" target="_blank" rel="noreferrer, noopener"&gt;these guidelines&lt;/a&gt;. Once you've created the script, follow the guidelines &lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;in this article&lt;/a&gt; to import the configuration script.&lt;/p&gt;&#xD;
&lt;pre&gt;#Begin Copy &lt;br&gt;# &lt;br&gt;# Promise VTrak E5300f Configuration Script &lt;br&gt;# &lt;br&gt;# Script Name: RAID Controllers Only: Xsan (Metadata and Data) &lt;br&gt;# &lt;br&gt;# &lt;br&gt;# Script Details: builds 3 LUNs from 12 drives (head units) &lt;br&gt;# in the recommended configuration for use with Xsan and StorNext &lt;br&gt;# &lt;br&gt;# Script Assumptions: no arrays or logical drives created &lt;br&gt;# (uncomment out array deletion if needed) &lt;br&gt;# script works with either SATA or SAS configurations &lt;br&gt;# &lt;br&gt;# Global controller settings &lt;br&gt;# LUN Affinity: enabled [required] &lt;br&gt;# ALUA: enabled [required]&lt;br&gt;# Adaptive Writeback Cache: enabled [optional] &lt;br&gt;# Host Cache Flushing: disabled [optional] &lt;br&gt;# Forced Read Ahead: enabled [optional] &lt;br&gt;# &lt;br&gt;ctrl -a mod -s "lunaffinity=enable, alua=enable, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable" &lt;br&gt;# &lt;br&gt;# Delete any existing arrays &lt;br&gt;# Delete array 0 thru 3 &lt;br&gt;# &lt;br&gt;#array -a del -d 0 &lt;br&gt;#array -a del -d 1 &lt;br&gt;#array -a del -d 2 &lt;br&gt;#array -a del -d 3 &lt;br&gt;# &lt;br&gt;# Build MDC LUN &lt;br&gt;# RAID level: 1 &lt;br&gt;# Physical Drives: 1, 2 &lt;br&gt;# Alias: MDC &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Cache, Write Back&lt;br&gt;#&lt;br&gt;array -a add -p 1,2 -s "alias=MDC" -l "alias=MDC, raid=1, readpolicy=readcache, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Build Data1 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 5,6,9,10&lt;br&gt;# Alias: Data1 &lt;br&gt;# Controller Affinity: 2 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size (default), &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 5,6,9,10 -s 
"alias=Data1" -l "alias=Data1, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=2" &lt;br&gt;# &lt;br&gt;# Build Data2 LUN &lt;br&gt;# RAID level: 5 &lt;br&gt;# Physical Drives: 7,8,11,12&lt;br&gt;# Alias: Data2 &lt;br&gt;# Controller Affinity: 1 &lt;br&gt;# Configuration options: Capacity all (default), 64K stripe size, &lt;br&gt;# 512 byte sector (default), Read Ahead, Write Back &lt;br&gt;# &lt;br&gt;array -a add -p 7,8,11,12 -s "alias=Data2" -l "alias=Data2, raid=5, readpolicy=readahead, writepolicy=writeback, preferredctrlid=1" &lt;br&gt;# &lt;br&gt;# Set up dedicated spares &lt;br&gt;# Physical Drives: 3, 4 &lt;br&gt;# Type: dedicated spares, Revertible to meta, data1, data2&lt;br&gt;# &lt;br&gt;spare -a add -p 3 -d 0,1,2 -t d -r y &lt;br&gt;spare -a add -p 4 -d 0,1,2 -t d -r y &lt;br&gt;# &lt;br&gt;# Perform Quick Init on all LUNs &lt;br&gt;# Note: Ensure any stale filesystem data is destroyed &lt;br&gt;# &lt;br&gt;init -a start -l 0 -q 100 &lt;br&gt;init -a start -l 1 -q 100 &lt;br&gt;init -a start -l 2 -q 100 &lt;br&gt;# &lt;br&gt;# END &lt;br&gt;# &lt;br&gt;#End Copy&lt;/pre&gt;&#xD;
&lt;p&gt;For more information about options configured by this script, see&amp;nbsp;&lt;a href="/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/" target="_blank" rel="noreferrer, noopener"&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/a&gt;&lt;/p&gt;</description>
      <pubDate>2016-10-18T21:08:55.0800000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-raid-head-xsan-metadata-and-data-configuration-script-e5300f/</link>
    </item>
    <item>
      <title>Promise VTrak: Configuring for Optimal Performance (E5000 Series)</title>
      <description>&lt;div&gt;&#xD;
&lt;h2&gt;Promise VTrak: Configuring for Optimal Performance (E5000 Series)&lt;/h2&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Configure Promise VTrak RAIDs for best performance using the guidelines in this article.&lt;br&gt;&lt;br&gt;&lt;/strong&gt;These scripts are intended to be used for &amp;nbsp;&lt;strong&gt;Xsan&lt;/strong&gt; and &lt;strong&gt;Quantum&lt;/strong&gt;&amp;nbsp;&lt;strong&gt;StorNext&lt;/strong&gt; Deployments&lt;strong&gt;&lt;br&gt;&lt;/strong&gt;&lt;br&gt;&lt;strong&gt;-&lt;/strong&gt; For the Promise VTrak E5000 series, the scripted options below are currently&amp;nbsp;&lt;span style="text-decoration: underline;"&gt;&lt;strong&gt;not&lt;/strong&gt;&lt;/span&gt;&amp;nbsp;built into the Promise Admin interface. At this juncture, it is&amp;nbsp;necessary to create and import the configuration scripts.&amp;nbsp;In the next service release of the firmware, these scripts will be embedded in the GUI.&lt;br&gt;&lt;br&gt;&lt;strong&gt;-&lt;/strong&gt; For the Promise VTrak &lt;a href="http://promise.com/Products/VTrak/E5000/" target="_blank" rel="noreferrer, noopener"&gt;E5000&lt;/a&gt;&amp;nbsp;&amp;amp; &lt;a href="http://promise.com/Products/VTrak/J5000/" target="_blank" rel="noreferrer, noopener"&gt;J5000&lt;/a&gt;&amp;nbsp;Series, create and import the desired script, as described below.&lt;br&gt;&lt;strong&gt;&lt;br&gt;-&lt;/strong&gt;&amp;nbsp;Some of the configuration scripts configure Global or Dedicated&amp;nbsp;Revertible Spare Drives. In the event that a drive is replaced, data will be migrated from the Spare Drive to the replacement drive. Performance may be impacted for the duration of the migration procedure.&lt;/p&gt;&#xD;
&lt;hr&gt;&#xD;
&lt;p&gt;&lt;strong&gt;Note:&lt;/strong&gt; All these scripts have &lt;strong&gt;ALUA (Asymmetric Logical Unit Access)&lt;/strong&gt;&amp;nbsp;enabled by default. If &lt;strong&gt;all&lt;/strong&gt; your host's&amp;nbsp;operating systems are ALUA compliant, you may leave ALUA enabled. &lt;br&gt;&lt;br&gt;In the case that you need to disable ALUA due to your operating system's non-compliance, follow the instructions below.&lt;br&gt;&lt;br&gt;Locate&amp;nbsp;&lt;code&gt;&lt;strong&gt;alua=enable&lt;/strong&gt;&lt;/code&gt;&amp;nbsp;:&lt;br&gt;&amp;nbsp;&lt;br&gt;&lt;code&gt;ctrl -a mod -s "lunaffinity=enable, &lt;strong&gt;alua=enable&lt;/strong&gt;, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;br&gt;&lt;br&gt;&lt;/code&gt;Modify the &lt;strong&gt;&lt;code&gt;alua&lt;/code&gt;&lt;/strong&gt;&amp;nbsp;variable&amp;nbsp;in the script before importing:&lt;br&gt;&lt;br&gt;&lt;code&gt;ctrl -a mod -s "lunaffinity=enable, &lt;strong&gt;alua=disable&lt;/strong&gt;, adaptivewbcache=enable, hostcacheflushing=disable, forcedreadahead=enable"&lt;/code&gt;&lt;/p&gt;&#xD;
&lt;hr&gt;&#xD;
&lt;h2&gt;Configure via Script&lt;/h2&gt;&#xD;
&lt;/div&gt;&#xD;
&lt;div&gt;&lt;strong&gt;Create the script file&lt;/strong&gt;&lt;/div&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;Determine which script you should use from the following list, then click the linked article to get the text of the script. Follow the instructions in the linked article to save the script, then return to this article.&lt;/div&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;&lt;strong&gt;&lt;a href="/thread/promise-vtrak-raid-head-xsan-metadata-and-data-configuration-script-e5300f" target="_blank" rel="noreferrer, noopener"&gt;RAID Controller: Xsan (Metadata and Data) - E5300f&lt;/a&gt;&lt;/strong&gt;&lt;/div&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;Configures a RAID Controller that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, two data LUNs, and two spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;/div&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/9268ab97-3f32-48dc-9ff3-a6a30152f791_configurationscript-2u12.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;br&gt;&lt;hr&gt;&lt;/div&gt;&#xD;
&lt;div&gt;&#xD;
&lt;div&gt;&lt;strong&gt;&lt;a href="/thread/promise-vtrak-raid-head-xsan-metadata-and-data-configuration-script-e5600f/" target="_blank" rel="noreferrer, noopener"&gt;RAID Controller: Xsan (Metadata and Data) - E5600f&lt;/a&gt;&lt;/strong&gt;&lt;br&gt;&lt;br&gt;Configures a RAID Controller that will be used as an Xsan or StorNext Volume. Configures arrays to be used as a metadata LUN, two data LUNs, and two spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/ededcb5a-b0fb-4556-9363-a6a4001995a7_e5600f.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;br&gt;&lt;hr&gt;&lt;strong&gt;&lt;a href="/thread/promise-vtrak-raid-controllers-and-sas-connected-expansion-chassis-xsan-metadata-and-data-configuration-script-e5600f-j5600s/" target="_blank" rel="noreferrer, noopener"&gt;RAID Controller and SAS-connected Expansion Chassis: Xsan (Metadata and Data) - E5600f + J5600s&lt;/a&gt;&lt;br&gt;&lt;/strong&gt;&lt;br&gt;Configures a RAID Controller and SAS-connected Expansion Chassis that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, four data LUNs, one scratch LUN, and three spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/e4731df7-c80c-4c0f-ac12-a6a40020f6d3_script2.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;strong&gt;&lt;br&gt;&lt;/strong&gt;&lt;/div&gt;&#xD;
&lt;hr&gt;&lt;strong&gt;&lt;a href="/thread/raid-controller-and-sas-connected-expansion-chassis-xsan-metadata-and-data-e5600f-3-j5600s/" target="_blank" rel="noreferrer, noopener"&gt;RAID Controller and SAS-connected Expansion Chassis: Xsan (Metadata and Data) - E5600f + (3) J5600s&lt;/a&gt;&lt;/strong&gt;&lt;br&gt;&lt;br&gt;Configures a RAID Controller and (3) SAS-connected Expansion Chassis that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, eight data LUNs, three scratch LUNs, and five spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/e3cb4115-0653-4342-aa77-a6a4003efb7f_script3.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;hr&gt;&lt;strong&gt;&lt;a href="/thread/promise-vtrak-raid-head-xsan-metadata-and-data-configuration-script-e5800f/" target="_blank" rel="noreferrer, noopener"&gt;RAID Controller: Xsan (Metadata and Data) - E5800f&lt;/a&gt;&lt;/strong&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;Configures a RAID Controller that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, two data LUNs, one scratch LUN, and two spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/81614fd8-2888-41cb-931e-a6a40066c6c5_script4.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;br&gt;&lt;hr&gt;&lt;/div&gt;&#xD;
&lt;strong&gt;&lt;a href="/thread/vtrak-family-vtrak-e5000-raid-controller-and-sas-connected-expansion-chassis-xsan-metadata-and-data-e5600f-3-j5600s-follow-raid-controller-and-sas-connected-expansion-chassis-xsan-metadata-and-data-e5800f-j5600s/" target="_blank" rel="noreferrer, noopener"&gt;RAID Controller and SAS-connected Expansion Chassis: Xsan (Metadata and Data) - E5800f + J5600s&lt;/a&gt;&lt;/strong&gt;&lt;br&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;Configures a RAID Controller and a SAS-connected Expansion Chassis that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, four data LUNs, one scratch LUN, and two spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/97ed61e9-873d-4a7a-be53-a6a4006a05bf_script5.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;hr&gt;&lt;/div&gt;&#xD;
&lt;strong&gt;&lt;strong&gt;&lt;a href="/thread/raid-controller-and-sas-connected-expansion-chassis-xsan-metadata-and-data-e5800f-3-j5600s/" target="_blank" rel="noreferrer, noopener"&gt;RAID Controller and SAS-connected Expansion Chassis: Xsan (Metadata and Data) - E5800f + (3) J5600s&lt;/a&gt;&lt;/strong&gt;&lt;/strong&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;Configures a RAID Controller and (3) SAS-connected Expansion Chassis that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, eight data LUNs, one scratch LUN, and two spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/9add5c22-3fdc-4090-91da-a6a4006caad0_script6.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;hr&gt;&lt;/div&gt;&#xD;
&lt;strong&gt;&lt;a href="/thread/promise-vtrak-raid-head-xsan-metadata-and-data-configuration-script-e5320f" target="_blank" rel="noreferrer, noopener"&gt;RAID Controller: Xsan (Metadata and Data) - E5320f&lt;/a&gt;&lt;/strong&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;Configures a RAID Controller that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, two data LUNs, and two spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/892d9387-fa90-455d-b6a9-a6a30154b730_configurationscript-2u24.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;br&gt;&lt;hr&gt;&#xD;
&lt;div&gt;&lt;strong&gt;&lt;a href="/thread/promise-vtrak-raid-controllers-and-sas-connected-expansion-chassis-xsan-metadata-and-data-configuration-script-e5320f-j5320s" target="_blank" rel="noreferrer, noopener"&gt;RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) - E5320f + J5320s&lt;/a&gt;&lt;/strong&gt;&lt;/div&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;Configures a RAID Controller and a SAS-connected Expansion Chassis that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, four data LUNs, and six spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/079a2909-7279-4ddc-9de2-a6a30154fd35_configurationscript-2u24+2u24.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;br&gt;&lt;hr&gt;&#xD;
&lt;div&gt;&lt;strong&gt;&lt;a href="/thread/promise-vtrak-raid-controllers-and-sas-connected-expansion-chassis-xsan-metadata-and-data-configuration-script-e5320f-j5600s" target="_blank" rel="noreferrer, noopener"&gt;RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) - E5320f + J5600s&lt;/a&gt;&lt;/strong&gt;&lt;/div&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;div&gt;Configures a RAID Controller and a SAS-connected Expansion Chassis that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, four data LUNs, and four spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/af7d79fa-c213-4b9e-ab9c-a6a301555bea_configurationscript-2u24+4u24.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;/div&gt;&#xD;
&lt;div&gt;&lt;hr&gt;&lt;strong&gt;&lt;a href="/thread/promise-vtrak-raid-controllers-and-sas-connected-expansion-chassis-xsan-metadata-and-data-configuration-script-e5320f-2-j5600s" target="_blank" rel="noreferrer, noopener"&gt;RAID Controllers and SAS-connected Expansion Chassis: Xsan (Metadata and Data) - E5320f + (2) J5600s&lt;/a&gt;&lt;/strong&gt;&lt;br&gt;&lt;br&gt;Configures a RAID Controller and (2) SAS-connected Expansion Chassis that will be used as an Xsan or StorNext volume. Configures arrays to be used as a metadata LUN, six data LUNs, and six spares. Each Xsan or StorNext volume must contain one metadata LUN.&lt;br&gt;&lt;br&gt;&lt;img src="/content/uploads/8424a8f7-ef0d-4197-a135-a5b6009195e8/492d6e2a-23d6-4da7-938b-a6a30156210a_configurationscript-2u24+4u24x2.jpg?width=690&amp;amp;upscale=false" alt=""&gt;&lt;br&gt;&lt;hr&gt;&lt;/div&gt;&#xD;
&lt;div&gt;&amp;nbsp;&lt;/div&gt;&#xD;
&lt;/div&gt;&#xD;
&lt;/div&gt;&#xD;
&lt;/div&gt;</description>
      <pubDate>2016-10-18T20:47:16.7430000</pubDate>
      <link>https://kb.promise.com/thread/promise-vtrak-configuring-for-optimal-performance-e5000-series/</link>
    </item>
  </channel>
</rss>