Home | History | Annotate | Line # | Download | only in io
      1 /******************************************************************************
      2  * blkif.h
      3  *
      4  * Unified block-device I/O interface for Xen guest OSes.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a copy
      7  * of this software and associated documentation files (the "Software"), to
      8  * deal in the Software without restriction, including without limitation the
      9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
     10  * sell copies of the Software, and to permit persons to whom the Software is
     11  * furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
     19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     22  * DEALINGS IN THE SOFTWARE.
     23  *
     24  * Copyright (c) 2003-2004, Keir Fraser
     25  * Copyright (c) 2012, Spectra Logic Corporation
     26  */
     27 
     28 #ifndef __XEN_PUBLIC_IO_BLKIF_H__
     29 #define __XEN_PUBLIC_IO_BLKIF_H__
     30 
     31 #include "ring.h"
     32 #include "../grant_table.h"
     33 
     34 /*
     35  * Front->back notifications: When enqueuing a new request, sending a
     36  * notification can be made conditional on req_event (i.e., the generic
     37  * hold-off mechanism provided by the ring macros). Backends must set
     38  * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
     39  *
     40  * Back->front notifications: When enqueuing a new response, sending a
     41  * notification can be made conditional on rsp_event (i.e., the generic
     42  * hold-off mechanism provided by the ring macros). Frontends must set
     43  * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
     44  */
     45 
     46 #ifndef blkif_vdev_t
     47 #define blkif_vdev_t   uint16_t
     48 #endif
     49 #define blkif_sector_t uint64_t
     50 
     51 /*
     52  * Feature and Parameter Negotiation
     53  * =================================
     54  * The two halves of a Xen block driver utilize nodes within the XenStore to
     55  * communicate capabilities and to negotiate operating parameters.  This
     56  * section enumerates these nodes which reside in the respective front and
     57  * backend portions of the XenStore, following the XenBus convention.
     58  *
     59  * All data in the XenStore is stored as strings.  Nodes specifying numeric
     60  * values are encoded in decimal.  Integer value ranges listed below are
     61  * expressed as fixed sized integer types capable of storing the conversion
 * of a properly formatted node string, without loss of information.
     63  *
     64  * Any specified default value is in effect if the corresponding XenBus node
     65  * is not present in the XenStore.
     66  *
     67  * XenStore nodes in sections marked "PRIVATE" are solely for use by the
     68  * driver side whose XenBus tree contains them.
     69  *
     70  * XenStore nodes marked "DEPRECATED" in their notes section should only be
     71  * used to provide interoperability with legacy implementations.
     72  *
     73  * See the XenBus state transition diagram below for details on when XenBus
     74  * nodes must be published and when they can be queried.
     75  *
     76  *****************************************************************************
     77  *                            Backend XenBus Nodes
     78  *****************************************************************************
     79  *
     80  *------------------ Backend Device Identification (PRIVATE) ------------------
     81  *
     82  * mode
     83  *      Values:         "r" (read only), "w" (writable)
     84  *
     85  *      The read or write access permissions to the backing store to be
     86  *      granted to the frontend.
     87  *
     88  * params
     89  *      Values:         string
     90  *
     91  *      A free formatted string providing sufficient information for the
     92  *      hotplug script to attach the device and provide a suitable
     93  *      handler (ie: a block device) for blkback to use.
     94  *
     95  * physical-device
     96  *      Values:         "MAJOR:MINOR"
     97  *      Notes: 11
     98  *
     99  *      MAJOR and MINOR are the major number and minor number of the
    100  *      backing device respectively.
    101  *
    102  * physical-device-path
    103  *      Values:         path string
    104  *
    105  *      A string that contains the absolute path to the disk image. On
    106  *      NetBSD and Linux this is always a block device, while on FreeBSD
    107  *      it can be either a block device or a regular file.
    108  *
    109  * type
    110  *      Values:         "file", "phy", "tap"
    111  *
    112  *      The type of the backing device/object.
    113  *
    114  *
    115  * direct-io-safe
    116  *      Values:         0/1 (boolean)
    117  *      Default Value:  0
    118  *
    119  *      The underlying storage is not affected by the direct IO memory
    120  *      lifetime bug.  See:
    121  *        http://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
    122  *
    123  *      Therefore this option gives the backend permission to use
    124  *      O_DIRECT, notwithstanding that bug.
    125  *
    126  *      That is, if this option is enabled, use of O_DIRECT is safe,
    127  *      in circumstances where we would normally have avoided it as a
    128  *      workaround for that bug.  This option is not relevant for all
    129  *      backends, and even not necessarily supported for those for
    130  *      which it is relevant.  A backend which knows that it is not
    131  *      affected by the bug can ignore this option.
    132  *
    133  *      This option doesn't require a backend to use O_DIRECT, so it
    134  *      should not be used to try to control the caching behaviour.
    135  *
    136  *--------------------------------- Features ---------------------------------
    137  *
    138  * feature-barrier
    139  *      Values:         0/1 (boolean)
    140  *      Default Value:  0
    141  *
    142  *      A value of "1" indicates that the backend can process requests
    143  *      containing the BLKIF_OP_WRITE_BARRIER request opcode.  Requests
    144  *      of this type may still be returned at any time with the
    145  *      BLKIF_RSP_EOPNOTSUPP result code.
    146  *
    147  * feature-flush-cache
    148  *      Values:         0/1 (boolean)
    149  *      Default Value:  0
    150  *
    151  *      A value of "1" indicates that the backend can process requests
    152  *      containing the BLKIF_OP_FLUSH_DISKCACHE request opcode.  Requests
    153  *      of this type may still be returned at any time with the
    154  *      BLKIF_RSP_EOPNOTSUPP result code.
    155  *
    156  * feature-discard
    157  *      Values:         0/1 (boolean)
    158  *      Default Value:  0
    159  *
    160  *      A value of "1" indicates that the backend can process requests
    161  *      containing the BLKIF_OP_DISCARD request opcode.  Requests
    162  *      of this type may still be returned at any time with the
    163  *      BLKIF_RSP_EOPNOTSUPP result code.
    164  *
    165  * feature-persistent
    166  *      Values:         0/1 (boolean)
    167  *      Default Value:  0
    168  *      Notes: 7
    169  *
    170  *      A value of "1" indicates that the backend can keep the grants used
    171  *      by the frontend driver mapped, so the same set of grants should be
    172  *      used in all transactions. The maximum number of grants the backend
    173  *      can map persistently depends on the implementation, but ideally it
    174  *      should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. Using this
    175  *      feature the backend doesn't need to unmap each grant, preventing
    176  *      costly TLB flushes. The backend driver should only map grants
    177  *      persistently if the frontend supports it. If a backend driver chooses
    178  *      to use the persistent protocol when the frontend doesn't support it,
    179  *      it will probably hit the maximum number of persistently mapped grants
    180  *      (due to the fact that the frontend won't be reusing the same grants),
    181  *      and fall back to non-persistent mode. Backend implementations may
    182  *      shrink or expand the number of persistently mapped grants without
    183  *      notifying the frontend depending on memory constraints (this might
    184  *      cause a performance degradation).
    185  *
    186  *      If a backend driver wants to limit the maximum number of persistently
    187  *      mapped grants to a value less than RING_SIZE *
    188  *      BLKIF_MAX_SEGMENTS_PER_REQUEST a LRU strategy should be used to
    189  *      discard the grants that are less commonly used. Using a LRU in the
    190  *      backend driver paired with a LIFO queue in the frontend will
    191  *      allow us to have better performance in this scenario.
    192  *
    193  *----------------------- Request Transport Parameters ------------------------
    194  *
    195  * max-ring-page-order
    196  *      Values:         <uint32_t>
    197  *      Default Value:  0
    198  *      Notes:          1, 3
    199  *
    200  *      The maximum supported size of the request ring buffer in units of
 *      lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
    202  *      etc.).
    203  *
    204  * max-ring-pages
    205  *      Values:         <uint32_t>
    206  *      Default Value:  1
    207  *      Notes:          DEPRECATED, 2, 3
    208  *
    209  *      The maximum supported size of the request ring buffer in units of
    210  *      machine pages.  The value must be a power of 2.
    211  *
    212  *------------------------- Backend Device Properties -------------------------
    213  *
    214  * discard-enable
    215  *      Values:         0/1 (boolean)
    216  *      Default Value:  1
    217  *
    218  *      This optional property, set by the toolstack, instructs the backend
    219  *      to offer (or not to offer) discard to the frontend. If the property
    220  *      is missing the backend should offer discard if the backing storage
    221  *      actually supports it.
    222  *
    223  * discard-alignment
    224  *      Values:         <uint32_t>
    225  *      Default Value:  0
    226  *      Notes:          4, 5
    227  *
    228  *      The offset, in bytes from the beginning of the virtual block device,
    229  *      to the first, addressable, discard extent on the underlying device.
    230  *
    231  * discard-granularity
    232  *      Values:         <uint32_t>
    233  *      Default Value:  <"sector-size">
    234  *      Notes:          4
    235  *
    236  *      The size, in bytes, of the individually addressable discard extents
    237  *      of the underlying device.
    238  *
    239  * discard-secure
    240  *      Values:         0/1 (boolean)
    241  *      Default Value:  0
    242  *      Notes:          10
    243  *
    244  *      A value of "1" indicates that the backend can process BLKIF_OP_DISCARD
    245  *      requests with the BLKIF_DISCARD_SECURE flag set.
    246  *
    247  * info
    248  *      Values:         <uint32_t> (bitmap)
    249  *
    250  *      A collection of bit flags describing attributes of the backing
    251  *      device.  The VDISK_* macros define the meaning of each bit
    252  *      location.
    253  *
    254  * sector-size
    255  *      Values:         <uint32_t>
    256  *
    257  *      The logical sector size, in bytes, of the backend device.
    258  *
    259  * physical-sector-size
    260  *      Values:         <uint32_t>
    261  *
    262  *      The physical sector size, in bytes, of the backend device.
    263  *
    264  * sectors
    265  *      Values:         <uint64_t>
    266  *
    267  *      The size of the backend device, expressed in units of its logical
    268  *      sector size ("sector-size").
    269  *
    270  *****************************************************************************
    271  *                            Frontend XenBus Nodes
    272  *****************************************************************************
    273  *
    274  *----------------------- Request Transport Parameters -----------------------
    275  *
    276  * event-channel
    277  *      Values:         <uint32_t>
    278  *
    279  *      The identifier of the Xen event channel used to signal activity
    280  *      in the ring buffer.
    281  *
    282  * ring-ref
    283  *      Values:         <uint32_t>
    284  *      Notes:          6
    285  *
    286  *      The Xen grant reference granting permission for the backend to map
    287  *      the sole page in a single page sized ring buffer.
    288  *
    289  * ring-ref%u
    290  *      Values:         <uint32_t>
    291  *      Notes:          6
    292  *
    293  *      For a frontend providing a multi-page ring, a "number of ring pages"
    294  *      sized list of nodes, each containing a Xen grant reference granting
    295  *      permission for the backend to map the page of the ring located
    296  *      at page index "%u".  Page indexes are zero based.
    297  *
    298  * protocol
    299  *      Values:         string (XEN_IO_PROTO_ABI_*)
    300  *      Default Value:  XEN_IO_PROTO_ABI_NATIVE
    301  *
    302  *      The machine ABI rules governing the format of all ring request and
    303  *      response structures.
    304  *
    305  * ring-page-order
    306  *      Values:         <uint32_t>
    307  *      Default Value:  0
    308  *      Maximum Value:  MAX(ffs(max-ring-pages) - 1, max-ring-page-order)
    309  *      Notes:          1, 3
    310  *
    311  *      The size of the frontend allocated request ring buffer in units
 *      of lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
    313  *      etc.).
    314  *
    315  * num-ring-pages
    316  *      Values:         <uint32_t>
    317  *      Default Value:  1
    318  *      Maximum Value:  MAX(max-ring-pages,(0x1 << max-ring-page-order))
    319  *      Notes:          DEPRECATED, 2, 3
    320  *
    321  *      The size of the frontend allocated request ring buffer in units of
    322  *      machine pages.  The value must be a power of 2.
    323  *
    324  * feature-persistent
    325  *      Values:         0/1 (boolean)
    326  *      Default Value:  0
    327  *      Notes: 7, 8, 9
    328  *
    329  *      A value of "1" indicates that the frontend will reuse the same grants
    330  *      for all transactions, allowing the backend to map them with write
    331  *      access (even when it should be read-only). If the frontend hits the
    332  *      maximum number of allowed persistently mapped grants, it can fallback
    333  *      to non persistent mode. This will cause a performance degradation,
 *      since the backend driver will still try to map those grants
    335  *      persistently. Since the persistent grants protocol is compatible with
    336  *      the previous protocol, a frontend driver can choose to work in
    337  *      persistent mode even when the backend doesn't support it.
    338  *
    339  *      It is recommended that the frontend driver stores the persistently
    340  *      mapped grants in a LIFO queue, so a subset of all persistently mapped
    341  *      grants gets used commonly. This is done in case the backend driver
    342  *      decides to limit the maximum number of persistently mapped grants
    343  *      to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
    344  *
    345  *------------------------- Virtual Device Properties -------------------------
    346  *
    347  * device-type
    348  *      Values:         "disk", "cdrom", "floppy", etc.
    349  *
    350  * virtual-device
    351  *      Values:         <uint32_t>
    352  *
    353  *      A value indicating the physical device to virtualize within the
    354  *      frontend's domain.  (e.g. "The first ATA disk", "The third SCSI
    355  *      disk", etc.)
    356  *
    357  *      See docs/misc/vbd-interface.txt for details on the format of this
    358  *      value.
    359  *
    360  * Notes
    361  * -----
    362  * (1) Multi-page ring buffer scheme first developed in the Citrix XenServer
    363  *     PV drivers.
    364  * (2) Multi-page ring buffer scheme first used in some RedHat distributions
    365  *     including a distribution deployed on certain nodes of the Amazon
    366  *     EC2 cluster.
    367  * (3) Support for multi-page ring buffers was implemented independently,
    368  *     in slightly different forms, by both Citrix and RedHat/Amazon.
    369  *     For full interoperability, block front and backends should publish
    370  *     identical ring parameters, adjusted for unit differences, to the
    371  *     XenStore nodes used in both schemes.
    372  * (4) Devices that support discard functionality may internally allocate space
    373  *     (discardable extents) in units that are larger than the exported logical
    374  *     block size. If the backing device has such discardable extents the
    375  *     backend should provide both discard-granularity and discard-alignment.
    376  *     Providing just one of the two may be considered an error by the frontend.
    377  *     Backends supporting discard should include discard-granularity and
    378  *     discard-alignment even if it supports discarding individual sectors.
    379  *     Frontends should assume discard-alignment == 0 and discard-granularity
    380  *     == sector size if these keys are missing.
    381  * (5) The discard-alignment parameter allows a physical device to be
    382  *     partitioned into virtual devices that do not necessarily begin or
    383  *     end on a discardable extent boundary.
    384  * (6) When there is only a single page allocated to the request ring,
    385  *     'ring-ref' is used to communicate the grant reference for this
    386  *     page to the backend.  When using a multi-page ring, the 'ring-ref'
    387  *     node is not created.  Instead 'ring-ref0' - 'ring-refN' are used.
    388  * (7) When using persistent grants data has to be copied from/to the page
    389  *     where the grant is currently mapped. The overhead of doing this copy
    390  *     however doesn't suppress the speed improvement of not having to unmap
    391  *     the grants.
    392  * (8) The frontend driver has to allow the backend driver to map all grants
    393  *     with write access, even when they should be mapped read-only, since
    394  *     further requests may reuse these grants and require write permissions.
    395  * (9) Linux implementation doesn't have a limit on the maximum number of
    396  *     grants that can be persistently mapped in the frontend driver, but
 *     due to the frontend driver implementation it should never be bigger
    398  *     than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
    399  *(10) The discard-secure property may be present and will be set to 1 if the
    400  *     backing device supports secure discard.
    401  *(11) Only used by Linux and NetBSD.
    402  */
    403 
    404 /*
    405  * Multiple hardware queues/rings:
    406  * If supported, the backend will write the key "multi-queue-max-queues" to
    407  * the directory for that vbd, and set its value to the maximum supported
    408  * number of queues.
    409  * Frontends that are aware of this feature and wish to use it can write the
    410  * key "multi-queue-num-queues" with the number they wish to use, which must be
    411  * greater than zero, and no more than the value reported by the backend in
    412  * "multi-queue-max-queues".
    413  *
    414  * For frontends requesting just one queue, the usual event-channel and
    415  * ring-ref keys are written as before, simplifying the backend processing
    416  * to avoid distinguishing between a frontend that doesn't understand the
    417  * multi-queue feature, and one that does, but requested only one queue.
    418  *
    419  * Frontends requesting two or more queues must not write the toplevel
    420  * event-channel and ring-ref keys, instead writing those keys under sub-keys
    421  * having the name "queue-N" where N is the integer ID of the queue/ring for
    422  * which those keys belong. Queues are indexed from zero.
    423  * For example, a frontend with two queues must write the following set of
    424  * queue-related keys:
    425  *
    426  * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
    427  * /local/domain/1/device/vbd/0/queue-0 = ""
    428  * /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
    429  * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
    430  * /local/domain/1/device/vbd/0/queue-1 = ""
    431  * /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
    432  * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
    433  *
    434  * It is also possible to use multiple queues/rings together with
    435  * feature multi-page ring buffer.
 * For example, a frontend requesting two queues/rings, where the size of each
 * ring buffer is two pages, must write the following set of related keys:
    438  *
    439  * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
    440  * /local/domain/1/device/vbd/0/ring-page-order = "1"
    441  * /local/domain/1/device/vbd/0/queue-0 = ""
    442  * /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
    443  * /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
    444  * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
    445  * /local/domain/1/device/vbd/0/queue-1 = ""
    446  * /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
    447  * /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
    448  * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
    449  *
    450  */
    451 
    452 /*
    453  * STATE DIAGRAMS
    454  *
    455  *****************************************************************************
    456  *                                   Startup                                 *
    457  *****************************************************************************
    458  *
    459  * Tool stack creates front and back nodes with state XenbusStateInitialising.
    460  *
    461  * Front                                Back
    462  * =================================    =====================================
    463  * XenbusStateInitialising              XenbusStateInitialising
    464  *  o Query virtual device               o Query backend device identification
    465  *    properties.                          data.
    466  *  o Setup OS device instance.          o Open and validate backend device.
    467  *                                       o Publish backend features and
    468  *                                         transport parameters.
    469  *                                                      |
    470  *                                                      |
    471  *                                                      V
    472  *                                      XenbusStateInitWait
    473  *
    474  * o Query backend features and
    475  *   transport parameters.
    476  * o Allocate and initialize the
    477  *   request ring.
    478  * o Publish transport parameters
    479  *   that will be in effect during
    480  *   this connection.
    481  *              |
    482  *              |
    483  *              V
    484  * XenbusStateInitialised
    485  *
    486  *                                       o Query frontend transport parameters.
    487  *                                       o Connect to the request ring and
    488  *                                         event channel.
    489  *                                       o Publish backend device properties.
    490  *                                                      |
    491  *                                                      |
    492  *                                                      V
    493  *                                      XenbusStateConnected
    494  *
    495  *  o Query backend device properties.
    496  *  o Finalize OS virtual device
    497  *    instance.
    498  *              |
    499  *              |
    500  *              V
    501  * XenbusStateConnected
    502  *
    503  * Note: Drivers that do not support any optional features, or the negotiation
    504  *       of transport parameters, can skip certain states in the state machine:
    505  *
    506  *       o A frontend may transition to XenbusStateInitialised without
    507  *         waiting for the backend to enter XenbusStateInitWait.  In this
    508  *         case, default transport parameters are in effect and any
    509  *         transport parameters published by the frontend must contain
    510  *         their default values.
    511  *
    512  *       o A backend may transition to XenbusStateInitialised, bypassing
    513  *         XenbusStateInitWait, without waiting for the frontend to first
    514  *         enter the XenbusStateInitialised state.  In this case, default
    515  *         transport parameters are in effect and any transport parameters
    516  *         published by the backend must contain their default values.
    517  *
    518  *       Drivers that support optional features and/or transport parameter
    519  *       negotiation must tolerate these additional state transition paths.
    520  *       In general this means performing the work of any skipped state
    521  *       transition, if it has not already been performed, in addition to the
    522  *       work associated with entry into the current state.
    523  */
    524 
    525 /*
    526  * REQUEST CODES.
    527  */
    528 #define BLKIF_OP_READ              0
    529 #define BLKIF_OP_WRITE             1
    530 /*
    531  * All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER
    532  * operation code ("barrier request") must be completed prior to the
    533  * execution of the barrier request.  All writes issued after the barrier
    534  * request must not execute until after the completion of the barrier request.
    535  *
    536  * Optional.  See "feature-barrier" XenBus node documentation above.
    537  */
    538 #define BLKIF_OP_WRITE_BARRIER     2
    539 /*
    540  * Commit any uncommitted contents of the backing device's volatile cache
    541  * to stable storage.
    542  *
    543  * Optional.  See "feature-flush-cache" XenBus node documentation above.
    544  */
    545 #define BLKIF_OP_FLUSH_DISKCACHE   3
    546 /*
    547  * Used in SLES sources for device specific command packet
    548  * contained within the request. Reserved for that purpose.
    549  */
    550 #define BLKIF_OP_RESERVED_1        4
    551 /*
    552  * Indicate to the backend device that a region of storage is no longer in
    553  * use, and may be discarded at any time without impact to the client.  If
    554  * the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the
    555  * discarded region on the device must be rendered unrecoverable before the
    556  * command returns.
    557  *
    558  * This operation is analogous to performing a trim (ATA) or unamp (SCSI),
    559  * command on a native device.
    560  *
    561  * More information about trim/unmap operations can be found at:
    562  * http://t13.org/Documents/UploadedDocuments/docs2008/
    563  *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
    564  * http://www.seagate.com/staticfiles/support/disc/manuals/
    565  *     Interface%20manuals/100293068c.pdf
    566  *
    567  * Optional.  See "feature-discard", "discard-alignment",
    568  * "discard-granularity", and "discard-secure" in the XenBus node
    569  * documentation above.
    570  */
    571 #define BLKIF_OP_DISCARD           5
    572 
    573 /*
    574  * Recognized if "feature-max-indirect-segments" in present in the backend
    575  * xenbus info. The "feature-max-indirect-segments" node contains the maximum
    576  * number of segments allowed by the backend per request. If the node is
    577  * present, the frontend might use blkif_request_indirect structs in order to
    578  * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
    579  * maximum number of indirect segments is fixed by the backend, but the
    580  * frontend can issue requests with any number of indirect segments as long as
    581  * it's less than the number provided by the backend. The indirect_grefs field
    582  * in blkif_request_indirect should be filled by the frontend with the
    583  * grant references of the pages that are holding the indirect segments.
    584  * These pages are filled with an array of blkif_request_segment that hold the
    585  * information about the segments. The number of indirect pages to use is
    586  * determined by the number of segments an indirect request contains. Every
    587  * indirect page can contain a maximum of
    588  * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
    589  * calculate the number of indirect pages to use we have to do
    590  * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
    591  *
    592  * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
    593  * create the "feature-max-indirect-segments" node!
    594  */
    595 #define BLKIF_OP_INDIRECT          6
    596 
    597 /*
    598  * Maximum scatter/gather segments per request.
    599  * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
    600  * NB. This could be 12 if the ring indexes weren't stored in the same page.
    601  */
    602 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
    603 
    604 /*
    605  * Maximum number of indirect pages to use per request.
    606  */
    607 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
    608 
/*
 * NB. first_sect and last_sect in blkif_request_segment, as well as
 * sector_number in blkif_request, are always expressed in 512-byte units.
 * However they must be properly aligned to the real sector size of the
 * physical disk, which is reported in the "physical-sector-size" node in
 * the backend xenbus info. Also the xenbus "sectors" node is expressed in
 * 512-byte units.
 */
/*
 * One scatter/gather element: a granted data page plus the inclusive
 * 512-byte sector span within that page to transfer.  Layout is wire
 * ABI; do not change field order or sizes.
 */
struct blkif_request_segment {
    grant_ref_t gref;        /* reference to I/O buffer frame        */
    /* @first_sect: first sector in frame to transfer (inclusive).   */
    /* @last_sect: last sector in frame to transfer (inclusive).     */
    uint8_t     first_sect, last_sect;
};
    623 
/*
 * Starting ring element for any I/O request.
 *
 * Placed on the shared ring by the frontend; the backend echoes @id in
 * the matching blkif_response.  Layout is wire ABI; do not change field
 * order or sizes.  Requests with other opcodes (discard, indirect) are
 * cast over this structure — see the structs below.
 */
struct blkif_request {
    uint8_t        operation;    /* BLKIF_OP_???                         */
    uint8_t        nr_segments;  /* number of segments                   */
    blkif_vdev_t   handle;       /* only for read/write requests         */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;
    636 
/*
 * Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD
 * sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request)
 *
 * Layout is wire ABI; do not change field order or sizes.  The leading
 * fields (operation, handle, id, sector_number) overlay the corresponding
 * fields of struct blkif_request.
 */
struct blkif_request_discard {
    uint8_t        operation;    /* BLKIF_OP_DISCARD                     */
    uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
#define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0      */
    blkif_vdev_t   handle;       /* same as for read/write requests      */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk             */
    uint64_t       nr_sectors;   /* number of contiguous sectors to discard*/
};
typedef struct blkif_request_discard blkif_request_discard_t;
    651 
/*
 * Cast to this structure when blkif_request.operation == BLKIF_OP_INDIRECT.
 * See the BLKIF_OP_INDIRECT description above for how indirect_grefs is
 * populated and how many indirect pages a request uses.  Layout is wire
 * ABI; do not change field order or sizes.
 */
struct blkif_request_indirect {
    uint8_t        operation;    /* BLKIF_OP_INDIRECT                    */
    uint8_t        indirect_op;  /* BLKIF_OP_{READ/WRITE}                */
    uint16_t       nr_segments;  /* number of segments                   */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    blkif_vdev_t   handle;       /* same as for read/write requests      */
    grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifdef __i386__
    uint64_t       pad;          /* Make it 64 byte aligned on i386      */
#endif
};
typedef struct blkif_request_indirect blkif_request_indirect_t;
    665 
/*
 * Response placed on the shared ring by the backend for every completed
 * request.  @id and @operation are copied verbatim from the originating
 * request so the frontend can match responses to requests.  Layout is
 * wire ABI; do not change field order or sizes.
 */
struct blkif_response {
    uint64_t        id;              /* copied from request */
    uint8_t         operation;       /* copied from request */
    int16_t         status;          /* BLKIF_RSP_???       */
};
typedef struct blkif_response blkif_response_t;
    672 
/*
 * STATUS RETURN CODES.
 *
 * Values carried in blkif_response.status; part of the stable wire ABI.
 */
 /* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP  -2
 /* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
 /* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0

/*
 * Generate blkif ring structures and types.
 */
DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);

/* Bit flags for the "info" XenStore node documented above. */
#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4
    691 
    692 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
    693 
    694 /*
    695  * Local variables:
    696  * mode: C
    697  * c-file-style: "BSD"
    698  * c-basic-offset: 4
    699  * tab-width: 4
    700  * indent-tabs-mode: nil
    701  * End:
    702  */
    703