12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869
/**
 * The atomic module is intended to provide some basic support for lock-free
 * concurrent programming.  Some common operations are defined, each of which
 * may be performed using the specified memory barrier or a less granular
 * barrier if the hardware does not support the version requested.  This
 * model is based on a design by Alexander Terekhov as outlined in
 * <a href=http://groups.google.com/groups?threadm=3E4820EE.6F408B25%40web.de>
 * this thread</a>.  Another useful reference for memory ordering on modern
 * architectures is <a href=http://www.linuxjournal.com/article/8211>this
 * article by Paul McKenney</a>.
 *
 * Copyright: Copyright (C) 2005-2006 Sean Kelly.  All rights reserved.
 * License:   BSD style: $(LICENSE)
 * Authors:   Sean Kelly
 */
module tango.core.Atomic;


////////////////////////////////////////////////////////////////////////////////
// Synchronization Options
////////////////////////////////////////////////////////////////////////////////


/**
 * Memory synchronization flag.  If the supplied option is not available on the
 * current platform then a stronger method will be used instead.
 */
enum msync
{
    raw,    /// not sequenced: atomicity only, no ordering guarantee
    hlb,    /// hoist-load barrier
    hsb,    /// hoist-store barrier
    slb,    /// sink-load barrier
    ssb,    /// sink-store barrier
    acq,    /// hoist-load + hoist-store barrier (acquire)
    rel,    /// sink-load + sink-store barrier (release)
    seq,    /// fully sequenced (acq + rel)
}


////////////////////////////////////////////////////////////////////////////////
// Internal Type Checking
////////////////////////////////////////////////////////////////////////////////


private
{
    version( TangoDoc ) {} else
    {
        import tango.core.Traits;


        // Atomic operations can only be generated for values whose size
        // matches one of the integral widths the hardware can exchange in
        // a single instruction.
        template isValidAtomicType( T )
        {
            const bool isValidAtomicType = T.sizeof == long.sizeof  ||
                                           T.sizeof == int.sizeof   ||
                                           T.sizeof == short.sizeof ||
                                           T.sizeof == byte.sizeof;
        }


        // Increment/decrement only make sense for integers and pointers.
        template isValidNumericType( T )
        {
            const bool isValidNumericType = isPointerType!( T ) ||
                                            isIntegerType!( T );
        }


        // True for any msync level that includes hoist (acquire-side)
        // ordering.
        template isHoistOp( msync ms )
        {
            const bool isHoistOp = ms == msync.seq ||
                                   ms == msync.acq ||
                                   ms == msync.hsb ||
                                   ms == msync.hlb;
        }


        // True for any msync level that includes sink (release-side)
        // ordering.
        template isSinkOp( msync ms )
        {
            const bool isSinkOp = ms == msync.seq ||
                                  ms == msync.rel ||
                                  ms == msync.ssb ||
                                  ms == msync.slb;
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// DDoc Documentation for Atomic Functions
////////////////////////////////////////////////////////////////////////////////


version( TangoDoc )
{
    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw
     *  msync.hlb
     *  msync.acq
     *  msync.seq
     *
     * (An unsupported option is promoted to a stronger one, per the module
     * description.)
     */
    template atomicLoad( msync ms, T )
    {
        /**
         * Refreshes the contents of 'val' from main memory.  This operation is
         * both lock-free and atomic.
         *
         * Params:
         *  val = The value to load.  This value must be properly aligned.
         *
         * Returns:
         *  The loaded value.
         */
        T atomicLoad( ref T val )
        {
            // NOTE: documentation-only stub for TangoDoc builds; the real
            // implementation is selected by the platform version blocks below.
            return val;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw
     *  msync.ssb
     *  msync.acq
     *  msync.rel
     *  msync.seq
     */
    template atomicStore( msync ms, T )
    {
        /**
         * Stores 'newval' to the memory referenced by 'val'.  This operation
         * is both lock-free and atomic.
         *
         * Params:
         *  val     = The destination variable.
         *  newval  = The value to store.
         */
        void atomicStore( ref T val, T newval )
        {
            // NOTE: documentation-only stub for TangoDoc builds; the real
            // implementation is selected by the platform version blocks below.
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic StoreIf
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw
     *  msync.ssb
     *  msync.acq
     *  msync.rel
     *  msync.seq
     */
    template atomicStoreIf( msync ms, T )
    {
        /**
         * Stores 'newval' to the memory referenced by 'val' if val is equal to
         * 'equalTo'.  This operation is both lock-free and atomic.
         *
         * Params:
         *  val     = The destination variable.
         *  newval  = The value to store.
         *  equalTo = The comparison value.
         *
         * Returns:
         *  true if the store occurred, false if not.
         */
        bool atomicStoreIf( ref T val, T newval, T equalTo )
        {
            // NOTE: documentation-only stub for TangoDoc builds; the real
            // implementation is selected by the platform version blocks below.
            return false;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Increment
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw
     *  msync.ssb
     *  msync.acq
     *  msync.rel
     *  msync.seq
     */
    template atomicIncrement( msync ms, T )
    {
        /**
         * This operation is only legal for built-in value and pointer types,
         * and is equivalent to an atomic "val = val + 1" operation.  This
         * function exists to facilitate use of the optimized increment
         * instructions provided by some architectures.  If no such instruction
         * exists on the target platform then the behavior will perform the
         * operation using more traditional means.  This operation is both
         * lock-free and atomic.
         *
         * Params:
         *  val = The value to increment.
         *
         * Returns:
         *  The result of an atomicLoad of val immediately following the
         *  increment operation.  This value is not required to be equal to the
         *  newly stored value.  Thus, competing writes are allowed to occur
         *  between the increment and successive load operation.
         */
        T atomicIncrement( ref T val )
        {
            // NOTE: documentation-only stub for TangoDoc builds; the real
            // implementation is selected by the platform version blocks below.
            return val;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Decrement
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Supported msync values:
     *  msync.raw
     *  msync.ssb
     *  msync.acq
     *  msync.rel
     *  msync.seq
     */
    template atomicDecrement( msync ms, T )
    {
        /**
         * This operation is only legal for built-in value and pointer types,
         * and is equivalent to an atomic "val = val - 1" operation.  This
         * function exists to facilitate use of the optimized decrement
         * instructions provided by some architectures.  If no such instruction
         * exists on the target platform then the behavior will perform the
         * operation using more traditional means.  This operation is both
         * lock-free and atomic.
         *
         * Params:
         *  val = The value to decrement.
         *
         * Returns:
         *  The result of an atomicLoad of val immediately following the
         *  decrement operation.  This value is not required to be equal to the
         *  newly stored value.  Thus, competing writes are allowed to occur
         *  between the decrement and successive load operation.
         */
        T atomicDecrement( ref T val )
        {
            // NOTE: documentation-only stub for TangoDoc builds; the real
            // implementation is selected by the platform version blocks below.
            return val;
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// LDC Atomics Implementation
////////////////////////////////////////////////////////////////////////////////


else version( LDC )
{
    import ldc.intrinsics;


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template atomicLoad( msync ms = msync.seq, T )
    {
        /**
         * Refreshes the contents of 'val' from main memory.  The load is
         * expressed as an atomic fetch-and-add of zero, which returns the
         * current value without modifying it.
         *
         * Params:
         *  val = The value to load.  This value must be properly aligned.
         *
         * Returns:
         *  The loaded value.
         */
        T atomicLoad(ref T val)
        {
            // Emit the barrier categories implied by 'ms' (one flag per
            // hoist/sink condition, plus a final device flag).
            // NOTE(review): the barrier is issued BEFORE the load; acquire
            // semantics conventionally require ordering after the load as
            // well — confirm this is intended.
            llvm_memory_barrier(
                ms == msync.hlb || ms == msync.acq || ms == msync.seq,
                ms == msync.hsb || ms == msync.acq || ms == msync.seq,
                ms == msync.slb || ms == msync.rel || ms == msync.seq,
                ms == msync.ssb || ms == msync.rel || ms == msync.seq,
                false);
            static if (isPointerType!(T))
            {
                // Pointers are loaded through their integral representation.
                return cast(T)llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 0);
            }
            else static if (is(T == bool))
            {
                // bool is handled as a ubyte and normalized to 0/1.
                return llvm_atomic_load_add!(ubyte)(cast(ubyte*)&val, cast(ubyte)0) ? 1 : 0;
            }
            else
            {
                return llvm_atomic_load_add!(T)(&val, cast(T)0);
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template atomicStore( msync ms = msync.seq, T )
    {
        /**
         * Stores 'newval' to the memory referenced by 'val'.  The store is
         * expressed as an atomic swap whose returned old value is discarded.
         *
         * Params:
         *  val     = The destination variable.
         *  newval  = The value to store.
         */
        void atomicStore( ref T val, T newval )
        {
            // Emit the barrier categories implied by 'ms' before the store
            // (one flag per hoist/sink condition, plus a final device flag).
            llvm_memory_barrier(
                ms == msync.hlb || ms == msync.acq || ms == msync.seq,
                ms == msync.hsb || ms == msync.acq || ms == msync.seq,
                ms == msync.slb || ms == msync.rel || ms == msync.seq,
                ms == msync.ssb || ms == msync.rel || ms == msync.seq,
                false);
            static if (isPointerType!(T))
            {
                // Pointers are stored through their integral representation.
                llvm_atomic_swap!(size_t)(cast(size_t*)&val, cast(size_t)newval);
            }
            else static if (is(T == bool))
            {
                // bool is stored as a ubyte holding 0 or 1.
                llvm_atomic_swap!(ubyte)(cast(ubyte*)&val, newval?1:0);
            }
            else
            {
                llvm_atomic_swap!(T)(&val, newval);
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store If
    ////////////////////////////////////////////////////////////////////////////


    template atomicStoreIf( msync ms = msync.seq, T )
    {
        /**
         * Stores 'newval' to the memory referenced by 'val' if val is equal
         * to 'equalTo', using an LLVM atomic compare-and-swap.  This
         * operation is both lock-free and atomic.
         *
         * Params:
         *  val     = The destination variable.
         *  newval  = The value to store.
         *  equalTo = The comparison value.
         *
         * Returns:
         *  true if the store occurred, false if not.
         */
        bool atomicStoreIf( ref T val, T newval, T equalTo )
        {
            // Emit the barrier categories implied by 'ms' before the CAS
            // (one flag per hoist/sink condition, plus a final device flag),
            // mirroring the other operations in this version block.
            llvm_memory_barrier(
                ms == msync.hlb || ms == msync.acq || ms == msync.seq,
                ms == msync.hsb || ms == msync.acq || ms == msync.seq,
                ms == msync.slb || ms == msync.rel || ms == msync.seq,
                ms == msync.ssb || ms == msync.rel || ms == msync.seq,
                false);
            T oldval = void;
            static if (isPointerType!(T))
            {
                // Pointers go through their integral representation.
                oldval = cast(T)llvm_atomic_cmp_swap!(size_t)(cast(size_t*)&val, cast(size_t)equalTo, cast(size_t)newval);
            }
            else static if (is(T == bool))
            {
                // BUG FIX: the old code mapped the CAS's returned old value
                // through "?0:1", which inverted it and made the success test
                // below report the opposite of what actually happened.
                // Preserve the observed old value instead.
                oldval = llvm_atomic_cmp_swap!(ubyte)(cast(ubyte*)&val, equalTo?1:0, newval?1:0) ? true : false;
            }
            else
            {
                oldval = llvm_atomic_cmp_swap!(T)(&val, equalTo, newval);
            }
            // The swap occurred iff the value observed in memory matched
            // 'equalTo'.
            return oldval == equalTo;
        }
    }
    
    
    ////////////////////////////////////////////////////////////////////////////
    // Atomic Increment
    ////////////////////////////////////////////////////////////////////////////


    template atomicIncrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        /**
         * Atomically performs "val = val + 1" via an LLVM fetch-and-add.
         *
         * Params:
         *  val = The value to increment.
         *
         * Returns:
         *  The result of a plain read of val immediately after the
         *  increment; competing writes may occur between the two, per the
         *  documented contract.
         */
        T atomicIncrement( ref T val )
        {
            static if (isPointerType!(T))
            {
                // NOTE(review): this adds 1 *byte* to the pointer's address,
                // not pointee-size as "val + 1" would in D — confirm intent.
                llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 1);
            }
            else
            {
                llvm_atomic_load_add!(T)(&val, cast(T)1);
            }
            // Separate, non-atomic read; see Returns above.
            return val;
        }
    }
    
    
    ////////////////////////////////////////////////////////////////////////////
    // Atomic Decrement
    ////////////////////////////////////////////////////////////////////////////


    template atomicDecrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        /**
         * Atomically performs "val = val - 1" via an LLVM fetch-and-sub.
         *
         * Params:
         *  val = The value to decrement.
         *
         * Returns:
         *  The result of a plain read of val immediately after the
         *  decrement; competing writes may occur between the two, per the
         *  documented contract.
         */
        T atomicDecrement( ref T val )
        {
            static if (isPointerType!(T))
            {
                // NOTE(review): this subtracts 1 *byte* from the pointer's
                // address, not pointee-size as "val - 1" would in D —
                // confirm intent.
                llvm_atomic_load_sub!(size_t)(cast(size_t*)&val, 1);
            }
            else
            {
                llvm_atomic_load_sub!(T)(&val, cast(T)1);
            }
            // Separate, non-atomic read; see Returns above.
            return val;
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
// x86 Atomic Function Implementation
////////////////////////////////////////////////////////////////////////////////


else version( D_InlineAsm_X86 )
{
    version( X86 )
    {
        version( BuildInfo )
        {
            pragma( msg, "tango.core.Atomic: using IA-32 inline asm" );
        }

        version(darwin){
            extern(C) bool OSAtomicCompareAndSwap64(long oldValue, long newValue, long *theValue);
            extern(C) bool OSAtomicCompareAndSwap64Barrier(long oldValue, long newValue, long *theValue);
        }
        version = Has64BitCAS;
        version = Has32BitOps;
    }
    version( X86_64 )
    {
        version( BuildInfo )
        {
            pragma( msg, "tango.core.Atomic: using AMD64 inline asm" );
        }

        version = Has64BitOps;
    }

    private
    {
        ////////////////////////////////////////////////////////////////////////
        // x86 Value Requirements
        ////////////////////////////////////////////////////////////////////////


        // NOTE: The x86 can perform atomic operations on unaligned values,
        //       but doing so is far slower than the aligned case, so this
        //       module requires natural alignment.
        template atomicValueIsProperlyAligned( T )
        {
            bool atomicValueIsProperlyAligned( size_t addr )
            {
                return 0 == addr % T.sizeof;
            }
        }


        ////////////////////////////////////////////////////////////////////////
        // x86 Synchronization Requirements
        ////////////////////////////////////////////////////////////////////////


        // NOTE: Although x86 loads have acquire semantics with respect to
        //       stores, some processors (notably the AMD64) may reorder
        //       independent loads.  The hoist-load barrier op therefore
        //       requires an ordering instruction, and that requirement
        //       extends to acquire ops as well (hoist-store alone would not
        //       need one if separate support were added later).  Since no
        //       modern architecture other than the Alpha reorders dependent
        //       loads, raw loads can still be used to order specific load
        //       sequences in some situations; the original atomic<> design
        //       exposed a 'ddhlb' specifier for such data-dependent loads,
        //       but with no plans to support the Alpha that option is
        //       omitted here.
        //
        //       For reference, the old behavior (acquire semantics for
        //       loads) required a barrier if: ms == msync.seq || isSinkOp!(ms)
        template needsLoadBarrier( msync ms )
        {
            const bool needsLoadBarrier = !( ms == msync.raw );
        }


        // NOTE: x86 stores implicitly have release semantics, so a membar
        //       is only necessary for acquiring operations.
        template needsStoreBarrier( msync ms )
        {
            const bool needsStoreBarrier = isHoistOp!(ms) || ms == msync.seq;
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template atomicLoad( msync ms = msync.seq, T )
    {
        /**
         * Refreshes the contents of 'val' from main memory.  This operation
         * is both lock-free and atomic.
         *
         * For barrier-requiring msync levels the load is performed with a
         * LOCK CMPXCHG whose compare/exchange operands are both a dummy
         * value (42): whether the compare succeeds or fails, AL/AX/EAX ends
         * up holding the current contents of memory, and memory is never
         * visibly modified (a "successful" exchange rewrites the same 42
         * that was already there).  D inline asm functions return the value
         * left in AL/AX/EAX/RAX.
         *
         * Params:
         *  val = The value to load.  This value must be properly aligned.
         *
         * Returns:
         *  The loaded value.
         */
        T atomicLoad( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte Load
                ////////////////////////////////////////////////////////////////


                static if( needsLoadBarrier!(ms) )
                {
                    // Ordered load via LOCK CMPXCHG; result returned in AL.
                    volatile asm
                    {
                        mov DL, 42;
                        mov AL, 42;
                        mov ECX, val;
                        lock;
                        cmpxchg [ECX], DL;
                    }
                }
                else
                {
                    // Raw load: aligned x86 loads are themselves atomic.
                    volatile
                    {
                        return val;
                    }
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte Load
                ////////////////////////////////////////////////////////////////

                static if( needsLoadBarrier!(ms) )
                {
                    // Ordered load via LOCK CMPXCHG; result returned in AX.
                    volatile asm
                    {
                        mov DX, 42;
                        mov AX, 42;
                        mov ECX, val;
                        lock;
                        cmpxchg [ECX], DX;
                    }
                }
                else
                {
                    // Raw load: aligned x86 loads are themselves atomic.
                    volatile
                    {
                        return val;
                    }
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte Load
                ////////////////////////////////////////////////////////////////


                static if( needsLoadBarrier!(ms) )
                {
                    // Ordered load via LOCK CMPXCHG; result returned in EAX.
                    volatile asm
                    {
                        mov EDX, 42;
                        mov EAX, 42;
                        mov ECX, val;
                        lock;
                        cmpxchg [ECX], EDX;
                    }
                }
                else
                {
                    // Raw load: aligned x86 loads are themselves atomic.
                    volatile
                    {
                        return val;
                    }
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte Load
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Load on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    static if( needsLoadBarrier!(ms) )
                    {
                        // NOTE(review): the LOCK prefix is only legal on
                        // read-modify-write instructions (ADD, XCHG,
                        // CMPXCHG, ...); "lock mov" raises #UD on real
                        // hardware.  This path likely needs a CMPXCHG-based
                        // sequence like the smaller sizes — confirm.
                        volatile asm
                        {
                            mov RAX, val;
                            lock;
                            mov RAX, [RAX];
                        }
                    }
                    else
                    {
                        // Raw load: aligned 8-byte loads on x86-64 are atomic.
                        volatile
                        {
                            return val;
                        }
                    }
                }
                else
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Load on 32-Bit Processor
                    ////////////////////////////////////////////////////////////


                    pragma( msg, "This operation is only available on 64-bit platforms." );
                    static assert( false );
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template atomicStore( msync ms = msync.seq, T )
    {
        /**
         * Stores 'newval' to the memory referenced by 'val'.  This operation
         * is both lock-free and atomic.
         *
         * When a barrier is required the store is performed with XCHG, which
         * serializes; otherwise a plain MOV suffices because x86 stores have
         * implicit release semantics (see needsStoreBarrier above).
         *
         * Params:
         *  val     = The destination variable.  Must be properly aligned.
         *  newval  = The value to store.
         */
        void atomicStore( ref T val, T newval )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte Store
                ////////////////////////////////////////////////////////////////


                static if( needsStoreBarrier!(ms) )
                {
                    // Serializing store.  NOTE: XCHG with a memory operand
                    // asserts LOCK implicitly, so the explicit prefix is
                    // redundant but harmless.
                    volatile asm
                    {
                        mov EAX, val;
                        mov DL, newval;
                        lock;
                        xchg [EAX], DL;
                    }
                }
                else
                {
                    // Plain store; x86 stores have release semantics.
                    volatile asm
                    {
                        mov EAX, val;
                        mov DL, newval;
                        mov [EAX], DL;
                    }
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte Store
                ////////////////////////////////////////////////////////////////


                static if( needsStoreBarrier!(ms) )
                {
                    // Serializing store via (implicitly locked) XCHG.
                    volatile asm
                    {
                        mov EAX, val;
                        mov DX, newval;
                        lock;
                        xchg [EAX], DX;
                    }
                }
                else
                {
                    // Plain store; x86 stores have release semantics.
                    volatile asm
                    {
                        mov EAX, val;
                        mov DX, newval;
                        mov [EAX], DX;
                    }
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte Store
                ////////////////////////////////////////////////////////////////


                static if( needsStoreBarrier!(ms) )
                {
                    // Serializing store via (implicitly locked) XCHG.
                    volatile asm
                    {
                        mov EAX, val;
                        mov EDX, newval;
                        lock;
                        xchg [EAX], EDX;
                    }
                }
                else
                {
                    // Plain store; x86 stores have release semantics.
                    volatile asm
                    {
                        mov EAX, val;
                        mov EDX, newval;
                        mov [EAX], EDX;
                    }
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte Store
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Store on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    static if( needsStoreBarrier!(ms) )
                    {
                        // Serializing store via (implicitly locked) XCHG.
                        volatile asm
                        {
                            mov RAX, val;
                            mov RDX, newval;
                            lock;
                            xchg [RAX], RDX;
                        }
                    }
                    else
                    {
                        // Plain store; aligned 8-byte x86-64 stores are atomic.
                        volatile asm
                        {
                            mov RAX, val;
                            mov RDX, newval;
                            mov [RAX], RDX;
                        }
                    }
                }
                else
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Store on 32-Bit Processor
                    ////////////////////////////////////////////////////////////


                    pragma( msg, "This operation is only available on 64-bit platforms." );
                    static assert( false );
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store If
    ////////////////////////////////////////////////////////////////////////////


    template atomicStoreIf( msync ms = msync.seq, T )
    {
        /**
         * Atomically stores 'newval' into 'val' if and only if 'val'
         * currently equals 'equalTo' (compare-and-swap).  Every size variant
         * uses a lock-prefixed cmpxchg (or cmpxch8b), so the compare and the
         * store occur as a single atomic operation regardless of the msync
         * policy; 'ms' is only consulted on the Darwin 8-byte path to choose
         * between the barrier and non-barrier OSAtomic calls.
         *
         * Params:
         *  val     = The destination value, updated in place on success.
         *  newval  = The replacement value.
         *  equalTo = The value 'val' is expected to contain.
         *
         * Returns:
         *  true if the store occurred, false if not.  In the asm variants the
         *  result is produced by 'setz AL' (cmpxchg sets ZF on success), which
         *  occupies the bool return register.
         */
        bool atomicStoreIf( ref T val, T newval, T equalTo )
        in
        {
            // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
            //       4 byte alignment, so use size_t as the align type here.
            static if( T.sizeof > size_t.sizeof )
                assert( atomicValueIsProperlyAligned!(size_t)( cast(size_t) &val ) );
            else
                assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte StoreIf
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov DL, newval;     // DL = replacement value
                    mov AL, equalTo;    // cmpxchg compares [mem] against AL
                    mov ECX, val;       // ECX = address of val (ref param)
                    lock; // lock always needed to make this op atomic
                    cmpxchg [ECX], DL;  // on match: [ECX] = DL and ZF = 1
                    setz AL;            // AL = ZF; becomes the bool return
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte StoreIf
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov DX, newval;     // DX = replacement value
                    mov AX, equalTo;    // cmpxchg compares [mem] against AX
                    mov ECX, val;       // ECX = address of val
                    lock; // lock always needed to make this op atomic
                    cmpxchg [ECX], DX;  // on match: [ECX] = DX and ZF = 1
                    setz AL;            // AL = ZF; becomes the bool return
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte StoreIf
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EDX, newval;    // EDX = replacement value
                    mov EAX, equalTo;   // cmpxchg compares [mem] against EAX
                    mov ECX, val;       // ECX = address of val
                    lock; // lock always needed to make this op atomic
                    cmpxchg [ECX], EDX; // on match: [ECX] = EDX and ZF = 1
                    setz AL;            // AL = ZF; becomes the bool return
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte StoreIf
                ////////////////////////////////////////////////////////////////


                // NOTE(review): if neither Has64BitOps nor Has64BitCAS is
                // defined, this arm emits no code and a bool function falls
                // off the end -- presumably such platforms never instantiate
                // the 8-byte variant; verify.
                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte StoreIf on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    volatile asm
                    {
                        mov RDX, newval;    // RDX = replacement value
                        mov RAX, equalTo;   // cmpxchg compares [mem] against RAX
                        mov RCX, val;       // RCX = address of val
                        lock; // lock always needed to make this op atomic
                        cmpxchg [RCX], RDX; // on match: [RCX] = RDX and ZF = 1
                        setz AL;            // AL = ZF; becomes the bool return
                    }
                }
                else version( Has64BitCAS )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte StoreIf on 32-Bit Processor
                    ////////////////////////////////////////////////////////////
                    version(darwin){
                        // msync.raw requests no ordering, so the plain
                        // OSAtomic variant is enough; all other policies get
                        // the full-barrier variant.
                        static if(ms==msync.raw){
                            return OSAtomicCompareAndSwap64(cast(long)equalTo, cast(long)newval,  cast(long*)&val);
                        } else {
                            return OSAtomicCompareAndSwap64Barrier(cast(long)equalTo, cast(long)newval,  cast(long*)&val);
                        }
                    } else {
                        // cmpxch8b convention: EDX:EAX holds the expected
                        // value, ECX:EBX the replacement, [EDI] the target.
                        // EBX and EDI must be preserved, hence the push/pop.
                        volatile asm
                        {
                            push EDI;
                            push EBX;
                            lea EDI, newval;    // EDI -> newval (64-bit local)
                            mov EBX, [EDI];     // EBX = low  32 bits of newval
                            mov ECX, 4[EDI];    // ECX = high 32 bits of newval
                            lea EDI, equalTo;   // EDI -> equalTo
                            mov EAX, [EDI];     // EAX = low  32 bits of equalTo
                            mov EDX, 4[EDI];    // EDX = high 32 bits of equalTo
                            mov EDI, val;       // EDI = address of val
                            lock; // lock always needed to make this op atomic
                            cmpxch8b [EDI];     // on match: [EDI] = ECX:EBX, ZF = 1
                            setz AL;            // AL = ZF; becomes the bool return
                            pop EBX;
                            pop EDI;
                        }
                    }
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Increment
    ////////////////////////////////////////////////////////////////////////////


    template atomicIncrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        /**
         * Atomically increments 'val' by one using a lock-prefixed inc, then
         * re-reads the stored value.  The trailing read is a separate load:
         * a competing writer may run between the inc and the read, so the
         * returned value is "the value observed immediately after the
         * increment", not necessarily the old value plus one.
         *
         * Params:
         *  val = The value to increment, updated in place.
         *
         * Returns:
         *  The result of a load of val following the increment.
         */
        T atomicIncrement( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte Increment
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;   // EAX = address of val (ref param)
                    lock; // lock always needed to make this op atomic
                    inc [EAX];      // increment in place (no size qualifier; presumably byte -- verify)
                    mov AL, [EAX];  // re-read; AL carries the return value
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte Increment
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;           // EAX = address of val
                    lock; // lock always needed to make this op atomic
                    inc short ptr [EAX];    // atomic 16-bit increment
                    mov AX, [EAX];          // re-read; AX carries the return value
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte Increment
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;       // EAX = address of val
                    lock; // lock always needed to make this op atomic
                    inc int ptr [EAX];  // atomic 32-bit increment
                    mov EAX, [EAX];     // re-read; EAX carries the return value
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte Increment
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Increment on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    volatile asm
                    {
                        mov RAX, val;           // RAX = address of val
                        lock; // lock always needed to make this op atomic
                        inc qword ptr [RAX];    // atomic 64-bit increment
                        mov RAX, [RAX];         // re-read; RAX carries the return value
                    }
                }
                else
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Increment on 32-Bit Processor
                    ////////////////////////////////////////////////////////////


                    pragma( msg, "This operation is only available on 64-bit platforms." );
                    static assert( false );
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Decrement
    ////////////////////////////////////////////////////////////////////////////


    template atomicDecrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        /**
         * Atomically decrements 'val' by one using a lock-prefixed dec, then
         * re-reads the stored value.  The trailing read is a separate load:
         * a competing writer may run between the dec and the read, so the
         * returned value is "the value observed immediately after the
         * decrement", not necessarily the old value minus one.
         *
         * Params:
         *  val = The value to decrement, updated in place.
         *
         * Returns:
         *  The result of a load of val following the decrement.
         */
        T atomicDecrement( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof == byte.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 1 Byte Decrement
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;   // EAX = address of val (ref param)
                    lock; // lock always needed to make this op atomic
                    dec [EAX];      // decrement in place (no size qualifier; presumably byte -- verify)
                    mov AL, [EAX];  // re-read; AL carries the return value
                }
            }
            else static if( T.sizeof == short.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 2 Byte Decrement
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;           // EAX = address of val
                    lock; // lock always needed to make this op atomic
                    dec short ptr [EAX];    // atomic 16-bit decrement
                    mov AX, [EAX];          // re-read; AX carries the return value
                }
            }
            else static if( T.sizeof == int.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 4 Byte Decrement
                ////////////////////////////////////////////////////////////////


                volatile asm
                {
                    mov EAX, val;       // EAX = address of val
                    lock; // lock always needed to make this op atomic
                    dec int ptr [EAX];  // atomic 32-bit decrement
                    mov EAX, [EAX];     // re-read; EAX carries the return value
                }
            }
            else static if( T.sizeof == long.sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // 8 Byte Decrement
                ////////////////////////////////////////////////////////////////


                version( Has64BitOps )
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Decrement on 64-Bit Processor
                    ////////////////////////////////////////////////////////////


                    volatile asm
                    {
                        mov RAX, val;           // RAX = address of val
                        lock; // lock always needed to make this op atomic
                        dec qword ptr [RAX];    // atomic 64-bit decrement
                        mov RAX, [RAX];         // re-read; RAX carries the return value
                    }
                }
                else
                {
                    ////////////////////////////////////////////////////////////
                    // 8 Byte Decrement on 32-Bit Processor
                    ////////////////////////////////////////////////////////////


                    pragma( msg, "This operation is only available on 64-bit platforms." );
                    static assert( false );
                }
            }
            else
            {
                ////////////////////////////////////////////////////////////////
                // Not a 1, 2, 4, or 8 Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
        }
    }
}
else
{
    version( BuildInfo )
    {
        pragma( msg, "tango.core.Atomic: using synchronized ops" );
    }

    private
    {
        ////////////////////////////////////////////////////////////////////////
        // Default Value Requirements
        ////////////////////////////////////////////////////////////////////////


        template atomicValueIsProperlyAligned( T )
        {
            /**
             * Returns true when 'addr' sits on a multiple of T's size,
             * i.e. the value at that address is naturally aligned.
             */
            bool atomicValueIsProperlyAligned( size_t addr )
            {
                if( addr % T.sizeof != 0 )
                    return false;
                return true;
            }
        }


        ////////////////////////////////////////////////////////////////////////
        // Default Synchronization Requirements
        ////////////////////////////////////////////////////////////////////////


        template needsLoadBarrier( msync ms )
        {
            // Every synchronization policy except msync.raw needs a barrier.
            const bool needsLoadBarrier = !(ms == msync.raw);
        }


        template needsStoreBarrier( msync ms )
        {
            // Every synchronization policy except msync.raw needs a barrier.
            const bool needsStoreBarrier = !(ms == msync.raw);
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template atomicLoad( msync ms = msync.seq, T )
    {
        /**
         * Refreshes the contents of 'val' from main memory.  Synchronized
         * fallback: when the policy requires a load barrier the read happens
         * inside a synchronized block, otherwise it is a plain volatile read.
         *
         * Params:
         *  val = The value to load.
         *
         * Returns:
         *  The loaded value.
         */
        T atomicLoad( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof > (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
            else static if( needsLoadBarrier!(ms) )
            {
                // Barrier required: serialize the read.
                synchronized
                {
                    return val;
                }
            }
            else
            {
                // Raw policy: a volatile read is sufficient.
                volatile
                {
                    return val;
                }
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template atomicStore( msync ms = msync.seq, T )
    {
        /**
         * Stores 'newval' to the memory referenced by 'val'.  Synchronized
         * fallback: when the policy requires a store barrier the write
         * happens inside a synchronized block, otherwise it is a plain
         * volatile write.
         *
         * Params:
         *  val    = The destination value.
         *  newval = The value to store.
         */
        void atomicStore( ref T val, T newval )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof > (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
            else static if( needsStoreBarrier!(ms) )
            {
                // Barrier required: serialize the write.
                synchronized
                {
                    val = newval;
                }
            }
            else
            {
                // Raw policy: a volatile write is sufficient.
                volatile
                {
                    val = newval;
                }
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store If
    ////////////////////////////////////////////////////////////////////////////


    template atomicStoreIf( msync ms = msync.seq, T )
    {
        /**
         * Stores 'newval' into 'val' if and only if 'val' currently equals
         * 'equalTo'.  Synchronized fallback: the compare and store are
         * performed under a synchronized block for every msync policy.
         *
         * Params:
         *  val     = The destination value, updated in place on success.
         *  newval  = The replacement value.
         *  equalTo = The value 'val' is expected to contain.
         *
         * Returns:
         *  true if the store occurred, false if not.
         */
        bool atomicStoreIf( ref T val, T newval, T equalTo )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof > (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
            else
            {
                synchronized
                {
                    // Guard clause: no store unless the comparand matches.
                    if( val != equalTo )
                        return false;
                    val = newval;
                    return true;
                }
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Increment
    ////////////////////////////////////////////////////////////////////////////


    template atomicIncrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        /**
         * Atomically increments 'val' by one under a synchronized block.
         *
         * Params:
         *  val = The value to increment, updated in place.
         *
         * Returns:
         *  The incremented value.
         */
        T atomicIncrement( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof > (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
            else
            {
                synchronized
                {
                    return ++val;
                }
            }
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Decrement
    ////////////////////////////////////////////////////////////////////////////


    template atomicDecrement( msync ms = msync.seq, T )
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( isValidNumericType!(T) );


        /**
         * Atomically decrements 'val' by one under a synchronized block.
         *
         * Params:
         *  val = The value to decrement, updated in place.
         *
         * Returns:
         *  The decremented value.
         */
        T atomicDecrement( ref T val )
        in
        {
            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
        }
        body
        {
            static if( T.sizeof > (void*).sizeof )
            {
                ////////////////////////////////////////////////////////////////
                // > (void*).sizeof Byte Type
                ////////////////////////////////////////////////////////////////


                pragma( msg, "Invalid template type specified." );
                static assert( false );
            }
            else
            {
                synchronized
                {
                    return --val;
                }
            }
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// Atomic
////////////////////////////////////////////////////////////////////////////////


/**
 * This struct represents a value which will be subject to competing access.
 * All accesses to this value will be synchronized with main memory, and
 * various memory barriers may be employed for instruction ordering.  Any
 * primitive type of size equal to or smaller than the memory bus size is
 * allowed, so 32-bit machines may use values with size <= int.sizeof and
 * 64-bit machines may use values with size <= long.sizeof.  The one exception
 * to this rule is that architectures that support DCAS will allow double-wide
 * storeIf operations.  The 32-bit x86 architecture, for example, supports
 * 64-bit storeIf operations.
 */
struct Atomic( T )
{
    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template load( msync ms = msync.seq )
    {
        // Only acquire-style policies make sense for a load.
        static assert( ms == msync.raw || ms == msync.hlb ||
                       ms == msync.acq || ms == msync.seq,
                       "ms must be one of: msync.raw, msync.hlb, msync.acq, msync.seq" );

        /**
         * Refreshes the contents of this value from main memory.  This
         * operation is both lock-free and atomic.
         *
         * Returns:
         *  The loaded value.
         */
        T load()
        {
            return atomicLoad!(ms,T)( m_val );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template store( msync ms = msync.seq )
    {
        // Only release-style policies make sense for a store.
        static assert( ms == msync.raw || ms == msync.ssb ||
                       ms == msync.acq || ms == msync.rel ||
                       ms == msync.seq,
                       "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );

        /**
         * Stores 'newval' to the memory referenced by this value.  This
         * operation is both lock-free and atomic.
         *
         * Params:
         *  newval  = The value to store.
         */
        void store( T newval )
        {
            atomicStore!(ms,T)( m_val, newval );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic StoreIf
    ////////////////////////////////////////////////////////////////////////////


    template storeIf( msync ms = msync.seq )
    {
        static assert( ms == msync.raw || ms == msync.ssb ||
                       ms == msync.acq || ms == msync.rel ||
                       ms == msync.seq,
                       "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );

        /**
         * Stores 'newval' to the memory referenced by this value if val is
         * equal to 'equalTo'.  This operation is both lock-free and atomic.
         *
         * Params:
         *  newval  = The value to store.
         *  equalTo = The comparison value.
         *
         * Returns:
         *  true if the store occurred, false if not.
         */
        bool storeIf( T newval, T equalTo )
        {
            return atomicStoreIf!(ms,T)( m_val, newval, equalTo );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Numeric Functions
    ////////////////////////////////////////////////////////////////////////////

	// The TangoDoc branch exists only to generate documentation stubs; the
	// real implementations live in the else branch below.
	version( TangoDoc )
	{
		/**
		 * The following additional functions are available for integer types.
		 */
		////////////////////////////////////////////////////////////////////////
		// Atomic Increment
		////////////////////////////////////////////////////////////////////////


		template increment( msync ms = msync.seq )
		{
			/**
			 * This operation is only legal for built-in value and pointer
			 * types, and is equivalent to an atomic "val = val + 1" operation.
			 * This function exists to facilitate use of the optimized
			 * increment instructions provided by some architectures.  If no
			 * such instruction exists on the target platform then the
			 * operation will be performed using more traditional means.
			 * This operation is both lock-free and atomic.
			 *
			 * Returns:
			 *  The result of an atomicLoad of val immediately following the
			 *  increment operation.  This value is not required to be equal to
			 *  the newly stored value.  Thus, competing writes are allowed to
			 *  occur between the increment and successive load operation.
			 */
			T increment()
			{
				return m_val;
			}
		}


		////////////////////////////////////////////////////////////////////////
		// Atomic Decrement
		////////////////////////////////////////////////////////////////////////


		template decrement( msync ms = msync.seq )
		{
			/**
			 * This operation is only legal for built-in value and pointer
			 * types, and is equivalent to an atomic "val = val - 1" operation.
			 * This function exists to facilitate use of the optimized
			 * decrement instructions provided by some architectures.  If no
			 * such instruction exists on the target platform then the
			 * operation will be performed using more traditional means.
			 * This operation is both lock-free and atomic.
			 *
			 * Returns:
			 *  The result of an atomicLoad of val immediately following the
			 *  decrement operation.  This value is not required to be equal to
			 *  the newly stored value.  Thus, competing writes are allowed to
			 *  occur between the decrement and successive load operation.
			 */
			T decrement()
			{
				return m_val;
			}
		}
	}
	else
	{
		// increment/decrement are only generated for integer and pointer
		// types; other types simply lack these members.
		static if( isValidNumericType!(T) )
		{
			////////////////////////////////////////////////////////////////////////
			// Atomic Increment
			////////////////////////////////////////////////////////////////////////


			template increment( msync ms = msync.seq )
			{
				static assert( ms == msync.raw || ms == msync.ssb ||
							   ms == msync.acq || ms == msync.rel ||
							   ms == msync.seq,
							   "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );
				T increment()
				{
					return atomicIncrement!(ms,T)( m_val );
				}
			}


			////////////////////////////////////////////////////////////////////////
			// Atomic Decrement
			////////////////////////////////////////////////////////////////////////


			template decrement( msync ms = msync.seq )
			{
				static assert( ms == msync.raw || ms == msync.ssb ||
							   ms == msync.acq || ms == msync.rel ||
							   ms == msync.seq,
							   "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );
				T decrement()
				{
					return atomicDecrement!(ms,T)( m_val );
				}
			}
		}
	}

private:
    // The protected value.  Accessed directly by the unit-test helpers below.
    T   m_val;
}


////////////////////////////////////////////////////////////////////////////////
// Support Code for Unit Tests
////////////////////////////////////////////////////////////////////////////////


private
{
    version( TangoDoc ) {} else
    {
        template testLoad( msync ms, T )
        {
            // Exercises Atomic.load with the given msync policy: the load
            // must observe both the default value and a later written value.
            void testLoad( T val = T.init + 1)
            {
                T          expect;
                Atomic!(T) a;

                assert( a.load!(ms)() == expect );
                a.m_val = val;
                expect  = val;
                assert( a.load!(ms)() == expect );
            }
        }


        template testStore( msync ms, T )
        {
            // Exercises Atomic.store with the given msync policy: the stored
            // value must be visible through the raw member afterwards.
            void testStore( T val = T.init + 1)
            {
                T          expect;
                Atomic!(T) a;

                assert( a.m_val == expect );
                expect = val;
                a.store!(ms)( expect );
                assert( a.m_val == expect );
            }
        }


        template testStoreIf( msync ms, T )
        {
            // Exercises Atomic.storeIf: first with a non-matching comparand
            // (store must not happen), then with a matching one (store must
            // happen).
            void testStoreIf( T val = T.init + 1)
            {
                T          expect;
                Atomic!(T) a;

                assert( a.m_val == expect );
                expect = val;
                // m_val still holds T.init, so comparing against val fails.
                a.storeIf!(ms)( expect, val );
                assert( a.m_val != expect );
                // Comparing against T.init matches, so the store succeeds.
                a.storeIf!(ms)( expect, T.init );
                assert( a.m_val == expect );
            }
        }


        template testIncrement( msync ms, T )
        {
            // Exercises Atomic.increment: both the stored value and the
            // returned value must equal the original value plus one.
            void testIncrement( T val = T.init + 1)
            {
                T          expect = val;
                T          got    = val;
                Atomic!(T) a;

                a.m_val = val;
                assert( a.m_val == expect && got == expect );
                expect = cast(T)( expect + 1 );
                got    = a.increment!(ms)();
                assert( a.m_val == expect && got == expect );
            }
        }


        template testDecrement( msync ms, T )
        {
            // Exercises Atomic.decrement: both the stored value and the
            // returned value must equal the original value minus one.
            void testDecrement( T val = T.init + 1)
            {
                T          expect = val;
                T          got    = val;
                Atomic!(T) a;

                a.m_val = val;
                assert( a.m_val == expect && got == expect );
                expect = cast(T)( expect - 1 );
                got    = a.decrement!(ms)();
                assert( a.m_val == expect && got == expect );
            }
        }


        template testType( T )
        {
            // Runs every atomic operation over every msync policy that
            // operation accepts, for the given type T.
            void testType( T val = T.init + 1)
            {
                // load accepts: raw, hlb, acq, seq.
                testLoad!(msync.raw, T)( val );
                testLoad!(msync.hlb, T)( val );
                testLoad!(msync.acq, T)( val );
                testLoad!(msync.seq, T)( val );

                // store accepts: raw, ssb, acq, rel, seq.
                testStore!(msync.raw, T)( val );
                testStore!(msync.ssb, T)( val );
                testStore!(msync.acq, T)( val );
                testStore!(msync.rel, T)( val );
                testStore!(msync.seq, T)( val );

                testStoreIf!(msync.raw, T)( val );
                testStoreIf!(msync.ssb, T)( val );
                testStoreIf!(msync.acq, T)( val );
                testStoreIf!(msync.rel, T)( val );
                testStoreIf!(msync.seq, T)( val );

                // increment/decrement only exist for numeric/pointer types.
                static if( isValidNumericType!(T) )
                {
                    testIncrement!(msync.raw, T)( val );
                    testIncrement!(msync.ssb, T)( val );
                    testIncrement!(msync.acq, T)( val );
                    testIncrement!(msync.rel, T)( val );
                    testIncrement!(msync.seq, T)( val );

                    testDecrement!(msync.raw, T)( val );
                    testDecrement!(msync.ssb, T)( val );
                    testDecrement!(msync.acq, T)( val );
                    testDecrement!(msync.rel, T)( val );
                    testDecrement!(msync.seq, T)( val );
                }
            }
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// Unit Tests
////////////////////////////////////////////////////////////////////////////////


debug( UnitTest )
{
    unittest
    {
        // 1-byte paths.
        testType!(bool)();

        testType!(byte)();
        testType!(ubyte)();

        // 2-byte paths.
        testType!(short)();
        testType!(ushort)();

        // 4-byte paths.
        testType!(int)();
        testType!(uint)();

        // 8-byte types: full coverage requires native 64-bit ops; platforms
        // offering only 64-bit CAS can exercise just the storeIf paths.
        version( Has64BitOps )
        {
            testType!(long)();
            testType!(ulong)();
        }
        else version( Has64BitCAS )
        {
            testStoreIf!(msync.raw, long)();
            testStoreIf!(msync.ssb, long)();
            testStoreIf!(msync.acq, long)();
            testStoreIf!(msync.rel, long)();
            testStoreIf!(msync.seq, long)();

            testStoreIf!(msync.raw, ulong)();
            testStoreIf!(msync.ssb, ulong)();
            testStoreIf!(msync.acq, ulong)();
            testStoreIf!(msync.rel, ulong)();
            testStoreIf!(msync.seq, ulong)();
        }
    }
}


////////////////////////////////////////////////////////////////////////////////
// Manual Smoke Test (debug(Atomic) builds only)
////////////////////////////////////////////////////////////////////////////////


debug(Atomic)
{
        // Manual smoke test: touches each primitive operation once.
        void main()
        {
                Atomic!(int) counter;

                counter.store (1);
                counter.increment;
                counter.decrement;
                auto snapshot = counter.load;
                counter.store (2);

                // Free-function load of a plain local.
                snapshot = atomicLoad (snapshot);
        }
}