#region CPL License
/*
Nuclex Framework
Copyright (C) 2002-2012 Nuclex Development Labs

This library is free software; you can redistribute it and/or
modify it under the terms of the IBM Common Public License as
published by the IBM Corporation; either version 1.0 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
IBM Common Public License for more details.

You should have received a copy of the IBM Common Public
License along with this library
*/
#endregion
using System;
using System.Collections.Generic;
using System.Threading;
namespace Nuclex.Support {
  /// <summary>A reverse counting semaphore</summary>
  /// <remarks>
  ///   <para>
  ///     This semaphore counts in reverse, which means you can Release() the semaphore
  ///     as often as you'd like a thread calling WaitOne() to be let through. You
  ///     can use it in the traditional sense and have any thread calling WaitOne()
  ///     make sure to call Release() afterwards, or you can, for example, Release() it
  ///     whenever work becomes available and let threads take work from the semaphore
  ///     by calling WaitOne() alone.
  ///   </para>
  ///   <para>
  ///     Implementation notes (ignore this if you just want to use the semaphore):
  ///   </para>
  ///   <para>
  ///     We could design a semaphore that uses an auto-reset event, where the thread
  ///     that gets to pass immediately sets the event again if the semaphore isn't full
  ///     yet, letting another thread pass.
  ///   </para>
  ///   <para>
  ///     However, this would mean that when the semaphore receives a large number of
  ///     wait requests, assuming it allows, for example, 25 users at once, the
  ///     thread scheduler would see only one thread become eligible for execution.
  ///     That thread would then unlock the next, and so on. In short, we would wait
  ///     25 times for the thread scheduler to wake up a thread before all users get
  ///     through.
  ///   </para>
  ///   <para>
  ///     So we chose a ManualResetEvent, which will wake up more threads than
  ///     necessary and possibly cause a period of intense competition for getting
  ///     a lock on the resource, but will make the thread scheduler see all waiting
  ///     threads become eligible for execution at once.
  ///   </para>
  /// </remarks>
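  /// <example>
  ///   <para>
  ///     One possible way to use the semaphore in the work-queue style described
  ///     above (an illustrative sketch, not the only usage pattern):
  ///   </para>
  ///   <code>
  ///     var workAvailable = new Nuclex.Support.Semaphore();
  ///
  ///     // Producer: announce one unit of work; one waiting thread may now pass
  ///     workAvailable.Release();
  ///
  ///     // Consumer: blocks until a Release() has made a unit of work claimable
  ///     workAvailable.WaitOne();
  ///     // ...take one item of work here...
  ///   </code>
  /// </example>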
#if !(XBOX360 || WINDOWS_PHONE)
  [Obsolete("Prefer the normal semaphore on Windows builds.")]
#endif
  public class Semaphore : WaitHandle {
    /// <summary>Initializes a new semaphore</summary>
    public Semaphore() {
      createEvent();
    }
    /// <summary>Initializes a new semaphore</summary>
    /// <param name="count">
    ///   Number of users that can access the resource at the same time
    /// </param>
    public Semaphore(int count) {
      this.free = count;
      createEvent();
    }
    /// <summary>Initializes a new semaphore</summary>
    /// <param name="initialCount">
    ///   Initial number of users accessing the resource
    /// </param>
    /// <param name="maximumCount">
    ///   Maximum number of users that can access the resource at the same time
    /// </param>
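    /// <example>
    ///   <para>
    ///     A small sketch of the counting behavior this constructor implies
    ///     (free slots = maximumCount - initialCount):
    ///   </para>
    ///   <code>
    ///     // Two of five slots count as taken already, so only three WaitOne()
    ///     // calls will pass before the semaphore blocks
    ///     var semaphore = new Nuclex.Support.Semaphore(2, 5);
    ///   </code>
    /// </example>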
    public Semaphore(int initialCount, int maximumCount) {
      if(initialCount > maximumCount) {
        throw new ArgumentOutOfRangeException(
          "initialCount", "Initial count must not be larger than the maximum count"
        );
      }
      this.free = maximumCount - initialCount;
      createEvent();
    }
    /// <summary>Immediately releases all resources owned by the instance</summary>
    /// <param name="explicitDisposing">
    ///   Whether Dispose() has been called explicitly
    /// </param>
    protected override void Dispose(bool explicitDisposing) {
      if(this.manualResetEvent != null) {
        base.SafeWaitHandle = null;
        this.manualResetEvent.Close();
        this.manualResetEvent = null;
      }
      base.Dispose(explicitDisposing);
    }
    /// <summary>
    ///   Waits for the resource to become available and locks it
    /// </summary>
    /// <param name="millisecondsTimeout">
    ///   Number of milliseconds to wait at most before giving up
    /// </param>
    /// <param name="exitContext">
    ///   True to exit the synchronization domain for the context before the wait (if
    ///   in a synchronized context), and reacquire it afterward; otherwise, false.
    /// </param>
    /// <returns>
    ///   True if the resource was available and is now locked, false if
    ///   the timeout has been reached.
    /// </returns>
#if NO_EXITCONTEXT
    public override bool WaitOne(int millisecondsTimeout) {
#else
    public override bool WaitOne(int millisecondsTimeout, bool exitContext) {
#endif
      for (; ; ) {
        // Lock the resource - even if it is full. We will correct our mistake later
        // if we overcommitted the resource.
        int newFree = Interlocked.Decrement(ref this.free);
        // If we got the resource, let the thread pass without further processing.
        if(newFree >= 0) {
          if(newFree > 0) {
            this.manualResetEvent.Set();
          }
          return true;
        }
        // We overcommitted the resource, count it back up again. We know that, at least
        // moments ago, the resource was busy, so block the event.
        this.manualResetEvent.Reset();
        Thread.MemoryBarrier();
        newFree = Interlocked.Increment(ref this.free);
        // Unless we have been preempted by a Release(), we now have to wait for the
        // resource to become available.
        if(newFree <= 0) {
#if NO_EXITCONTEXT
          if(!this.manualResetEvent.WaitOne(millisecondsTimeout)) {
#else
          if(!this.manualResetEvent.WaitOne(millisecondsTimeout, exitContext)) {
#endif
            return false;
          }
        }
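        // Either the event has been signaled by a Release() or a Release() preempted
        // us before we could wait - loop around and try to claim the resource again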
      } // for(; ; )
    }
    /// <summary>
    ///   Waits for the resource to become available and locks it
    /// </summary>
    /// <returns>
    ///   True if the resource was available and is now locked, false if
    ///   the timeout has been reached.
    /// </returns>
    public override bool WaitOne() {
#if NO_EXITCONTEXT
      return WaitOne(-1);
#else
      return WaitOne(-1, false);
#endif
    }
    /// <summary>
    ///   Waits for the resource to become available and locks it
    /// </summary>
    /// <param name="timeout">
    ///   Time span to wait for the lock before giving up
    /// </param>
    /// <param name="exitContext">
    ///   True to exit the synchronization domain for the context before the wait (if
    ///   in a synchronized context), and reacquire it afterward; otherwise, false.
    /// </param>
    /// <returns>
    ///   True if the resource was available and is now locked, false if
    ///   the timeout has been reached.
    /// </returns>
#if NO_EXITCONTEXT
    public override bool WaitOne(TimeSpan timeout) {
#else
    public override bool WaitOne(TimeSpan timeout, bool exitContext) {
#endif
      long totalMilliseconds = (long)timeout.TotalMilliseconds;
      if((totalMilliseconds < -1) || (totalMilliseconds > int.MaxValue)) {
        throw new ArgumentOutOfRangeException(
          "timeout", "Timeout must be either -1 or positive and less than 2^31"
        );
      }
#if NO_EXITCONTEXT
      return WaitOne((int)totalMilliseconds);
#else
      return WaitOne((int)totalMilliseconds, exitContext);
#endif
    }
    /// <summary>
    ///   Releases a lock on the resource. Note that for a reverse counting semaphore,
    ///   it is legal to Release() the resource before locking it.
    /// </summary>
    public void Release() {
      // Release one lock on the resource
      Interlocked.Increment(ref this.free);
      // Wake up any threads waiting for the resource to become available
      this.manualResetEvent.Set();
    }
    /// <summary>Creates the event used to make threads wait for the resource</summary>
    private void createEvent() {
      this.manualResetEvent = new ManualResetEvent(false);
      base.SafeWaitHandle = this.manualResetEvent.SafeWaitHandle;
    }
    /// <summary>Event used to make threads wait if the semaphore is full</summary>
    private ManualResetEvent manualResetEvent;
    /// <summary>Number of users that can still access the resource</summary>
    /// <remarks>
    ///   Since this is a reverse counting semaphore, this value is positive while
    ///   the resource is still available, zero when the semaphore is full and may
    ///   temporarily drop below zero while threads contend in WaitOne().
    /// </remarks>
    private int free;
  }
} // namespace Nuclex.Support