Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

cc.h File Reference

#include <ntos.h>
#include <NtIoLogc.h>
#include <FsRtl.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

Go to the source code of this file.

Classes

struct  _VACB_LEVEL_REFERENCE
struct  _VACB
struct  _PRIVATE_CACHE_MAP
struct  _SHARED_CACHE_MAP
struct  _SHARED_CACHE_MAP_LIST_CURSOR
struct  _BITMAP_RANGE
struct  _MBCB
struct  _BCB
struct  _OBCB
struct  _DEFERRED_WRITE
struct  _LAZY_WRITER
struct  _WORK_QUEUE_ENTRY
struct  _MDL_WRITE

Defines

#define CcAcquireMasterLock(OldIrql)   ExAcquireSpinLock( &CcMasterSpinLock, OldIrql )
#define CcReleaseMasterLock(OldIrql)   ExReleaseSpinLock( &CcMasterSpinLock, OldIrql )
#define CcAcquireMasterLockAtDpcLevel()   ExAcquireSpinLockAtDpcLevel( &CcMasterSpinLock )
#define CcReleaseMasterLockFromDpcLevel()   ExReleaseSpinLockFromDpcLevel( &CcMasterSpinLock )
#define CcAcquireVacbLock(OldIrql)   ExAcquireSpinLock( &CcVacbSpinLock, OldIrql )
#define CcReleaseVacbLock(OldIrql)   ExReleaseSpinLock( &CcVacbSpinLock, OldIrql )
#define CcAcquireVacbLockAtDpcLevel()   ExAcquireSpinLockAtDpcLevel( &CcVacbSpinLock )
#define CcReleaseVacbLockFromDpcLevel()   ExReleaseSpinLockFromDpcLevel( &CcVacbSpinLock )
#define FsRtlAllocatePool(a, b)   FsRtlAllocatePoolWithTag(a,b,' cC')
#define FsRtlAllocatePoolWithQuota(a, b)   FsRtlAllocatePoolWithQuotaTag(a,b,' cC')
#define ExAllocatePool(a, b)   ExAllocatePoolWithTag(a,b,' cC')
#define ExAllocatePoolWithQuota(a, b)   ExAllocatePoolWithQuotaTag(a,b,' cC')
#define CACHE_NTC_SHARED_CACHE_MAP   (0x2FF)
#define CACHE_NTC_PRIVATE_CACHE_MAP   (0x2FE)
#define CACHE_NTC_BCB   (0x2FD)
#define CACHE_NTC_DEFERRED_WRITE   (0x2FC)
#define CACHE_NTC_MBCB   (0x2FB)
#define CACHE_NTC_OBCB   (0x2FA)
#define CACHE_NTC_MBCB_GRANDE   (0x2F9)
#define CACHE_BUG_CHECK_CACHEDAT   (0x00010000)
#define CACHE_BUG_CHECK_CACHESUB   (0x00020000)
#define CACHE_BUG_CHECK_COPYSUP   (0x00030000)
#define CACHE_BUG_CHECK_FSSUP   (0x00040000)
#define CACHE_BUG_CHECK_LAZYRITE   (0x00050000)
#define CACHE_BUG_CHECK_LOGSUP   (0x00060000)
#define CACHE_BUG_CHECK_MDLSUP   (0x00070000)
#define CACHE_BUG_CHECK_PINSUP   (0x00080000)
#define CACHE_BUG_CHECK_VACBSUP   (0x00090000)
#define CcBugCheck(A, B, C)   { KeBugCheckEx(CACHE_MANAGER, BugCheckFileId | __LINE__, A, B, C ); }
#define DEFAULT_CREATE_MODULO   ((ULONG)(0x00100000))
#define DEFAULT_EXTEND_MODULO   ((ULONG)(0x00100000))
#define SEQUENTIAL_MAP_LIMIT   ((ULONG)(0x00080000))
#define MAX_READ_AHEAD   (8 * 1024 * 1024)
#define MAX_WRITE_BEHIND   (MM_MAXIMUM_DISK_IO_SIZE)
#define WRITE_CHARGE_THRESHOLD   (64 * PAGE_SIZE)
#define MAX_ZERO_TRANSFER   (PAGE_SIZE * 128)
#define MIN_ZERO_TRANSFER   (0x10000)
#define MAX_ZEROS_IN_CACHE   (0x10000)
#define VACB_LEVEL_SHIFT   (7)
#define VACB_LEVEL_BLOCK_SIZE   ((1 << VACB_LEVEL_SHIFT) * sizeof(PVOID))
#define VACB_LAST_INDEX_FOR_LEVEL   ((1 << VACB_LEVEL_SHIFT) - 1)
#define VACB_SIZE_OF_FIRST_LEVEL   (1 << (VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT))
#define VACB_NUMBER_OF_LEVELS   (((63 - VACB_OFFSET_SHIFT)/VACB_LEVEL_SHIFT) + 1)
#define MBCB_BITMAP_BLOCK_SIZE   (VACB_LEVEL_BLOCK_SIZE)
#define MBCB_BITMAP_RANGE   (MBCB_BITMAP_BLOCK_SIZE * 8 * PAGE_SIZE)
#define MBCB_BITMAP_INITIAL_SIZE   (2 * sizeof(BITMAP_RANGE))
#define BEGIN_BCB_LIST_ARRAY   (0x200000)
#define SIZE_PER_BCB_LIST   (VACB_MAPPING_GRANULARITY * 2)
#define BCB_LIST_SHIFT   (VACB_OFFSET_SHIFT + 1)
#define GetBcbListHead(SCM, OFF, FAILSUCC)
#define CcLockVacbLevel(SCM, OFF)
#define CcUnlockVacbLevel(SCM, OFF)
#define NOISE_BITS   (0x7)
#define LAZY_WRITER_IDLE_DELAY   ((LONG)(10000000))
#define LAZY_WRITER_COLLISION_DELAY   ((LONG)(1000000))
#define LAZY_WRITER_MAX_AGE_TARGET   ((ULONG)(8))
#define CC_REQUEUE   35422
#define mm   (0x100)
#define FlagOn(F, SF)
#define BooleanFlagOn(F, SF)
#define SetFlag(F, SF)
#define ClearFlag(F, SF)
#define QuadAlign(P)
#define CcAddToLog(LOG, ACTION, REASON)
#define PREALLOCATED_VACBS   (4)
#define VACB_SPECIAL_REFERENCE   ((PVACB) ~0)
#define VACB_SPECIAL_DEREFERENCE   ((PVACB) ~1)
#define VACB_SPECIAL_FIRST_VALID   VACB_SPECIAL_DEREFERENCE
#define CcIncrementOpenCount(SCM, REASON)
#define CcDecrementOpenCount(SCM, REASON)
#define DISABLE_READ_AHEAD   0x0001
#define DISABLE_WRITE_BEHIND   0x0002
#define PIN_ACCESS   0x0004
#define TRUNCATE_REQUIRED   0x0010
#define WRITE_QUEUED   0x0020
#define ONLY_SEQUENTIAL_ONLY_SEEN   0x0040
#define ACTIVE_PAGE_IS_DIRTY   0x0080
#define BEING_CREATED   0x0100
#define MODIFIED_WRITE_DISABLED   0x0200
#define LAZY_WRITE_OCCURRED   0x0400
#define IS_CURSOR   0x0800
#define RANDOM_ACCESS_SEEN   0x1000
#define GetActiveVacb(SCM, IRQ, V, P, D)
#define GetActiveVacbAtDpcLevel(SCM, V, P, D)
#define SetActiveVacb(SCM, IRQ, V, P, D)
#define ZERO_FIRST_PAGE   1
#define ZERO_MIDDLE_PAGES   2
#define ZERO_LAST_PAGE   4
#define CcAllocateWorkQueueEntry()   (PWORK_QUEUE_ENTRY)ExAllocateFromPPNPagedLookasideList(LookasideTwilightList)
#define CcFreeWorkQueueEntry(_entry_)   ExFreeToPPNPagedLookasideList(LookasideTwilightList, (_entry_))
#define try_return(S)   { S; goto try_exit; }
#define DebugTrace(INDENT, LEVEL, X, Y)   {NOTHING;}
#define DebugTrace2(INDENT, LEVEL, X, Y, Z)   {NOTHING;}
#define DebugDump(STR, LEVEL, PTR)   {NOTHING;}

Typedefs

typedef _VACB_LEVEL_REFERENCE VACB_LEVEL_REFERENCE
typedef _VACB_LEVEL_REFERENCE * PVACB_LEVEL_REFERENCE
typedef _VACB VACB
typedef _VACB * PVACB
typedef _PRIVATE_CACHE_MAP PRIVATE_CACHE_MAP
typedef PRIVATE_CACHE_MAP * PPRIVATE_CACHE_MAP
typedef _SHARED_CACHE_MAP SHARED_CACHE_MAP
typedef SHARED_CACHE_MAP * PSHARED_CACHE_MAP
typedef _SHARED_CACHE_MAP_LIST_CURSOR SHARED_CACHE_MAP_LIST_CURSOR
typedef _SHARED_CACHE_MAP_LIST_CURSOR * PSHARED_CACHE_MAP_LIST_CURSOR
typedef _BITMAP_RANGE BITMAP_RANGE
typedef _BITMAP_RANGE * PBITMAP_RANGE
typedef _MBCB MBCB
typedef MBCB * PMBCB
typedef _BCB BCB
typedef BCB * PBCB
typedef _OBCB OBCB
typedef OBCB * POBCB
typedef _DEFERRED_WRITE DEFERRED_WRITE
typedef _DEFERRED_WRITE * PDEFERRED_WRITE
typedef _LAZY_WRITER LAZY_WRITER
typedef enum _WORKER_FUNCTION WORKER_FUNCTION
typedef _WORK_QUEUE_ENTRY WORK_QUEUE_ENTRY
typedef _WORK_QUEUE_ENTRY * PWORK_QUEUE_ENTRY
typedef _MDL_WRITE MDL_WRITE
typedef _MDL_WRITE * PMDL_WRITE

Enumerations

enum  _WORKER_FUNCTION {
  Noop = 0, ReadAhead, WriteBehind, LazyWriteScan,
  EventSet
}
enum  UNMAP_ACTIONS { UNPIN, UNREF, SET_CLEAN }

Functions

VOID CcPostDeferredWrites ()
BOOLEAN CcPinFileData (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN ReadOnly, IN BOOLEAN WriteOnly, IN ULONG Flags, OUT PBCB *Bcb, OUT PVOID *BaseAddress, OUT PLARGE_INTEGER BeyondLastByte)
VOID FASTCALL CcUnpinFileData (IN OUT PBCB Bcb, IN BOOLEAN ReadOnly, IN UNMAP_ACTIONS UnmapAction)
VOID FASTCALL CcDeallocateBcb (IN PBCB Bcb)
VOID FASTCALL CcPerformReadAhead (IN PFILE_OBJECT FileObject)
VOID CcSetDirtyInMask (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset, IN ULONG Length)
VOID FASTCALL CcWriteBehind (IN PSHARED_CACHE_MAP SharedCacheMap, IN PIO_STATUS_BLOCK IoStatus)
BOOLEAN CcMapAndRead (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG ZeroFlags, IN BOOLEAN Wait, IN PVOID BaseAddress)
VOID CcFreeActiveVacb (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVACB ActiveVacb OPTIONAL, IN ULONG ActivePage, IN ULONG PageIsDirty)
VOID CcMapAndCopy (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVOID UserBuffer, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG ZeroFlags, IN BOOLEAN WriteThrough)
VOID CcScanDpc (IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
VOID CcScheduleLazyWriteScan ()
VOID CcStartLazyWriter (IN PVOID NotUsed)
VOID FASTCALL CcPostWorkQueue (IN PWORK_QUEUE_ENTRY WorkQueueEntry, IN PLIST_ENTRY WorkQueue)
VOID CcWorkerThread (PVOID ExWorkQueueItem)
VOID FASTCALL CcDeleteSharedCacheMap (IN PSHARED_CACHE_MAP SharedCacheMap, IN KIRQL ListIrql, IN ULONG ReleaseFile)
LONG CcCopyReadExceptionFilter (IN PEXCEPTION_POINTERS ExceptionPointer, IN PNTSTATUS ExceptionCode)
LONG CcExceptionFilter (IN NTSTATUS ExceptionCode)
VOID CcInitializeVacbs ()
PVOID CcGetVirtualAddressIfMapped (IN PSHARED_CACHE_MAP SharedCacheMap, IN LONGLONG FileOffset, OUT PVACB *Vacb, OUT PULONG ReceivedLength)
PVOID CcGetVirtualAddress (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER FileOffset, OUT PVACB *Vacb, OUT PULONG ReceivedLength)
VOID FASTCALL CcFreeVirtualAddress (IN PVACB Vacb)
VOID CcReferenceFileOffset (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER FileOffset)
VOID CcDereferenceFileOffset (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER FileOffset)
VOID CcWaitOnActiveCount (IN PSHARED_CACHE_MAP SharedCacheMap)
VOID FASTCALL CcCreateVacbArray (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER NewSectionSize)
VOID CcExtendVacbArray (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER NewSectionSize)
BOOLEAN FASTCALL CcUnmapVacbArray (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset OPTIONAL, IN ULONG Length, IN BOOLEAN UnmapBehind)
VOID CcAdjustVacbLevelLockCount (IN PSHARED_CACHE_MAP SharedCacheMap, IN LONGLONG FileOffset, IN LONG Adjustment)
PLIST_ENTRY CcGetBcbListHeadLargeOffset (IN PSHARED_CACHE_MAP SharedCacheMap, IN LONGLONG FileOffset, IN BOOLEAN FailToSuccessor)
ULONG CcPrefillVacbLevelZone (IN ULONG NumberNeeded, OUT PKIRQL OldIrql, IN ULONG NeedBcbListHeads)
VOID CcDrainVacbLevelZone ()
_inline PVACB * CcAllocateVacbLevel (IN BOOLEAN AllocatingBcbListHeads)
_inline VOID CcDeallocateVacbLevel (IN PVACB *Entry, IN BOOLEAN DeallocatingBcbListHeads)
_inline PVACB_LEVEL_REFERENCE VacbLevelReference (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVACB *VacbArray, IN ULONG Level)
_inline ULONG IsVacbLevelReferenced (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVACB *VacbArray, IN ULONG Level)

Variables

PFN_COUNT MmAvailablePages
KSPIN_LOCK CcMasterSpinLock
KSPIN_LOCK CcBcbSpinLock
LIST_ENTRY CcCleanSharedCacheMapList
SHARED_CACHE_MAP_LIST_CURSOR CcDirtySharedCacheMapList
SHARED_CACHE_MAP_LIST_CURSOR CcLazyWriterCursor
NPAGED_LOOKASIDE_LIST CcTwilightLookasideList
KSPIN_LOCK CcWorkQueueSpinlock
ULONG CcNumberWorkerThreads
ULONG CcNumberActiveWorkerThreads
LIST_ENTRY CcIdleWorkerThreadList
LIST_ENTRY CcExpressWorkQueue
LIST_ENTRY CcRegularWorkQueue
LIST_ENTRY CcPostTickWorkQueue
BOOLEAN CcQueueThrottle
ULONG CcIdleDelayTick
LARGE_INTEGER CcNoDelay
LARGE_INTEGER CcFirstDelay
LARGE_INTEGER CcIdleDelay
LARGE_INTEGER CcCollisionDelay
LARGE_INTEGER CcTargetCleanDelay
LAZY_WRITER LazyWriter
KSPIN_LOCK CcVacbSpinLock
ULONG CcNumberVacbs
PVACB CcVacbs
PVACB CcBeyondVacbs
LIST_ENTRY CcVacbLru
KSPIN_LOCK CcDeferredWriteSpinLock
LIST_ENTRY CcDeferredWrites
ULONG CcDirtyPageThreshold
ULONG CcDirtyPageTarget
ULONG CcDirtyPagesLastScan
ULONG CcPagesYetToWrite
ULONG CcPagesWrittenLastTime
ULONG CcAvailablePagesThreshold
ULONG CcTotalDirtyPages
ULONG CcTune
LONG CcAggressiveZeroCount
LONG CcAggressiveZeroThreshold
ULONG CcLazyWriteHotSpots
MM_SYSTEMSIZE CcCapturedSystemSize
ULONG CcMaxVacbLevelsSeen
ULONG CcVacbLevelEntries
PVACB * CcVacbLevelFreeList
ULONG CcVacbLevelWithBcbsEntries
PVACB * CcVacbLevelWithBcbsFreeList


Define Documentation

#define ACTIVE_PAGE_IS_DIRTY   0x0080
 

Definition at line 1065 of file cc.h.

Referenced by CcCopyWrite(), CcFastCopyWrite(), CcFreeActiveVacb(), and CcMapAndCopy().

#define BCB_LIST_SHIFT   (VACB_OFFSET_SHIFT + 1)
 

Definition at line 387 of file cc.h.

#define BEGIN_BCB_LIST_ARRAY   (0x200000)
 

Definition at line 385 of file cc.h.

Referenced by CcCreateVacbArray(), and CcExtendVacbArray().

#define BEING_CREATED   0x0100
 

Definition at line 1071 of file cc.h.

Referenced by CcInitializeCacheMap().

#define BooleanFlagOn F,
SF   ) 
 

Value:

( \ (BOOLEAN)(((F) & (SF)) != 0) \ )

Definition at line 501 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcCanIWrite(), CcCopyWrite(), CcDeferWrite(), CcFastCopyWrite(), CcPinMappedData(), CcReleaseByteRangeFromWrite(), FsRtlPrivateCheckWaitingLocks(), FsRtlProcessFileLock(), UdfCommonCreate(), UdfCommonRead(), UdfEnumerateIndex(), UdfIllegalFcbAccess(), UdfInitializeEnumeration(), UdfNotifyChangeDirectory(), and UdfQueryAlternateNameInfo().

#define CACHE_BUG_CHECK_CACHEDAT   (0x00010000)
 

Definition at line 175 of file cc.h.

#define CACHE_BUG_CHECK_CACHESUB   (0x00020000)
 

Definition at line 176 of file cc.h.

#define CACHE_BUG_CHECK_COPYSUP   (0x00030000)
 

Definition at line 177 of file cc.h.

#define CACHE_BUG_CHECK_FSSUP   (0x00040000)
 

Definition at line 178 of file cc.h.

#define CACHE_BUG_CHECK_LAZYRITE   (0x00050000)
 

Definition at line 179 of file cc.h.

#define CACHE_BUG_CHECK_LOGSUP   (0x00060000)
 

Definition at line 180 of file cc.h.

#define CACHE_BUG_CHECK_MDLSUP   (0x00070000)
 

Definition at line 181 of file cc.h.

#define CACHE_BUG_CHECK_PINSUP   (0x00080000)
 

Definition at line 182 of file cc.h.

#define CACHE_BUG_CHECK_VACBSUP   (0x00090000)
 

Definition at line 183 of file cc.h.

#define CACHE_NTC_BCB   (0x2FD)
 

Definition at line 155 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcAllocateInitializeBcb(), CcCalculateVacbLevelLockCount(), CcDeallocateBcb(), CcDeleteSharedCacheMap(), CcFindBcb(), CcGetDirtyPages(), CcGetFlushedValidData(), CcGetLsnForFileObject(), CcPinMappedData(), CcReleaseByteRangeFromWrite(), CcRemapBcb(), CcSetVacbLargeOffset(), and CcUnpinFileData().

#define CACHE_NTC_DEFERRED_WRITE   (0x2FC)
 

Definition at line 156 of file cc.h.

Referenced by CcCanIWrite(), and CcDeferWrite().

#define CACHE_NTC_MBCB   (0x2FB)
 

Definition at line 157 of file cc.h.

Referenced by CcSetDirtyInMask().

#define CACHE_NTC_MBCB_GRANDE   (0x2F9)
 

Definition at line 159 of file cc.h.

Referenced by CcSetDirtyInMask().

#define CACHE_NTC_OBCB   (0x2FA)
 

Definition at line 158 of file cc.h.

Referenced by CcAllocateObcb(), CcRemapBcb(), CcSetBcbOwnerPointer(), CcSetDirtyPinnedData(), CcUnpinData(), and CcUnpinDataForThread().

#define CACHE_NTC_PRIVATE_CACHE_MAP   (0x2FE)
 

Definition at line 154 of file cc.h.

Referenced by CcInitializeCacheMap().

#define CACHE_NTC_SHARED_CACHE_MAP   (0x2FF)
 

Definition at line 153 of file cc.h.

Referenced by CcInitializeCacheMap(), and CcUnpinFileData().

#define CC_REQUEUE   35422
 

Definition at line 442 of file cc.h.

Referenced by CcFlushCache(), CcWorkerThread(), and CcWriteBehind().

#define CcAcquireMasterLock OldIrql   )     ExAcquireSpinLock( &CcMasterSpinLock, OldIrql )
 

Definition at line 64 of file cc.h.

Referenced by CcCanIWrite(), CcDeferWrite(), CcDeleteSharedCacheMap(), CcFlushCache(), CcFreeActiveVacb(), CcGetDirtyPages(), CcGetFileObjectFromSectionPtrs(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheMap(), CcIsThereDirtyData(), CcLazyWriteScan(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPrepareMdlWrite(), CcPurgeCacheSection(), CcScheduleReadAhead(), CcSetAdditionalCacheAttributes(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWaitForCurrentLazyWriterActivity(), CcWriteBehind(), and CcZeroEndOfLastPage().

 
#define CcAcquireMasterLockAtDpcLevel  )     ExAcquireSpinLockAtDpcLevel( &CcMasterSpinLock )
 

Definition at line 70 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcDeleteMbcb(), CcGetFlushedValidData(), CcGetVacbMiss(), CcSetDirtyInMask(), CcSetDirtyPinnedData(), CcUnpinFileData(), and CcWriteBehind().

#define CcAcquireVacbLock OldIrql   )     ExAcquireSpinLock( &CcVacbSpinLock, OldIrql )
 

Definition at line 76 of file cc.h.

Referenced by CcDereferenceFileOffset(), CcDrainVacbLevelZone(), CcExtendVacbArray(), CcFreeVirtualAddress(), CcGetVacbMiss(), CcGetVirtualAddress(), CcGetVirtualAddressIfMapped(), CcMapAndCopy(), CcPrefillVacbLevelZone(), CcRemapBcb(), CcUnmapVacbArray(), and CcWaitOnActiveCount().

 
#define CcAcquireVacbLockAtDpcLevel  )     ExAcquireSpinLockAtDpcLevel( &CcVacbSpinLock )
 

Definition at line 82 of file cc.h.

Referenced by CcAllocateInitializeBcb(), CcDeleteMbcb(), CcExtendVacbArray(), CcSetDirtyInMask(), and CcUnpinFileData().

#define CcAddToLog LOG,
ACTION,
REASON   ) 
 

Definition at line 558 of file cc.h.

 
#define CcAllocateWorkQueueEntry  )     (PWORK_QUEUE_ENTRY)ExAllocateFromPPNPagedLookasideList(LookasideTwilightList)
 

Definition at line 1847 of file cc.h.

Referenced by CcLazyWriteScan(), CcScanDpc(), CcScheduleReadAhead(), and CcWaitForCurrentLazyWriterActivity().

#define CcBugCheck A,
B,
 )     { KeBugCheckEx(CACHE_MANAGER, BugCheckFileId | __LINE__, A, B, C ); }
 

Definition at line 185 of file cc.h.

Referenced by CcAllocateInitializeBcb(), CcInitializeCacheManager(), CcLazyWriteScan(), and CcUnpinFileData().

#define CcDecrementOpenCount SCM,
REASON   ) 
 

Value:

{ \ (SCM)->OpenCount -= 1; \ if (REASON != 0) { \ CcAddToLog( &(SCM)->OpenCountLog, REASON, -1 ); \ } \ }

Definition at line 1010 of file cc.h.

Referenced by CcFlushCache(), CcGetDirtyPages(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheMap(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPurgeCacheSection(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWriteBehind(), and CcZeroEndOfLastPage().

#define CcFreeWorkQueueEntry _entry_   )     ExFreeToPPNPagedLookasideList(LookasideTwilightList, (_entry_))
 

Definition at line 1850 of file cc.h.

Referenced by CcWorkerThread().

#define CcIncrementOpenCount SCM,
REASON   ) 
 

Value:

{ \ (SCM)->OpenCount += 1; \ if (REASON != 0) { \ CcAddToLog( &(SCM)->OpenCountLog, REASON, 1 ); \ } \ }

Definition at line 1003 of file cc.h.

Referenced by CcFlushCache(), CcGetDirtyPages(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheMap(), CcPrepareMdlWrite(), CcPurgeCacheSection(), CcScheduleReadAhead(), CcSetFileSizes(), CcWriteBehind(), and CcZeroEndOfLastPage().

#define CcLockVacbLevel SCM,
OFF   ) 
 

Value:

{ \ if (((SCM)->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) && \ FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) { \ CcAdjustVacbLevelLockCount((SCM),(OFF), +1);} \ }

Definition at line 403 of file cc.h.

Referenced by CcAllocateInitializeBcb().

#define CcReleaseMasterLock OldIrql   )     ExReleaseSpinLock( &CcMasterSpinLock, OldIrql )
 

Definition at line 67 of file cc.h.

Referenced by CcCanIWrite(), CcDeferWrite(), CcDeleteSharedCacheMap(), CcFlushCache(), CcFreeActiveVacb(), CcGetDirtyPages(), CcGetFileObjectFromSectionPtrs(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheMap(), CcIsThereDirtyData(), CcLazyWriteScan(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPrepareMdlWrite(), CcPurgeCacheSection(), CcScheduleReadAhead(), CcSetAdditionalCacheAttributes(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWaitForCurrentLazyWriterActivity(), CcWriteBehind(), and CcZeroEndOfLastPage().

 
#define CcReleaseMasterLockFromDpcLevel  )     ExReleaseSpinLockFromDpcLevel( &CcMasterSpinLock )
 

Definition at line 73 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcDeleteMbcb(), CcGetVacbMiss(), CcSetDirtyInMask(), CcSetDirtyPinnedData(), CcUnpinFileData(), and CcWriteBehind().

#define CcReleaseVacbLock OldIrql   )     ExReleaseSpinLock( &CcVacbSpinLock, OldIrql )
 

Definition at line 79 of file cc.h.

Referenced by CcDereferenceFileOffset(), CcDrainVacbLevelZone(), CcExtendVacbArray(), CcFreeVirtualAddress(), CcGetVacbMiss(), CcGetVirtualAddress(), CcGetVirtualAddressIfMapped(), CcMapAndCopy(), CcPrefillVacbLevelZone(), CcReferenceFileOffset(), CcRemapBcb(), CcSetDirtyInMask(), CcUnmapVacbArray(), and CcWaitOnActiveCount().

 
#define CcReleaseVacbLockFromDpcLevel  )     ExReleaseSpinLockFromDpcLevel( &CcVacbSpinLock )
 

Definition at line 85 of file cc.h.

Referenced by CcAllocateInitializeBcb(), CcDeleteMbcb(), CcExtendVacbArray(), CcSetDirtyInMask(), and CcUnpinFileData().

#define CcUnlockVacbLevel SCM,
OFF   ) 
 

Value:

{ \ if (((SCM)->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) && \ FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) { \ CcAdjustVacbLevelLockCount((SCM),(OFF), -1);} \ }

Definition at line 409 of file cc.h.

Referenced by CcDeleteSharedCacheMap(), and CcUnpinFileData().

#define ClearFlag F,
SF   ) 
 

Value:

{ \ (F) &= ~(SF); \ }

Definition at line 509 of file cc.h.

Referenced by CcFreeActiveVacb(), CcInitializeCacheMap(), CcIsThereDirtyData(), CcLazyWriteScan(), CcSetAdditionalCacheAttributes(), CcWriteBehind(), FsRtlNotifyCompleteIrpList(), FsRtlNotifyFullChangeDirectory(), FsRtlNotifyFullReportChange(), FsRtlOpBatchBreakClosePending(), FsRtlOplockBreakToNone(), LfsCloseLogFile(), LfsFindFirstIo(), LfsFindLastLsn(), LfsFlushLfcb(), LfsFlushToLsnPriv(), LfsFreeSpanningBuffer(), LfsGetLbcb(), LfsInitializeLogFileService(), LfsPrepareLfcbForLogRecord(), LfsRestartLogFile(), LfsSetBaseLsnPriv(), LfsWriteLfsRestart(), LfsWriteLogRecordIntoLogPage(), UdfCleanupIrpContext(), UdfCommonCleanup(), UdfCommonRead(), UdfDecodeFileObject(), UdfDismountVcb(), UdfFindAnchorVolumeDescriptor(), UdfFspClose(), UdfInvalidateVolumes(), UdfLookupNextDirEntry(), UdfMountVolume(), UdfNonCachedRead(), UdfOplockRequest(), UdfProcessException(), UdfQueryDirectory(), UdfQueryFsAttributeInfo(), UdfTeardownStructures(), UdfUninitializeFcbMcb(), UdfUnlockVolumeInternal(), and UdfVerifyVolume().

#define DebugDump STR,
LEVEL,
PTR   )     {NOTHING;}
 

Definition at line 2294 of file cc.h.

#define DebugTrace INDENT,
LEVEL,
X,
 )     {NOTHING;}
 

Definition at line 2290 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcCopyRead(), CcCopyWrite(), CcDeleteSharedCacheMap(), CcExceptionFilter(), CcFastCopyRead(), CcFastCopyWrite(), CcFindBcb(), CcFlushCache(), CcGetVacbMiss(), CcInitializeCacheMap(), CcLazyWriteScan(), CcMapAndCopy(), CcMapData(), CcMdlRead(), CcMdlReadComplete2(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPinFileData(), CcPinMappedData(), CcPinRead(), CcPostWorkQueue(), CcPrepareMdlWrite(), CcPreparePinWrite(), CcPurgeCacheSection(), CcReleaseByteRangeFromWrite(), CcScheduleReadAhead(), CcSetDirtyPinnedData(), CcSetFileSizes(), CcSetValidData(), CcUninitializeCacheMap(), CcUnmapAndPurge(), CcUnmapVacb(), CcUnpinData(), CcUnpinDataForThread(), CcUnpinFileData(), CcUnpinRepinnedBcb(), CcWorkerThread(), CcWriteBehind(), CcZeroData(), FsRtlAcknowledgeOplockBreak(), FsRtlAddLargeMcbEntry(), FsRtlAllocateOplock(), FsRtlCancelExclusiveIrp(), FsRtlCancelNotify(), FsRtlCancelOplockIIIrp(), FsRtlCancelWaitIrp(), FsRtlCheckLockForReadAccess(), FsRtlCheckLockForWriteAccess(), FsRtlCheckNoExclusiveConflict(), FsRtlCheckOplock(), FsRtlCompletionRoutinePriv(), FsRtlCurrentBatchOplock(), FsRtlFastCheckLockForRead(), FsRtlFastCheckLockForWrite(), FsRtlFastUnlockSingleExclusive(), FsRtlFastUnlockSingleShared(), FsRtlGetNextFileLock(), FsRtlGetNextLargeMcbEntry(), FsRtlInitializeFileLock(), FsRtlInitializeLargeMcb(), FsRtlInitializeOplock(), FsRtlIsDbcsInExpression(), FsRtlIsNameInExpressionPrivate(), FsRtlIsNotifyOnList(), FsRtlLookupLargeMcbEntry(), FsRtlLookupLastLargeMcbEntry(), FsRtlLookupLastLargeMcbEntryAndIndex(), FsRtlNotifyChangeDirectory(), FsRtlNotifyCleanup(), FsRtlNotifyCompleteIrp(), FsRtlNotifyCompleteIrpList(), FsRtlNotifyCompletion(), FsRtlNotifyFullChangeDirectory(), FsRtlNotifyFullReportChange(), FsRtlNotifyInitializeSync(), FsRtlNotifyReportChange(), FsRtlNotifySetCancelRoutine(), FsRtlNotifyUninitializeSync(), FsRtlNotifyUpdateBuffer(), FsRtlNumberOfRunsInLargeMcb(), FsRtlOpBatchBreakClosePending(), 
FsRtlOplockBreakNotify(), FsRtlOplockBreakToII(), FsRtlOplockBreakToNone(), FsRtlOplockCleanup(), FsRtlOplockFsctrl(), FsRtlOplockIsFastIoPossible(), FsRtlPrivateCancelFileLockIrp(), FsRtlPrivateCheckWaitingLocks(), FsRtlPrivateFastUnlockAll(), FsRtlPrivateInsertSharedLock(), FsRtlPrivateLock(), FsRtlProcessFileLock(), FsRtlRemoveAndCompleteIrp(), FsRtlRemoveAndCompleteWaitIrp(), FsRtlRemoveLargeMcbEntry(), FsRtlRemoveMcbEntry(), FsRtlRemoveMcbEntryPrivate(), FsRtlRequestExclusiveOplock(), FsRtlRequestOplockII(), FsRtlSplitLargeMcb(), FsRtlTruncateLargeMcb(), FsRtlUninitializeFileLock(), FsRtlUninitializeLargeMcb(), FsRtlUninitializeOplock(), FsRtlWaitOnIrp(), IsUdfsVolume(), LfsAddClientToList(), LfsAllocateLfcb(), LfsAllocateSpanningBuffer(), LfsCheckSubsequentLogPage(), LfsCloseLogFile(), LfsCopyReadLogRecord(), LfsCurrentAvailSpace(), LfsDeallocateLfcb(), LfsFindClientNextLsn(), LfsFindCurrentAvail(), LfsFindFirstIo(), LfsFindLastLsn(), LfsFindLogRecord(), LfsFindNextLsn(), LfsFindOldestClientLsn(), LfsFlushLbcb(), LfsFlushLfcb(), LfsFlushToLsn(), LfsFlushToLsnPriv(), LfsForceWrite(), LfsFreeSpanningBuffer(), LfsGetLbcb(), LfsInitializeLogFile(), LfsInitializeLogFilePriv(), LfsInitializeLogFileService(), LfsLsnFinalOffset(), LfsNextLogPageOffset(), LfsNormalizeBasicLogFile(), LfsOpenLogFile(), LfsPinOrMapData(), LfsPinOrMapLogRecordHeader(), LfsPrepareLfcbForLogRecord(), LfsQueryLastLsn(), LfsReadLogFileInformation(), LfsReadLogRecord(), LfsReadNextLogRecord(), LfsReadRestart(), LfsReadRestartArea(), LfsRemoveClientFromList(), LfsResetUndoTotal(), LfsRestartLogFile(), LfsSearchForwardByClient(), LfsSetBaseLsn(), LfsSetBaseLsnPriv(), LfsTerminateLogQuery(), LfsTransferLogBytes(), LfsUpdateLfcbFromNoRestart(), LfsUpdateLfcbFromPgHeader(), LfsUpdateLfcbFromRestart(), LfsUpdateRestartAreaFromLfcb(), LfsVerifyLogSpaceAvail(), LfsWrite(), LfsWriteLfsRestart(), LfsWriteLogRecordIntoLogPage(), LfsWriteRestartArea(), UdfAddToPcb(), UdfAddVmcbMapping(), 
UdfCheckLegalCS0Dstring(), UdfCommonCleanup(), UdfCommonClosePrivate(), UdfCompleteFcbOpen(), UdfCompletePcb(), UdfCreateInternalStream(), UdfCS0DstringContainsLegalCharacters(), UdfDetermineVolumeBounding(), UdfEqualEntityId(), UdfExceptionFilter(), UdfFindAnchorVolumeDescriptor(), UdfFindDirEntry(), UdfFindFileSetDescriptor(), UdfFindVolumeDescriptors(), UdfFspClose(), UdfInitializeAllocations(), UdfInitializePcb(), UdfInitializeVmcb(), UdfIsRemount(), UdfLoadSparingTables(), UdfLookupAllocation(), UdfLookupDirEntryPostProcessing(), UdfLookupPsnOfExtent(), UdfMountVolume(), UdfOpenObjectFromDirContext(), UdfQueueClose(), UdfReadSectors(), UdfRecognizeVolume(), UdfRemoveVmcbMapping(), UdfResetVmcb(), UdfSetMaximumLbnVmcb(), UdfTeardownStructures(), UdfUninitializeVmcb(), UdfUpdateDirNames(), UdfUpdateVcbPhase0(), UdfUpdateVcbPhase1(), UdfUpdateVolumeLabel(), UdfVerifyDescriptor(), UdfVerifyVolume(), UdfVmcbLbnToVbn(), and UdfVmcbVbnToLbn().

#define DebugTrace2 INDENT,
LEVEL,
X,
Y,
 )     {NOTHING;}
 

Definition at line 2292 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcFindBcb(), CcFlushCache(), CcGetVacbMiss(), CcInitializeCacheMap(), CcMapAndCopy(), CcMdlRead(), CcPinFileData(), CcPrepareMdlWrite(), CcPurgeCacheSection(), CcReleaseByteRangeFromWrite(), CcScheduleReadAhead(), CcSetFileSizes(), CcSetValidData(), CcUninitializeCacheMap(), CcUnmapAndPurge(), and CcUnpinRepinnedBcb().

#define DEFAULT_CREATE_MODULO   ((ULONG)(0x00100000))
 

Definition at line 192 of file cc.h.

Referenced by CcInitializeCacheMap().

#define DEFAULT_EXTEND_MODULO   ((ULONG)(0x00100000))
 

Definition at line 193 of file cc.h.

Referenced by CcSetFileSizes().

#define DISABLE_READ_AHEAD   0x0001
 

Definition at line 1025 of file cc.h.

Referenced by CcScheduleReadAhead(), and CcSetAdditionalCacheAttributes().

#define DISABLE_WRITE_BEHIND   0x0002
 

Definition at line 1031 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcAllocateInitializeBcb(), CcReleaseByteRangeFromWrite(), CcSetAdditionalCacheAttributes(), and CcSetDirtyPinnedData().

#define ExAllocatePool a,
 )     ExAllocatePoolWithTag(a,b,' cC')
 

Definition at line 126 of file cc.h.

Referenced by CmDeleteKeyRecursive(), CmGetSystemDriverList(), CmpAddToHiveFileList(), CmpClaimGlobalQuota(), CmpCloneControlSet(), CmpCloneHwProfile(), CmpCopySyncTree(), CmpCopySyncTree2(), CmpDiskFullWarning(), CmpFlushNotify(), CmpFreePostBlock(), CmpGetAcpiProfileInformation(), CmpGetHiveName(), CmpGetSymbolicLink(), CmpHiveRootSecurityDescriptor(), CmpInitializeHardwareConfiguration(), CmpInitializeMachineDependentConfiguration(), CmpInitializeRegistryNode(), CmpInitializeSystemHive(), CmpLoadHiveVolatile(), CmpMergeKeyValues(), CmpNotifyChangeKey(), CmpOpenHiveFiles(), CmpPostNotify(), CmpSaveBootControlSet(), CmpWorker(), CmSetAcpiHwProfile(), ComPortDBAdd(), DoBitMapTest(), DoPoolTest(), DoZoneTest(), DriverEntry(), EisaBuildSlotsResources(), ExAllocatePoolWithQuota(), FindPathForDevice(), FsgWriteToFrameBuffer(), FsVgaBuildResourceList(), FsVgaServiceParameters(), FsVgaWriteToFrameBuffer(), GetNextReparseVolumePath(), HvpDoWriteHive(), HvpReadFileImageAndBuildMap(), HvpWriteLog(), IoAllocateWorkItem(), IoCreateDriver(), IoGetDeviceProperty(), IoOpenDeviceRegistryKey(), IopAddRelationToList(), IopAllocateBuffer(), IopAllocateRelationList(), IopAllocateUnicodeString(), IopAppendLegacyVeto(), IopAppendStringToValueKey(), IopApplyFunctionToServiceInstances(), IopApplyFunctionToSubKeys(), IopApplySystemPartitionProt(), IopCallDriverAddDeviceQueryRoutine(), IopChangeInterfaceType(), IopCmResourcesToIoResources(), IopCompressRelationList(), IopConcatenateUnicodeStrings(), IopCreateArcNames(), IopCreateEntry(), IopCreateMadeupNode(), IopDeleteLockedDeviceNode(), IopErrorLogQueueRequest(), IopErrorLogThread(), IopExecuteHardwareProfileChange(), IopFilterResourceRequirementsList(), IopGetBusTypeGuidIndex(), IopGetDeviceResourcesFromRegistry(), IopGetDriverDeviceList(), IopGetDriverNameFromKeyNode(), IopGetLegacyVetoListDrivers(), IopGetRegistryKeyInformation(), IopGetRegistryValue(), IopGetRootDevices(), IopInitializeBootDrivers(), IopInitializeBootLogging(), 
IopInitializeBuiltinDriver(), IopInitializeDeviceInstanceKey(), IopInitializePlugPlayServices(), IopInitializeResourceMap(), IopLoadDriver(), IopMakeGloballyUniqueId(), IopMergeCmResourceLists(), IopMergeFilteredResourceRequirementsList(), IoPnPDeliverServicePowerNotification(), IopOpenDeviceParametersSubkey(), IopParseDevice(), IopPnPDispatch(), IopProcessAssignResources(), IopProcessCriticalDeviceRoutine(), IopProcessNewDeviceNode(), IopProcessNewProfile(), IopProcessSetInterfaceState(), IopQueryDeviceRelations(), IopQueryDeviceResources(), IopQueryDockRemovalInterface(), IopQueryResourceHandlerInterface(), IopQueueDeviceWorkItem(), IopQueuePendingSurpriseRemoval(), IopRaiseHardError(), IopReadDeviceConfiguration(), IopRealloc(), IopRegMultiSzToUnicodeStrings(), IopRequestDeviceAction(), IopResizeBuffer(), IopSafebootDriverLoad(), IopSendMessageToTrackService(), IopSetEaOrQuotaInformationFile(), IopStartDevice(), IopTranslateAndAdjustReqDesc(), IopTranslatorHandlerIo(), IopUnloadAttachedDriver(), IopUpdateHardwareProfile(), IoRegisterPlugPlayNotification(), IoReportDetectedDevice(), IoReportHalResourceUsage(), IoReportTargetDeviceChangeAsynchronous(), KeIA32SetIoAccessMap(), KeSetAutoAlignmentThread(), KeStartAllProcessors(), MapperAdjustResourceList(), MapperCallback(), MapperConstructRootEnumTree(), MapperMarkKey(), MapperPeripheralCallback(), MapperPhantomizeDetectedComPorts(), MapperSeedKey(), NtFlushBuffersFile(), NtQueryEaFile(), NtQueryInformationFile(), NtQueryQuotaInformationFile(), NtQueryVolumeInformationFile(), NtSetEaFile(), NtSetInformationFile(), NtSetInformationProcess(), NtSetLdtEntries(), NtSetVolumeInformationFile(), NtUnlockFile(), ObpGetDosDevicesProtection(), OpenDeviceReparseIndex(), PnPBiosCopyDeviceParamKey(), PnPBiosCopyIoDecode(), PnPBiosEliminateDupes(), PnPBiosExtractCompatibleIDs(), PnPBiosGetBiosInfo(), PnPBiosIoResourceListToCmResourceList(), PnPBiosTranslateInfo(), PpCreateLegacyDeviceIds(), PspCreateLdt(), PspSetLdtInformation(), 
PspSetQuotaLimits(), PspTerminateThreadByPointer(), PspUserThreadStartup(), QueryDeviceNameForPath(), QuerySymbolicLink(), RtlVolumeDeviceToDosName(), SeOpenObjectAuditAlarm(), SeOpenObjectForDeleteAuditAlarm(), SepAdtInitializeBounds(), SepCreateImpersonationTokenDacl(), SepCreateToken(), SepDuplicateToken(), SepFilterToken(), SepInitializationPhase1(), SepProbeAndCaptureQosData(), SeQueryInformationToken(), TestAccessCheck(), TestAssignSecurity(), TestCaptureSecurityDescriptor(), TestDefaultObjectMethod(), TestTokenCopy(), TestTokenSize(), and VerifierAllocatePool().

#define ExAllocatePoolWithQuota(a, b)   ExAllocatePoolWithQuotaTag(a,b,' cC')
 

Definition at line 127 of file cc.h.

Referenced by BuildQueryDirectoryIrp(), CmpSaveKeyByFileCopy(), IopSetEaOrQuotaInformationFile(), IopTrackLink(), IopXxxControlFile(), NtLoadDriver(), NtLockFile(), NtNotifyChangeDirectoryFile(), NtQueryEaFile(), NtQueryInformationFile(), NtQueryQuotaInformationFile(), NtQueryVolumeInformationFile(), NtReadFile(), NtReadFileScatter(), NtRegisterThreadTerminatePort(), NtSetEaFile(), NtSetInformationFile(), NtSetVolumeInformationFile(), NtUnloadDriver(), NtUnlockFile(), NtWriteFile(), NtWriteFileGather(), Psp386CreateVdmIoListHead(), Psp386InstallIoHandler(), and VerifierAllocatePoolWithQuota().

#define FlagOn(F, SF)
 

Value:

( \ (((F) & (SF))) \ )

Definition at line 497 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcAllocateInitializeBcb(), CcCalculateVacbLevelLockCount(), CcCanIWrite(), CcCopyRead(), CcCopyWrite(), CcCreateVacbArray(), CcExtendVacbArray(), CcFastCopyRead(), CcFastCopyWrite(), CcFlushCache(), CcFreeActiveVacb(), CcGetDirtyPages(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheMap(), CcIsThereDirtyData(), CcLazyWriteScan(), CcMapAndCopy(), CcMapAndRead(), CcMdlRead(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPinFileData(), CcPinMappedData(), CcPinRead(), CcPurgeCacheSection(), CcReferenceFileOffset(), CcReleaseByteRangeFromWrite(), CcScheduleReadAhead(), CcSetDirtyPageThreshold(), CcSetDirtyPinnedData(), CcSetFileSizes(), CcSetVacbLargeOffset(), CcUninitializeCacheMap(), CcUnmapAndPurge(), CcUnmapVacb(), CcUnpinFileData(), CcUnpinRepinnedBcb(), CcWriteBehind(), CcZeroData(), CcZeroEndOfLastPage(), FsRtlAcknowledgeOplockBreak(), FsRtlAcquireFileForModWrite(), FsRtlCancelNotify(), FsRtlCheckOplock(), FsRtlCompareNodeAndKey(), FsRtlCopyWrite(), FsRtlCurrentBatchOplock(), FsRtlFreeTunnelNode(), FsRtlMdlWriteCompleteDev(), FsRtlNotifyCompleteIrp(), FsRtlNotifyFullChangeDirectory(), FsRtlNotifyFullReportChange(), FsRtlOpBatchBreakClosePending(), FsRtlOplockBreakNotify(), FsRtlOplockBreakToII(), FsRtlOplockBreakToNone(), FsRtlOplockCleanup(), FsRtlOplockFsctrl(), FsRtlOplockIsFastIoPossible(), FsRtlPrepareMdlWriteDev(), FsRtlRequestExclusiveOplock(), FsRtlRequestOplockII(), LfsAllocateSpanningBuffer(), LfsCloseLogFile(), LfsCopyReadLogRecord(), LfsCurrentAvailSpace(), LfsDeallocateLbcb(), LfsFindCurrentAvail(), LfsFindFirstIo(), LfsFindLastLsn(), LfsFindLogRecord(), LfsFlushLfcb(), LfsFlushToLsnPriv(), LfsFreeSpanningBuffer(), LfsGetLbcb(), LfsInitializeLogFileService(), LfsOpenLogFile(), LfsPrepareLfcbForLogRecord(), LfsQueryLastLsn(), LfsResetUndoTotal(), LfsRestartLogFile(), LfsUpdateLfcbFromNoRestart(), LfsUpdateLfcbFromRestart(), LfsUpdateRestartAreaFromLfcb(), LfsVerifyLogSpaceAvail(), 
LfsWriteLfsRestart(), LfsWriteLogRecordIntoLogPage(), UdfAcquireResource(), UdfCheckLegalCS0Dstring(), UdfCleanupDirContext(), UdfCleanupIrpContext(), UdfCommonClose(), UdfCommonCreate(), UdfCommonQueryInfo(), UdfCommonRead(), UdfCommonSetInfo(), UdfCompleteFcbOpen(), UdfCompleteRequest(), UdfDecodeFileObject(), UdfDismountVcb(), UdfEnumerateIndex(), UdfFastDecodeFileObject(), UdfFastQueryBasicInfo(), UdfFastQueryNetworkInfo(), UdfFastQueryStdInfo(), UdfFindDirEntry(), UdfFindPrefix(), UdfFsdDispatch(), UdfInitializeAllocationContext(), UdfInitializeEnumeration(), UdfInitializeFcbMcb(), UdfInitializeLcbFromDirContext(), UdfInitializePcb(), UdfInitializeVcb(), UdfInvalidateVolumes(), UdfLockVolumeInternal(), UdfLookupAllocation(), UdfLookupFileEntryInEnumeration(), UdfLookupNextDirEntry(), UdfMountVolume(), UdfMultipleAsync(), UdfNonCachedRead(), UdfNormalizeFileNames(), UdfOpenExistingFcb(), UdfOpenObjectByFileId(), UdfOpenObjectFromDirContext(), UdfPerformVerify(), UdfPrepareBuffers(), UdfPrePostIrp(), UdfProcessException(), UdfQueryAlternateNameInfo(), UdfQueryDirectory(), UdfQueryNetworkInfo(), UdfQueryStandardInfo(), UdfReadSectors(), UdfRemovePrefix(), UdfSetFileObject(), UdfSetThreadContext(), UdfSingleAsync(), UdfTeardownStructures(), UdfUninitializeFcbMcb(), UdfUnlockVolumeInternal(), UdfUpdateDirNames(), UdfUpdateTimestampsFromIcbContext(), UdfUpdateVcbPhase0(), UdfVerifyFcbOperation(), UdfVerifyVcb(), UdfVerifyVolume(), and VacbLevelReference().

#define FsRtlAllocatePool(a, b)   FsRtlAllocatePoolWithTag(a,b,' cC')
 

Definition at line 120 of file cc.h.

Referenced by FsRtlInitSystem(), and LfsAllocateLfcb().

#define FsRtlAllocatePoolWithQuota(a, b)   FsRtlAllocatePoolWithQuotaTag(a,b,' cC')
 

Definition at line 121 of file cc.h.

#define GetActiveVacb(SCM, IRQ, V, P, D)
 

Value:

{ \ ExAcquireFastLock(&(SCM)->ActiveVacbSpinLock, &(IRQ)); \ (V) = (SCM)->ActiveVacb; \ if ((V) != NULL) { \ (P) = (SCM)->ActivePage; \ (SCM)->ActiveVacb = NULL; \ (D) = (SCM)->Flags & ACTIVE_PAGE_IS_DIRTY; \ } \ ExReleaseFastLock(&(SCM)->ActiveVacbSpinLock, (IRQ)); \ }

Definition at line 1577 of file cc.h.

Referenced by CcCopyRead(), CcCopyWrite(), CcFastCopyRead(), CcFastCopyWrite(), CcMdlRead(), CcPinFileData(), and CcPrepareMdlWrite().

#define GetActiveVacbAtDpcLevel(SCM, V, P, D)
 

Value:

{ \ ExAcquireSpinLockAtDpcLevel(&(SCM)->ActiveVacbSpinLock); \ (V) = (SCM)->ActiveVacb; \ if ((V) != NULL) { \ (P) = (SCM)->ActivePage; \ (SCM)->ActiveVacb = NULL; \ (D) = (SCM)->Flags & ACTIVE_PAGE_IS_DIRTY; \ } \ ExReleaseSpinLockFromDpcLevel(&(SCM)->ActiveVacbSpinLock); \ }

Definition at line 1588 of file cc.h.

Referenced by CcDeleteSharedCacheMap(), CcFlushCache(), CcGetVacbMiss(), CcPurgeCacheSection(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWriteBehind(), and CcZeroEndOfLastPage().

#define GetBcbListHead(SCM, OFF, FAILSUCC)
 

Value:

( \ (((SCM)->SectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY) && \ FlagOn((SCM)->Flags, MODIFIED_WRITE_DISABLED)) ? \ (((SCM)->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) ? \ CcGetBcbListHeadLargeOffset((SCM),(OFF),(FAILSUCC)) : \ (((OFF) >= (SCM)->SectionSize.QuadPart) ? &(SCM)->BcbList : \ ((PLIST_ENTRY)((SCM)->Vacbs) + (((SCM)->SectionSize.QuadPart + (OFF)) >> BCB_LIST_SHIFT)))) : \ &(SCM)->BcbList \ )

Definition at line 389 of file cc.h.

Referenced by CcFindBcb().

#define IS_CURSOR   0x0800
 

Definition at line 1090 of file cc.h.

Referenced by CcGetDirtyPages(), CcInitializeCacheManager(), CcIsThereDirtyData(), and CcLazyWriteScan().

#define LAZY_WRITE_OCCURRED   0x0400
 

Definition at line 1083 of file cc.h.

Referenced by CcFlushCache(), and CcWriteBehind().

#define LAZY_WRITER_COLLISION_DELAY   ((LONG)(1000000))
 

Definition at line 430 of file cc.h.

#define LAZY_WRITER_IDLE_DELAY   ((LONG)(10000000))
 

Definition at line 429 of file cc.h.

Referenced by CcInitializeCacheManager().

#define LAZY_WRITER_MAX_AGE_TARGET   ((ULONG)(8))
 

Definition at line 436 of file cc.h.

Referenced by CcLazyWriteScan().

#define MAX_READ_AHEAD   (8 * 1024 * 1024)
 

Definition at line 217 of file cc.h.

Referenced by CcPerformReadAhead().

#define MAX_WRITE_BEHIND   (MM_MAXIMUM_DISK_IO_SIZE)
 

Definition at line 223 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), and CcLazyWriteScan().

#define MAX_ZERO_TRANSFER   (PAGE_SIZE * 128)
 

Definition at line 288 of file cc.h.

Referenced by CcZeroData().

#define MAX_ZEROS_IN_CACHE   (0x10000)
 

Definition at line 290 of file cc.h.

Referenced by CcZeroData().

#define MBCB_BITMAP_BLOCK_SIZE   (VACB_LEVEL_BLOCK_SIZE)
 

Definition at line 351 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcDeleteMbcb(), and CcFindBitmapRangeToDirty().

#define MBCB_BITMAP_INITIAL_SIZE   (2 * sizeof(BITMAP_RANGE))
 

Definition at line 364 of file cc.h.

Referenced by CcSetDirtyInMask().

#define MBCB_BITMAP_RANGE   (MBCB_BITMAP_BLOCK_SIZE * 8 * PAGE_SIZE)
 

Definition at line 358 of file cc.h.

Referenced by CcSetDirtyInMask().

#define MIN_ZERO_TRANSFER   (0x10000)
 

Definition at line 289 of file cc.h.

Referenced by CcZeroData().

#define mm   (0x100)
 

Definition at line 462 of file cc.h.

Referenced by CcFlushCache(), CcGetVacbMiss(), CcInitializeCacheMap(), CcMdlRead(), CcMdlReadComplete2(), CcMdlWriteComplete2(), CcPrepareMdlWrite(), CcPurgeCacheSection(), CcSetFileSizes(), CcUninitializeCacheMap(), CcUnmapAndPurge(), and CcUnmapVacb().

#define MODIFIED_WRITE_DISABLED   0x0200
 

Definition at line 1077 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcCalculateVacbLevelLockCount(), CcCreateVacbArray(), CcExtendVacbArray(), CcFlushCache(), CcGetVacbMiss(), CcInitializeCacheMap(), CcLazyWriteScan(), CcPinFileData(), CcPinMappedData(), CcPinRead(), CcReferenceFileOffset(), CcReleaseByteRangeFromWrite(), CcSetAdditionalCacheAttributes(), CcSetVacbLargeOffset(), CcUnpinFileData(), CcUnpinRepinnedBcb(), and VacbLevelReference().

#define NOISE_BITS   (0x7)
 

Definition at line 423 of file cc.h.

Referenced by CcScheduleReadAhead().

#define ONLY_SEQUENTIAL_ONLY_SEEN   0x0040
 

Definition at line 1059 of file cc.h.

Referenced by CcInitializeCacheMap(), and CcUnmapVacb().

#define PIN_ACCESS   0x0004
 

Definition at line 1038 of file cc.h.

Referenced by CcFlushCache(), CcInitializeCacheMap(), CcPinFileData(), CcSetFileSizes(), CcUninitializeCacheMap(), and CcWriteBehind().

#define PREALLOCATED_VACBS   (4)
 

Definition at line 571 of file cc.h.

Referenced by CcCreateVacbArray().

#define QuadAlign(P)
 

Value:

( \ ((((P)) + 7) & (-8)) \ )

Definition at line 513 of file cc.h.

Referenced by CcSetDirtyInMask(), LfsIsRestartAreaValid(), LfsIsRestartPageHeaderValid(), LfsUpdateLfcbFromNoRestart(), LfsUpdateLfcbFromRestart(), LfsWriteLogRecordIntoLogPage(), and UdfQueryDirectory().

#define RANDOM_ACCESS_SEEN   0x1000
 

Definition at line 1098 of file cc.h.

Referenced by CcGetVacbMiss(), and CcInitializeCacheMap().

#define SEQUENTIAL_MAP_LIMIT   ((ULONG)(0x00080000))
 

Definition at line 200 of file cc.h.

Referenced by CcGetVacbMiss().

#define SetActiveVacb(SCM, IRQ, V, P, D)
 

Definition at line 1619 of file cc.h.

Referenced by CcCopyRead(), CcCopyWrite(), CcFastCopyRead(), CcFastCopyWrite(), and CcMapAndCopy().

#define SetFlag(F, SF)
 

Value:

{ \ (F) |= (SF); \ }

Definition at line 505 of file cc.h.

Referenced by CcAllocateInitializeBcb(), CcDeleteSharedCacheMap(), CcFlushCache(), CcInitializeCacheMap(), CcIsThereDirtyData(), CcLazyWriteScan(), CcSetAdditionalCacheAttributes(), CcSetDirtyPageThreshold(), CcUninitializeCacheMap(), CcZeroEndOfLastPage(), FsRecGetDeviceSectors(), FsRecGetDeviceSectorSize(), FsRecReadBlock(), FsRtlAddToTunnelCache(), FsRtlCancelNotify(), FsRtlCheckNotifyForDelete(), FsRtlNotifyCleanup(), FsRtlNotifyFullChangeDirectory(), FsRtlNotifyFullReportChange(), FsRtlOpBatchBreakClosePending(), FsRtlOplockBreakToII(), FsRtlOplockBreakToNone(), FsRtlOplockFsctrl(), LfsAllocateSpanningBuffer(), LfsCloseLogFile(), LfsFindLastLsn(), LfsFlushLfcb(), LfsGetLbcb(), LfsInitializeLogFileService(), LfsRestartLogFile(), LfsUpdateLfcbFromNoRestart(), LfsUpdateLfcbFromPgHeader(), LfsUpdateLfcbFromRestart(), LfsUpdateRestartAreaFromLfcb(), LfsWriteLfsRestart(), LfsWriteLogRecordIntoLogPage(), UdfCommonCleanup(), UdfCommonPnp(), UdfCommonRead(), UdfCompleteFcbOpen(), UdfCreateIrpContext(), UdfDismountVolume(), UdfFindAnchorVolumeDescriptor(), UdfFsdDispatch(), UdfFspClose(), UdfFspDispatch(), UdfInitializeAllocations(), UdfInitializeEnumeration(), UdfInitializeFcbFromIcbContext(), UdfInitializeFcbMcb(), UdfInitializeLcbFromDirContext(), UdfInitializePcb(), UdfInitializeStackIrpContext(), UdfInitializeVcb(), UdfInsertPrefix(), UdfInvalidateVolumes(), UdfIsVolumeMounted(), UdfLockVolumeInternal(), UdfLookupDirEntryPostProcessing(), UdfMountVolume(), UdfNormalizeFileNames(), UdfNotifyChangeDirectory(), UdfOpenExistingFcb(), UdfOpenObjectFromDirContext(), UdfOplockRequest(), UdfPerformDevIoCtrl(), UdfPrePostIrp(), UdfQueryDirectory(), UdfQueueClose(), UdfReadSectors(), UdfSetFileObject(), UdfSetThreadContext(), UdfTeardownStructures(), UdfUpdateDirNames(), UdfUpdateVcbPhase0(), UdfUpdateVcbPhase1(), UdfVerifyFcbOperation(), and UdfVerifyVcb().

#define SIZE_PER_BCB_LIST   (VACB_MAPPING_GRANULARITY * 2)
 

Definition at line 386 of file cc.h.

Referenced by CcCreateVacbArray(), CcExtendVacbArray(), and CcFindBcb().

#define TRUNCATE_REQUIRED   0x0010
 

Definition at line 1045 of file cc.h.

Referenced by CcInitializeCacheMap(), CcUninitializeCacheMap(), and CcUnmapAndPurge().

#define try_return(S)   { S; goto try_exit; }
 

Definition at line 2154 of file cc.h.

Referenced by CcInitializeCacheMap(), CcMapAndCopy(), CcMapAndRead(), CcPerformReadAhead(), CcPinFileData(), CcPinMappedData(), CcPinRead(), CcPreparePinWrite(), CcUninitializeCacheMap(), CcZeroData(), FsRtlAcknowledgeOplockBreak(), FsRtlAddLargeMcbEntry(), FsRtlGetNextLargeMcbEntry(), FsRtlLookupLargeMcbEntry(), FsRtlLookupLastLargeMcbEntry(), FsRtlLookupLastLargeMcbEntryAndIndex(), FsRtlNotifyFullChangeDirectory(), FsRtlOplockBreakNotify(), FsRtlOplockBreakToII(), FsRtlOplockBreakToNone(), FsRtlOplockCleanup(), FsRtlPrivateInitializeFileLock(), FsRtlPrivateLock(), FsRtlSplitLargeMcb(), LfsCloseLogFile(), LfsFlushLfcb(), LfsReadLogFileInformation(), LfsReadRestartArea(), and LfsTerminateLogQuery().

#define VACB_LAST_INDEX_FOR_LEVEL   ((1 << VACB_LEVEL_SHIFT) - 1)
 

Definition at line 321 of file cc.h.

Referenced by CcCalculateVacbLevelLockCount(), CcGetBcbListHeadLargeOffset(), and CcSetVacbLargeOffset().

#define VACB_LEVEL_BLOCK_SIZE   ((1 << VACB_LEVEL_SHIFT) * sizeof(PVOID))
 

Definition at line 315 of file cc.h.

Referenced by CcAllocateVacbLevel(), CcCreateVacbArray(), CcExtendVacbArray(), CcGetBcbListHeadLargeOffset(), CcPrefillVacbLevelZone(), CcSetVacbLargeOffset(), and VacbLevelReference().

#define VACB_LEVEL_SHIFT   (7)
 

Definition at line 307 of file cc.h.

Referenced by CcAdjustVacbLevelLockCount(), CcCreateVacbArray(), CcExtendVacbArray(), CcGetBcbListHeadLargeOffset(), CcGetVacbLargeOffset(), and CcSetVacbLargeOffset().

#define VACB_NUMBER_OF_LEVELS   (((63 - VACB_OFFSET_SHIFT)/VACB_LEVEL_SHIFT) + 1)
 

Definition at line 334 of file cc.h.

Referenced by CcCreateVacbArray(), CcExtendVacbArray(), CcGetBcbListHeadLargeOffset(), and CcSetVacbLargeOffset().

#define VACB_SIZE_OF_FIRST_LEVEL   (1 << (VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT))
 

Definition at line 327 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcAdjustVacbLevelLockCount(), CcAllocateInitializeBcb(), CcDeleteSharedCacheMap(), CcDereferenceFileOffset(), CcExtendVacbArray(), CcGetBcbListHeadLargeOffset(), CcGetVacbLargeOffset(), CcGetVacbMiss(), CcReferenceFileOffset(), CcSetVacbLargeOffset(), and SetVacb().

#define VACB_SPECIAL_DEREFERENCE   ((PVACB) ~1)
 

Definition at line 629 of file cc.h.

Referenced by CcAdjustVacbLevelLockCount(), CcDereferenceFileOffset(), and CcSetVacbLargeOffset().

#define VACB_SPECIAL_FIRST_VALID   VACB_SPECIAL_DEREFERENCE
 

Definition at line 631 of file cc.h.

Referenced by CcSetVacbLargeOffset(), and SetVacb().

#define VACB_SPECIAL_REFERENCE   ((PVACB) ~0)
 

Definition at line 628 of file cc.h.

Referenced by CcReferenceFileOffset().

#define WRITE_CHARGE_THRESHOLD   (64 * PAGE_SIZE)
 

Definition at line 280 of file cc.h.

Referenced by CcCanIWrite(), and CcLazyWriteScan().

#define WRITE_QUEUED   0x0020
 

Definition at line 1051 of file cc.h.

Referenced by CcDeleteSharedCacheMap(), CcFlushCache(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheMap(), CcIsThereDirtyData(), CcLazyWriteScan(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPurgeCacheSection(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWriteBehind(), and CcZeroEndOfLastPage().

#define ZERO_FIRST_PAGE   1
 

Definition at line 1798 of file cc.h.

Referenced by CcCopyWrite(), CcFastCopyWrite(), CcMapAndCopy(), CcMapAndRead(), CcPinFileData(), and CcPrepareMdlWrite().

#define ZERO_LAST_PAGE   4
 

Definition at line 1800 of file cc.h.

Referenced by CcCopyWrite(), CcFastCopyWrite(), CcMapAndCopy(), CcMapAndRead(), CcPinFileData(), and CcPrepareMdlWrite().

#define ZERO_MIDDLE_PAGES   2
 

Definition at line 1799 of file cc.h.

Referenced by CcCopyWrite(), CcFastCopyWrite(), CcMapAndCopy(), CcMapAndRead(), CcPinFileData(), and CcPrepareMdlWrite().


Typedef Documentation

typedef struct _BCB BCB
 

Referenced by CcExtendVacbArray(), and CcFindBcb().

typedef struct _BITMAP_RANGE BITMAP_RANGE
 

Referenced by CcFindBitmapRangeToDirty().

typedef struct _DEFERRED_WRITE DEFERRED_WRITE
 

Referenced by CcDeferWrite().

typedef struct _LAZY_WRITER LAZY_WRITER
 

typedef struct _MBCB MBCB
 

typedef struct _MDL_WRITE MDL_WRITE
 

typedef struct _OBCB OBCB
 

Referenced by CcAllocateObcb().

typedef BCB* PBCB
 

Definition at line 1340 of file cc.h.

Referenced by UdfLookupPsnOfExtent().

typedef struct _BITMAP_RANGE * PBITMAP_RANGE
 

typedef struct _DEFERRED_WRITE * PDEFERRED_WRITE
 

typedef MBCB* PMBCB
 

Definition at line 1228 of file cc.h.

typedef struct _MDL_WRITE * PMDL_WRITE
 

typedef OBCB* POBCB
 

Definition at line 1374 of file cc.h.

Referenced by CcSetDirtyPinnedData().

typedef PRIVATE_CACHE_MAP* PPRIVATE_CACHE_MAP
 

Definition at line 717 of file cc.h.

Referenced by CcSetReadAheadGranularity().

typedef struct _PRIVATE_CACHE_MAP PRIVATE_CACHE_MAP
 

Referenced by CcInitializeCacheMap().

typedef SHARED_CACHE_MAP* PSHARED_CACHE_MAP
 

Definition at line 997 of file cc.h.

typedef struct _SHARED_CACHE_MAP_LIST_CURSOR * PSHARED_CACHE_MAP_LIST_CURSOR
 

typedef struct _VACB * PVACB
 

Referenced by CcAllocateVacbLevel(), CcDeallocateVacbLevel(), and CcUnpinFileData().

typedef struct _VACB_LEVEL_REFERENCE * PVACB_LEVEL_REFERENCE
 

Referenced by IsVacbLevelReferenced(), ReferenceVacbLevel(), and VacbLevelReference().

typedef struct _WORK_QUEUE_ENTRY * PWORK_QUEUE_ENTRY
 

Referenced by CcScheduleReadAhead().

typedef struct _SHARED_CACHE_MAP SHARED_CACHE_MAP
 

Referenced by CcInitializeCacheMap().

typedef struct _SHARED_CACHE_MAP_LIST_CURSOR SHARED_CACHE_MAP_LIST_CURSOR
 

typedef struct _VACB VACB
 

Referenced by CcInitializeVacbs().

typedef struct _VACB_LEVEL_REFERENCE VACB_LEVEL_REFERENCE
 

Referenced by CcCreateVacbArray(), and CcPrefillVacbLevelZone().

typedef struct _WORK_QUEUE_ENTRY WORK_QUEUE_ENTRY
 

Referenced by CcInitializeCacheManager().

typedef enum _WORKER_FUNCTION WORKER_FUNCTION
 


Enumeration Type Documentation

enum _WORKER_FUNCTION
 

Enumeration values:
Noop 
ReadAhead 
WriteBehind 
LazyWriteScan 
EventSet 

Definition at line 1482 of file cc.h.

01482 { 01483 Noop = 0, 01484 ReadAhead, 01485 WriteBehind, 01486 LazyWriteScan, 01487 EventSet 01488 } WORKER_FUNCTION;

enum UNMAP_ACTIONS
 

Enumeration values:
UNPIN 
UNREF 
SET_CLEAN 

Definition at line 1758 of file cc.h.

01758 { 01759 UNPIN, 01760 UNREF, 01761 SET_CLEAN 01762 } UNMAP_ACTIONS;


Function Documentation

VOID CcAdjustVacbLevelLockCount IN PSHARED_CACHE_MAP  SharedCacheMap,
IN LONGLONG  FileOffset,
IN LONG  Adjustment
 

Definition at line 2335 of file vacbsup.c.

References ASSERT, CcSetVacbLargeOffset(), FALSE, IsVacbLevelReferenced(), ReferenceVacbLevel(), TRUE, VACB_LEVEL_SHIFT, VACB_OFFSET_SHIFT, VACB_SIZE_OF_FIRST_LEVEL, and VACB_SPECIAL_DEREFERENCE.

02343 : 02344 02345 This routine may be called to adjust the lock count of the bottom Vacb level when 02346 Bcbs are inserted or deleted. If the count goes to zero, the level will be 02347 eliminated. The bottom level must exist, or we crash! 02348 02349 Arguments: 02350 02351 SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb 02352 is desired. 02353 02354 FileOffset - Supplies the fileOffset corresponding to the desired Vacb. 02355 02356 Adjustment - Generally -1 or +1. 02357 02358 Return Value: 02359 02360 None. 02361 02362 Environment: 02363 02364 CcVacbSpinLock should be held on entry. 02365 02366 --*/ 02367 02368 { 02369 ULONG Level, Shift; 02370 PVACB *VacbArray; 02371 LONGLONG OriginalFileOffset = FileOffset; 02372 02373 // 02374 // Initialize variables controlling our descent into the hierarchy. 02375 // 02376 02377 Level = 0; 02378 Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT; 02379 02380 VacbArray = SharedCacheMap->Vacbs; 02381 02382 // 02383 // Caller must have verified that we have a hierarchy, otherwise this routine 02384 // would fail. 02385 // 02386 02387 ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL); 02388 02389 // 02390 // Loop to calculate how many levels we have and how much we have to 02391 // shift to index into the first level. 02392 // 02393 02394 do { 02395 02396 Level += 1; 02397 Shift += VACB_LEVEL_SHIFT; 02398 02399 } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift)); 02400 02401 // 02402 // Now descend the tree to the bottom level to get the caller's Vacb. 02403 // 02404 02405 Shift -= VACB_LEVEL_SHIFT; 02406 do { 02407 02408 VacbArray = (PVACB *)VacbArray[(ULONG)(FileOffset >> Shift)]; 02409 02410 Level -= 1; 02411 02412 FileOffset &= ((LONGLONG)1 << Shift) - 1; 02413 02414 Shift -= VACB_LEVEL_SHIFT; 02415 02416 } while (Level != 0); 02417 02418 // 02419 // Now we have reached the final level, do the adjustment. 
02420 // 02421 02422 ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, Adjustment, FALSE ); 02423 02424 // 02425 // Now, if we decremented the count to 0, then force the collapse to happen by 02426 // upping count and resetting to NULL. Then smash OriginalFileOffset to be 02427 // the first entry so we do not recalculate! 02428 // 02429 02430 if (!IsVacbLevelReferenced( SharedCacheMap, VacbArray, Level )) { 02431 ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, 1, TRUE ); 02432 OriginalFileOffset &= ~(VACB_SIZE_OF_FIRST_LEVEL - 1); 02433 CcSetVacbLargeOffset( SharedCacheMap, OriginalFileOffset, VACB_SPECIAL_DEREFERENCE ); 02434 } 02435 }

_inline PVACB* CcAllocateVacbLevel( IN BOOLEAN  AllocatingBcbListHeads  ) 
 

Definition at line 2048 of file cc.h.

References ASSERT, CcVacbLevelEntries, CcVacbLevelFreeList, CcVacbLevelWithBcbsEntries, CcVacbLevelWithBcbsFreeList, NULL, PVACB, and VACB_LEVEL_BLOCK_SIZE.

Referenced by CcExtendVacbArray(), CcSetDirtyInMask(), and CcSetVacbLargeOffset().

02052 { 02053 PVACB *ReturnEntry; 02054 02055 if (AllocatingBcbListHeads) { 02056 ReturnEntry = CcVacbLevelWithBcbsFreeList; 02057 CcVacbLevelWithBcbsFreeList = (PVACB *)*ReturnEntry; 02058 CcVacbLevelWithBcbsEntries -= 1; 02059 } else { 02060 ReturnEntry = CcVacbLevelFreeList; 02061 CcVacbLevelFreeList = (PVACB *)*ReturnEntry; 02062 CcVacbLevelEntries -= 1; 02063 } 02064 *ReturnEntry = NULL; 02065 ASSERT(RtlCompareMemory(ReturnEntry, ReturnEntry + 1, VACB_LEVEL_BLOCK_SIZE - sizeof(PVACB)) == 02066 (VACB_LEVEL_BLOCK_SIZE - sizeof(PVACB))); 02067 return ReturnEntry; 02068 }

LONG CcCopyReadExceptionFilter IN PEXCEPTION_POINTERS  ExceptionPointer,
IN PNTSTATUS  ExceptionCode
 

Definition at line 1747 of file copysup.c.

References ASSERT, EXCEPTION_EXECUTE_HANDLER, NT_SUCCESS, and NTSTATUS().

Referenced by CcCopyRead(), CcCopyWrite(), CcFastCopyRead(), CcFastCopyWrite(), and CcMapAndCopy().

01754 : 01755 01756 This routine serves as a exception filter and has the special job of 01757 extracting the "real" I/O error when Mm raises STATUS_IN_PAGE_ERROR 01758 beneath us. 01759 01760 Arguments: 01761 01762 ExceptionPointer - A pointer to the exception record that contains 01763 the real Io Status. 01764 01765 ExceptionCode - A pointer to an NTSTATUS that is to receive the real 01766 status. 01767 01768 Return Value: 01769 01770 EXCEPTION_EXECUTE_HANDLER 01771 01772 --*/ 01773 01774 { 01775 *ExceptionCode = ExceptionPointer->ExceptionRecord->ExceptionCode; 01776 01777 if ( (*ExceptionCode == STATUS_IN_PAGE_ERROR) && 01778 (ExceptionPointer->ExceptionRecord->NumberParameters >= 3) ) { 01779 01780 *ExceptionCode = (NTSTATUS) ExceptionPointer->ExceptionRecord->ExceptionInformation[2]; 01781 } 01782 01783 ASSERT( !NT_SUCCESS(*ExceptionCode) ); 01784 01785 return EXCEPTION_EXECUTE_HANDLER; 01786 }

VOID FASTCALL CcCreateVacbArray IN PSHARED_CACHE_MAP  SharedCacheMap,
IN LARGE_INTEGER  NewSectionSize
 

Definition at line 1208 of file vacbsup.c.

References ASSERT, BEGIN_BCB_LIST_ARRAY, CcMaxVacbLevelsSeen, ExAllocatePoolWithTag, ExRaiseStatus(), FALSE, FlagOn, MODIFIED_WRITE_DISABLED, NonPagedPool, NULL, PAGE_SIZE, PREALLOCATED_VACBS, SIZE_PER_BCB_LIST, SizeOfVacbArray, TRUE, VACB_LEVEL_BLOCK_SIZE, VACB_LEVEL_REFERENCE, VACB_LEVEL_SHIFT, VACB_MAPPING_GRANULARITY, VACB_NUMBER_OF_LEVELS, and VACB_OFFSET_SHIFT.

Referenced by CcInitializeCacheMap().

01215 : 01216 01217 This routine must be called when a SharedCacheMap is created to create 01218 and initialize the initial Vacb array. 01219 01220 Arguments: 01221 01222 SharedCacheMap - Supplies the shared cache map for which the array is 01223 to be created. 01224 01225 NewSectionSize - Supplies the current size of the section which must be 01226 covered by the Vacb array. 01227 01228 Return Value: 01229 01230 None. 01231 01232 --*/ 01233 01234 { 01235 PVACB *NewAddresses; 01236 ULONG NewSize, SizeToAllocate; 01237 PLIST_ENTRY BcbListHead; 01238 BOOLEAN CreateBcbListHeads = FALSE, CreateReference = FALSE; 01239 01240 NewSize = SizeToAllocate = SizeOfVacbArray(NewSectionSize); 01241 01242 // 01243 // The following limit is greater than the MM limit 01244 // (i.e., MM actually only supports even smaller sections). 01245 // We have to reject the sign bit, and testing the high byte 01246 // for nonzero will surely only catch errors. 01247 // 01248 01249 if (NewSectionSize.HighPart & ~(PAGE_SIZE - 1)) { 01250 ExRaiseStatus(STATUS_SECTION_TOO_BIG); 01251 } 01252 01253 // 01254 // See if we can use the array inside the shared cache map. 01255 // 01256 01257 if (NewSize == (PREALLOCATED_VACBS * sizeof(PVACB))) { 01258 01259 NewAddresses = &SharedCacheMap->InitialVacbs[0]; 01260 01261 // 01262 // Else allocate the array. 01263 // 01264 01265 } else { 01266 01267 // 01268 // For large metadata streams, double the size to allocate 01269 // an array of Bcb listheads. Each two Vacb pointers also 01270 // gets its own Bcb listhead, thus requiring double the size. 01271 // 01272 01273 ASSERT(SIZE_PER_BCB_LIST == (VACB_MAPPING_GRANULARITY * 2)); 01274 01275 // 01276 // If this stream is larger than the size for multi-level Vacbs, 01277 // then fix the size to allocate the root. 
01278 // 01279 01280 if (NewSize > VACB_LEVEL_BLOCK_SIZE) { 01281 01282 ULONG Level = 0; 01283 ULONG Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT; 01284 01285 NewSize = SizeToAllocate = VACB_LEVEL_BLOCK_SIZE; 01286 SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE); 01287 CreateReference = TRUE; 01288 01289 // 01290 // Loop to calculate how many levels we have and how much we have to 01291 // shift to index into the first level. 01292 // 01293 01294 do { 01295 01296 Level += 1; 01297 Shift += VACB_LEVEL_SHIFT; 01298 01299 } while ((NewSectionSize.QuadPart > ((LONGLONG)1 << Shift)) != 0); 01300 01301 // 01302 // Remember the maximum level ever seen (which is actually Level + 1). 01303 // 01304 01305 if (Level >= CcMaxVacbLevelsSeen) { 01306 ASSERT(Level <= VACB_NUMBER_OF_LEVELS); 01307 CcMaxVacbLevelsSeen = Level + 1; 01308 } 01309 01310 } else { 01311 01312 // 01313 // Does this stream get a Bcb Listhead array? 01314 // 01315 01316 if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) && 01317 (NewSectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY)) { 01318 01319 SizeToAllocate *= 2; 01320 CreateBcbListHeads = TRUE; 01321 } 01322 01323 // 01324 // Handle the boundary case by giving the proto-level a 01325 // reference count. This will allow us to simply push it 01326 // in the expansion case. In practice, due to pool granularity 01327 // this will not change the amount of space allocated 01328 // 01329 01330 if (NewSize == VACB_LEVEL_BLOCK_SIZE) { 01331 01332 SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE); 01333 CreateReference = TRUE; 01334 } 01335 } 01336 01337 NewAddresses = ExAllocatePoolWithTag( NonPagedPool, SizeToAllocate, 'pVcC' ); 01338 if (NewAddresses == NULL) { 01339 SharedCacheMap->Status = STATUS_INSUFFICIENT_RESOURCES; 01340 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 01341 } 01342 } 01343 01344 // 01345 // Zero out the Vacb array and the trailing reference counts. 
01346 // 01347 01348 RtlZeroMemory( (PCHAR)NewAddresses, NewSize ); 01349 01350 if (CreateReference) { 01351 01352 SizeToAllocate -= sizeof(VACB_LEVEL_REFERENCE); 01353 RtlZeroMemory( (PCHAR)NewAddresses + SizeToAllocate, sizeof(VACB_LEVEL_REFERENCE) ); 01354 } 01355 01356 // 01357 // Loop to insert the Bcb listheads (if any) in the *descending* order 01358 // Bcb list. 01359 // 01360 01361 if (CreateBcbListHeads) { 01362 01363 for (BcbListHead = (PLIST_ENTRY)((PCHAR)NewAddresses + NewSize); 01364 BcbListHead < (PLIST_ENTRY)((PCHAR)NewAddresses + SizeToAllocate); 01365 BcbListHead++) { 01366 01367 InsertHeadList( &SharedCacheMap->BcbList, BcbListHead ); 01368 } 01369 } 01370 01371 SharedCacheMap->Vacbs = NewAddresses; 01372 SharedCacheMap->SectionSize = NewSectionSize; 01373 }

VOID FASTCALL CcDeallocateBcb IN PBCB  Bcb  ) 
 

Definition at line 5694 of file cachesub.c.

References _LAZY_WRITER::BcbZone, CACHE_NTC_BCB, CcBcbSpinLock, ExDeleteResource, ExFreePool(), ExFreeToZone, and LazyWriter.

Referenced by CcDeleteMbcb(), CcDeleteSharedCacheMap(), and CcUnpinFileData().

05700 : 05701 05702 This routine deallocates a Bcb to the BcbZone. It must 05703 already be removed from the BcbList. 05704 05705 Arguments: 05706 05707 Bcb - the Bcb to deallocate 05708 05709 Return Value: 05710 05711 None 05712 05713 --*/ 05714 05715 { 05716 KIRQL OldIrql; 05717 05718 // 05719 // Deallocate Resource structures 05720 // 05721 05722 if (Bcb->NodeTypeCode == CACHE_NTC_BCB) { 05723 05724 ExDeleteResource( &Bcb->Resource ); 05725 } 05726 05727 if ( Bcb->NodeIsInZone ) { 05728 05729 // 05730 // Synchronize access to the BcbZone 05731 // 05732 05733 ExAcquireSpinLock( &CcBcbSpinLock, &OldIrql ); 05734 ExFreeToZone( &LazyWriter.BcbZone, Bcb ); 05735 ExReleaseSpinLock( &CcBcbSpinLock, OldIrql ); 05736 05737 } else { 05738 ExFreePool(Bcb); 05739 } 05740 return; 05741 }

_inline VOID CcDeallocateVacbLevel IN PVACB Entry,
IN BOOLEAN  DeallocatingBcbListHeads
 

Definition at line 2070 of file cc.h.

References CcVacbLevelEntries, CcVacbLevelFreeList, CcVacbLevelWithBcbsEntries, CcVacbLevelWithBcbsFreeList, PVACB, and VOID().

Referenced by CcDeleteMbcb(), CcExtendVacbArray(), CcSetDirtyInMask(), and CcSetVacbLargeOffset().

02075 { 02076 if (DeallocatingBcbListHeads) { 02077 *Entry = (PVACB)CcVacbLevelWithBcbsFreeList; 02078 CcVacbLevelWithBcbsFreeList = Entry; 02079 CcVacbLevelWithBcbsEntries += 1; 02080 } else { 02081 *Entry = (PVACB)CcVacbLevelFreeList; 02082 CcVacbLevelFreeList = Entry; 02083 CcVacbLevelEntries += 1; 02084 } 02085 }

VOID FASTCALL CcDeleteSharedCacheMap IN PSHARED_CACHE_MAP  SharedCacheMap,
IN KIRQL  ListIrql,
IN ULONG  ReleaseFile
 

Definition at line 1517 of file fssup.c.

References ASSERT, _BCB::BaseAddress, _BCB::BcbLinks, _BCB::ByteLength, CACHE_NTC_BCB, CcAcquireMasterLock, CcBcbSpinLock, CcDeallocateBcb(), CcDeleteMbcb(), CcFreeActiveVacb(), CcFreeVirtualAddress(), CcReleaseMasterLock, CcTotalDirtyPages, CcUnlockVacbLevel, CcUnmapAndPurge(), CcWaitOnActiveCount(), DebugTrace, _BCB::Dirty, _CACHE_UNINITIALIZE_EVENT::Event, ExFreePool(), FALSE, _BCB::FileOffset, FsRtlReleaseFile(), GetActiveVacbAtDpcLevel, IsVacbLevelReferenced(), KeSetEvent(), me, _CACHE_UNINITIALIZE_EVENT::Next, _BCB::NodeTypeCode, NULL, ObDereferenceObject, PAGE_SHIFT, _BCB::PinCount, _FILE_OBJECT::SectionObjectPointer, SetFlag, _SECTION_OBJECT_POINTERS::SharedCacheMap, _BCB::Vacb, VACB_SIZE_OF_FIRST_LEVEL, and WRITE_QUEUED.

Referenced by CcInitializeCacheMap(), CcUninitializeCacheMap(), and CcWriteBehind().

01525 : 01526 01527 The specified SharedCacheMap is removed from the global list of 01528 SharedCacheMap's and deleted with all of its related structures. 01529 Other objects which were referenced in CcInitializeCacheMap are 01530 dereferenced here. 01531 01532 NOTE: The CcMasterSpinLock must already be acquired 01533 on entry. It is released on return. 01534 01535 Arguments: 01536 01537 SharedCacheMap - Pointer to Cache Map to delete 01538 01539 ListIrql - priority to restore to when releasing shared cache map list 01540 01541 ReleaseFile - Supplied as nonzero if file was acquired exclusive and 01542 should be released. 01543 01544 ReturnValue: 01545 01546 None. 01547 01548 --*/ 01549 01550 { 01551 LIST_ENTRY LocalList; 01552 PLIST_ENTRY NextEntry; 01553 PFILE_OBJECT FileObject; 01554 PVACB ActiveVacb; 01555 ULONG ActivePage; 01556 ULONG PageIsDirty; 01557 KIRQL OldIrql; 01558 PMBCB Mbcb; 01559 01560 DebugTrace(+1, me, "CcDeleteSharedCacheMap:\n", 0 ); 01561 DebugTrace( 0, me, " SharedCacheMap = %08lx\n", SharedCacheMap ); 01562 01563 // 01564 // Remove it from the global list and clear the pointer to it via 01565 // the File Object. 01566 // 01567 01568 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01569 01570 // 01571 // Zero pointer to SharedCacheMap. Once we have cleared the pointer, 01572 // we can/must release the global list to avoid deadlocks. 01573 // 01574 01575 FileObject = SharedCacheMap->FileObject; 01576 01577 FileObject->SectionObjectPointer->SharedCacheMap = (PSHARED_CACHE_MAP)NULL; 01578 SetFlag( SharedCacheMap->Flags, WRITE_QUEUED ); 01579 01580 // 01581 // The OpenCount is 0, but we still need to flush out any dangling 01582 // cache read or writes. 
01583 // 01584 01585 if ((SharedCacheMap->VacbActiveCount != 0) || (SharedCacheMap->NeedToZero != NULL)) { 01586 01587 // 01588 // We will put it in a local list and set a flag 01589 // to keep the Lazy Writer away from it, so that we can wrip it out 01590 // below if someone manages to sneak in and set something dirty, etc. 01591 // If the file system does not synchronize cleanup calls with an 01592 // exclusive on the stream, then this case is possible. 01593 // 01594 01595 InitializeListHead( &LocalList ); 01596 InsertTailList( &LocalList, &SharedCacheMap->SharedCacheMapLinks ); 01597 01598 // 01599 // If there is an active Vacb, then nuke it now (before waiting!). 01600 // 01601 01602 GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty ); 01603 01604 CcReleaseMasterLock( ListIrql ); 01605 01606 // 01607 // No point in saying the page is dirty (which can cause an allocation 01608 // failure), since we are deleting this SharedCacheMap anyway. 01609 // 01610 01611 CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, FALSE ); 01612 01613 while (SharedCacheMap->VacbActiveCount != 0) { 01614 CcWaitOnActiveCount( SharedCacheMap ); 01615 } 01616 01617 // 01618 // Now in case we hit the rare path where someone moved the 01619 // SharedCacheMap again, do a remove again now. It may be 01620 // from our local list or it may be from the dirty list, 01621 // but who cares? The important thing is to remove it in 01622 // the case it was the dirty list, since we will delete it 01623 // below. 01624 // 01625 01626 CcAcquireMasterLock( &ListIrql ); 01627 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01628 } 01629 01630 CcReleaseMasterLock( ListIrql ); 01631 01632 // 01633 // If there are Bcbs, then empty the list, asserting that none of them 01634 // can be pinned now if we have gotten this far! 
01635 // 01636 01637 NextEntry = SharedCacheMap->BcbList.Flink; 01638 while (NextEntry != &SharedCacheMap->BcbList) { 01639 01640 PBCB Bcb; 01641 01642 Bcb = (PBCB)CONTAINING_RECORD( NextEntry, 01643 BCB, 01644 BcbLinks ); 01645 NextEntry = Bcb->BcbLinks.Flink; 01646 01647 // 01648 // Skip over the pendaflex entries, only removing true Bcbs 01649 // so that level teardown doesn't need to special case unhooking 01650 // the pendaflex. This has the side benefit of dramatically 01651 // reducing write traffic to memory on teardown of large files. 01652 // 01653 // I really wonder how often we have Bcbs at teardown. This is 01654 // a lot of work that could be avoided otherwise. 01655 // 01656 01657 if (Bcb->NodeTypeCode == CACHE_NTC_BCB) { 01658 01659 ASSERT( Bcb->PinCount == 0 ); 01660 01661 RemoveEntryList( &Bcb->BcbLinks ); 01662 01663 // 01664 // For large metadata streams we unlock the Vacb level when removing. 01665 // We do not need spinlocks since no other thread can be accessing 01666 // this list when we are deleting the SharedCacheMap. 01667 // 01668 01669 CcUnlockVacbLevel( SharedCacheMap, Bcb->FileOffset.QuadPart ); 01670 01671 // 01672 // There is a small window where the data could still be mapped 01673 // if (for example) the Lazy Writer collides with a CcCopyWrite 01674 // in the foreground, and then someone calls CcUninitializeCacheMap 01675 // while the Lazy Writer is active. This is because the Lazy 01676 // Writer biases the pin count. Deal with that here. 
01677 // 01678 01679 if (Bcb->BaseAddress != NULL) { 01680 CcFreeVirtualAddress( Bcb->Vacb ); 01681 } 01682 01683 // 01684 // Debug routines used to remove Bcbs from the global list 01685 // 01686 01687 #if LIST_DBG 01688 01689 { 01690 KIRQL OldIrql; 01691 01692 ExAcquireSpinLock( &CcBcbSpinLock, &OldIrql ); 01693 01694 if (Bcb->CcBcbLinks.Flink != NULL) { 01695 01696 RemoveEntryList( &Bcb->CcBcbLinks ); 01697 CcBcbCount -= 1; 01698 } 01699 01700 ExReleaseSpinLock( &CcBcbSpinLock, OldIrql ); 01701 } 01702 01703 #endif 01704 01705 // 01706 // If the Bcb is dirty, we have to synchronize with the Lazy Writer 01707 // and reduce the total number of dirty. 01708 // 01709 01710 CcAcquireMasterLock( &ListIrql ); 01711 if (Bcb->Dirty) { 01712 01713 SharedCacheMap->DirtyPages -= Bcb->ByteLength >> PAGE_SHIFT; 01714 CcTotalDirtyPages -= Bcb->ByteLength >> PAGE_SHIFT; 01715 } 01716 01717 CcReleaseMasterLock( ListIrql ); 01718 01719 CcDeallocateBcb( Bcb ); 01720 } 01721 } 01722 01723 // 01724 // Call local routine to unmap, and purge if necessary. 01725 // 01726 01727 CcUnmapAndPurge( SharedCacheMap ); 01728 01729 // 01730 // Now release the file now that the purge is done. 01731 // 01732 01733 if (ReleaseFile) { 01734 FsRtlReleaseFile( SharedCacheMap->FileObject ); 01735 } 01736 01737 // 01738 // Dereference our pointer to the Section and FileObject 01739 // (We have to test the Section pointer since CcInitializeCacheMap 01740 // calls this routine for error recovery. Release our global 01741 // resource before dereferencing the FileObject to avoid deadlocks. 01742 // 01743 01744 if (SharedCacheMap->Section != NULL) { 01745 ObDereferenceObject( SharedCacheMap->Section ); 01746 } 01747 ObDereferenceObject( FileObject ); 01748 01749 // 01750 // If there is an Mbcb, deduct any dirty pages and deallocate. 
01751 // 01752 01753 if (SharedCacheMap->Mbcb != NULL) { 01754 CcDeleteMbcb( SharedCacheMap ); 01755 } 01756 01757 // 01758 // If there was an uninitialize event specified for this shared cache 01759 // map, then set it to the signalled state, indicating that we are 01760 // removing the section and deleting the shared cache map. 01761 // 01762 01763 if (SharedCacheMap->UninitializeEvent != NULL) { 01764 PCACHE_UNINITIALIZE_EVENT CUEvent, EventNext; 01765 01766 CUEvent = SharedCacheMap->UninitializeEvent; 01767 while (CUEvent != NULL) { 01768 EventNext = CUEvent->Next; 01769 KeSetEvent(&CUEvent->Event, 0, FALSE); 01770 CUEvent = EventNext; 01771 } 01772 } 01773 01774 // 01775 // Now delete the Vacb vector. 01776 // 01777 01778 if ((SharedCacheMap->Vacbs != &SharedCacheMap->InitialVacbs[0]) 01779 01780 && 01781 01782 (SharedCacheMap->Vacbs != NULL)) { 01783 01784 // 01785 // If there are Vacb levels, then the Vacb Array better be in an empty state. 01786 // 01787 01788 ASSERT((SharedCacheMap->SectionSize.QuadPart <= VACB_SIZE_OF_FIRST_LEVEL) || 01789 !IsVacbLevelReferenced( SharedCacheMap, SharedCacheMap->Vacbs, 1 )); 01790 01791 ExFreePool( SharedCacheMap->Vacbs ); 01792 } 01793 01794 // 01795 // If an event had to be allocated for this SharedCacheMap, 01796 // deallocate it. 01797 // 01798 01799 if ((SharedCacheMap->CreateEvent != NULL) && (SharedCacheMap->CreateEvent != &SharedCacheMap->Event)) { 01800 ExFreePool( SharedCacheMap->CreateEvent ); 01801 } 01802 01803 if ((SharedCacheMap->WaitOnActiveCount != NULL) && (SharedCacheMap->WaitOnActiveCount != &SharedCacheMap->Event)) { 01804 ExFreePool( SharedCacheMap->WaitOnActiveCount ); 01805 } 01806 01807 // 01808 // Deallocate the storeage for the SharedCacheMap. 01809 // 01810 01811 ExFreePool( SharedCacheMap ); 01812 01813 DebugTrace(-1, me, "CcDeleteSharedCacheMap -> VOID\n", 0 ); 01814 01815 return; 01816 01817 }

VOID CcDereferenceFileOffset IN PSHARED_CACHE_MAP  SharedCacheMap,
IN LARGE_INTEGER  FileOffset
 

Definition at line 1007 of file vacbsup.c.

References ASSERT, CcAcquireVacbLock, CcReleaseVacbLock, DISPATCH_LEVEL, SetVacb(), VACB_SIZE_OF_FIRST_LEVEL, and VACB_SPECIAL_DEREFERENCE.

Referenced by CcPinFileData().

01014 : 01015 01016 This routine must be called once for each call to CcReferenceFileOffset 01017 to remove the reference. 01018 01019 Arguments: 01020 01021 SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file. 01022 01023 FileOffset - Supplies the desired FileOffset within the file. 01024 01025 Return Value: 01026 01027 None 01028 01029 --*/ 01030 01031 { 01032 KIRQL OldIrql; 01033 01034 ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); 01035 01036 // 01037 // This operation only has meaning if the Vacbs are in the multilevel form. 01038 // 01039 01040 if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) { 01041 01042 // 01043 // Acquire the Vacb lock to synchronize the dereference. 01044 // 01045 01046 CcAcquireVacbLock( &OldIrql ); 01047 01048 ASSERT( FileOffset.QuadPart <= SharedCacheMap->SectionSize.QuadPart ); 01049 01050 SetVacb( SharedCacheMap, FileOffset, VACB_SPECIAL_DEREFERENCE ); 01051 01052 CcReleaseVacbLock( OldIrql ); 01053 } 01054 01055 ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); 01056 01057 return; 01058 }

VOID CcDrainVacbLevelZone  ) 
 

Definition at line 2044 of file vacbsup.c.

References CcAcquireVacbLock, CcMaxVacbLevelsSeen, CcReleaseVacbLock, CcVacbLevelEntries, CcVacbLevelFreeList, CcVacbLevelWithBcbsEntries, CcVacbLevelWithBcbsFreeList, ExFreePool(), and NULL.

Referenced by CcDeleteMbcb(), CcGetVacbMiss(), and CcUnmapVacbArray().

02049 : 02050 02051 This routine should be called any time some entries have been deallocated to 02052 the VacbLevel zone, and we want to insure the zone is returned to a normal level. 02053 02054 Arguments: 02055 02056 Return Value: 02057 02058 None. 02059 02060 Environment: 02061 02062 No spinlocks should be held upon entry. 02063 02064 --*/ 02065 02066 { 02067 KIRQL OldIrql; 02068 PVACB *NextVacbArray; 02069 02070 // 02071 // This is an unsafe loop to see if it looks like there is stuff to 02072 // clean up. 02073 // 02074 02075 while ((CcVacbLevelEntries > (CcMaxVacbLevelsSeen * 4)) || 02076 (CcVacbLevelWithBcbsEntries > 2)) { 02077 02078 // 02079 // Now go in and try to pick up one entry to free under a FastLock. 02080 // 02081 02082 NextVacbArray = NULL; 02083 CcAcquireVacbLock( &OldIrql ); 02084 if (CcVacbLevelEntries > (CcMaxVacbLevelsSeen * 4)) { 02085 NextVacbArray = CcVacbLevelFreeList; 02086 CcVacbLevelFreeList = (PVACB *)NextVacbArray[0]; 02087 CcVacbLevelEntries -= 1; 02088 } else if (CcVacbLevelWithBcbsEntries > 2) { 02089 NextVacbArray = CcVacbLevelWithBcbsFreeList; 02090 CcVacbLevelWithBcbsFreeList = (PVACB *)NextVacbArray[0]; 02091 CcVacbLevelWithBcbsEntries -= 1; 02092 } 02093 CcReleaseVacbLock( OldIrql ); 02094 02095 // 02096 // Since the loop is unsafe, we may not have gotten anything. 02097 // 02098 02099 if (NextVacbArray != NULL) { 02100 ExFreePool(NextVacbArray); 02101 } 02102 } 02103 }

LONG CcExceptionFilter IN NTSTATUS  ExceptionCode  ) 
 

Definition at line 619 of file lazyrite.c.

References DebugTrace, EXCEPTION_CONTINUE_SEARCH, EXCEPTION_EXECUTE_HANDLER, and FsRtlIsNtstatusExpected().

Referenced by CcAcquireByteRangeForWrite(), CcFlushCache(), CcLazyWriteScan(), and CcWorkerThread().

00625 : 00626 00627 This is the standard exception filter for worker threads which simply 00628 calls an FsRtl routine to see if an expected status is being raised. 00629 If so, the exception is handled, else we bug check. 00630 00631 Arguments: 00632 00633 ExceptionCode - the exception code which was raised. 00634 00635 Return Value: 00636 00637 EXCEPTION_EXECUTE_HANDLER if expected, else a Bug Check occurs. 00638 00639 --*/ 00640 00641 { 00642 DebugTrace(0, 0, "CcExceptionFilter %08lx\n", ExceptionCode); 00643 00644 if (FsRtlIsNtstatusExpected( ExceptionCode )) { 00645 00646 return EXCEPTION_EXECUTE_HANDLER; 00647 00648 } else { 00649 00650 return EXCEPTION_CONTINUE_SEARCH; 00651 } 00652 }

VOID CcExtendVacbArray IN PSHARED_CACHE_MAP  SharedCacheMap,
IN LARGE_INTEGER  NewSectionSize
 

Definition at line 1377 of file vacbsup.c.

References ASSERT, BCB, BEGIN_BCB_LIST_ARRAY, CcAcquireVacbLock, CcAcquireVacbLockAtDpcLevel, CcAllocateVacbLevel(), CcCalculateVacbLevelLockCount(), CcDeallocateVacbLevel(), CcMaxVacbLevelsSeen, CcPrefillVacbLevelZone(), CcReleaseVacbLock, CcReleaseVacbLockFromDpcLevel, CcVacbLevelEntries, ExFreePool(), ExRaiseStatus(), FALSE, FlagOn, FsRtlAllocatePoolWithTag, IsVacbLevelReferenced(), MODIFIED_WRITE_DISABLED, NonPagedPool, NULL, Offset, PAGE_SIZE, ReferenceVacbLevel(), SIZE_PER_BCB_LIST, SizeOfVacbArray, TRUE, VACB_LEVEL_BLOCK_SIZE, VACB_LEVEL_SHIFT, VACB_NUMBER_OF_LEVELS, VACB_OFFSET_SHIFT, and VACB_SIZE_OF_FIRST_LEVEL.

Referenced by CcInitializeCacheMap(), and CcSetFileSizes().

VOID
CcExtendVacbArray (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER NewSectionSize
    )

/*++

Routine Description:

    This routine must be called any time the section for a shared cache
    map is extended, in order to extend the Vacb array (if necessary).

Arguments:

    SharedCacheMap - Supplies the shared cache map for which the array is
        to be created.

    NewSectionSize - Supplies the new size of the section which must be
        covered by the Vacb array.

Return Value:

    None.

--*/

{
    KIRQL OldIrql;
    PVACB *OldAddresses;
    PVACB *NewAddresses;
    ULONG OldSize;
    ULONG NewSize, SizeToAllocate;
    LARGE_INTEGER NextLevelSize;
    BOOLEAN GrowingBcbListHeads = FALSE, CreateReference = FALSE;

    //
    // The following limit is greater than the MM limit (i.e., MM actually
    // only supports even smaller sections).  We have to reject the sign
    // bit, and testing the high byte for nonzero will surely only catch
    // errors.
    //

    if (NewSectionSize.HighPart & ~(PAGE_SIZE - 1)) {
        ExRaiseStatus(STATUS_SECTION_TOO_BIG);
    }

    //
    // Decide up front whether we will be growing the Bcb ListHeads, since
    // that changes which locks we must take below.
    //

    if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) &&
        (NewSectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY)) {

        GrowingBcbListHeads = TRUE;
    }

    //
    // Is there any work to do?
    //

    if (NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart) {

        //
        // Handle the growth of the first (flat) level here.
        //

        if (SharedCacheMap->SectionSize.QuadPart < VACB_SIZE_OF_FIRST_LEVEL) {

            NextLevelSize = NewSectionSize;

            //
            // Cap growth of this level at the first-level limit; beyond
            // that we switch to the multilevel form and need a reference
            // count appended to the block.
            //

            if (NextLevelSize.QuadPart >= VACB_SIZE_OF_FIRST_LEVEL) {
                NextLevelSize.QuadPart = VACB_SIZE_OF_FIRST_LEVEL;
                CreateReference = TRUE;
            }

            //
            // N.B.: SizeOfVacbArray only calculates the size of the VACB
            // pointer block.  We must adjust for Bcb listheads and the
            // multilevel reference count.
            //

            NewSize = SizeToAllocate = SizeOfVacbArray(NextLevelSize);
            OldSize = SizeOfVacbArray(SharedCacheMap->SectionSize);

            //
            // Only do something if the size is growing.
            //

            if (NewSize > OldSize) {

                //
                // Does this stream get a Bcb Listhead array?  (It doubles
                // the allocation: pointers, then listheads.)
                //

                if (GrowingBcbListHeads) {
                    SizeToAllocate *= 2;
                }

                //
                // Do we need space for the reference count?
                //

                if (CreateReference) {
                    SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE);
                }

                NewAddresses = FsRtlAllocatePoolWithTag( NonPagedPool, SizeToAllocate, 'pVcC' );

                //
                // If we grow Bcb ListHeads we must hold the BcbSpinLock as
                // well; in either case the Vacb lock serializes with anyone
                // who might like to "steal" one of the mappings we move.
                //

                if (GrowingBcbListHeads) {

                    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
                    CcAcquireVacbLockAtDpcLevel();

                } else {

                    CcAcquireVacbLock( &OldIrql );
                }

                //
                // Copy the old pointer block (if any) and zero the tail of
                // the new one.
                //

                OldAddresses = SharedCacheMap->Vacbs;
                if (OldAddresses != NULL) {
                    RtlCopyMemory( NewAddresses, OldAddresses, OldSize );
                } else {
                    OldSize = 0;
                }

                RtlZeroMemory( (PCHAR)NewAddresses + OldSize, NewSize - OldSize );

                if (CreateReference) {

                    //
                    // Back SizeToAllocate off to the reference's offset and
                    // zero the embedded VACB_LEVEL_REFERENCE.
                    //

                    SizeToAllocate -= sizeof(VACB_LEVEL_REFERENCE);
                    RtlZeroMemory( (PCHAR)NewAddresses + SizeToAllocate, sizeof(VACB_LEVEL_REFERENCE) );
                }

                //
                // See if we have to initialize Bcb Listheads (they live
                // immediately after the pointer block).
                //

                if (GrowingBcbListHeads) {

                    LARGE_INTEGER Offset;
                    PLIST_ENTRY BcbListHeadNew, TempEntry;

                    Offset.QuadPart = 0;
                    BcbListHeadNew = (PLIST_ENTRY)((PCHAR)NewAddresses + NewSize );

                    //
                    // Handle case where the old array already had Bcb
                    // Listheads: splice each new listhead in place of the
                    // corresponding old one.
                    //

                    if ((SharedCacheMap->SectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY) &&
                        (OldAddresses != NULL)) {

                        PLIST_ENTRY BcbListHeadOld;

                        BcbListHeadOld = (PLIST_ENTRY)((PCHAR)OldAddresses + OldSize);

                        do {
                            TempEntry = BcbListHeadOld->Flink;
                            RemoveEntryList( BcbListHeadOld );
                            InsertTailList( TempEntry, BcbListHeadNew );
                            Offset.QuadPart += SIZE_PER_BCB_LIST;
                            BcbListHeadOld += 1;
                            BcbListHeadNew += 1;
                        } while (Offset.QuadPart < SharedCacheMap->SectionSize.QuadPart);

                    //
                    // Otherwise, we are adding Bcb Listheads for the first
                    // time: walk the existing Bcbs (list is in descending
                    // offset order via Blink) and insert every listhead that
                    // belongs before each Bcb.
                    //

                    } else {

                        TempEntry = SharedCacheMap->BcbList.Blink;

                        while (TempEntry != &SharedCacheMap->BcbList) {

                            //
                            // Sit on this Bcb until we have inserted all
                            // listheads that go before it.
                            //

                            while (Offset.QuadPart <= ((PBCB)CONTAINING_RECORD(TempEntry, BCB, BcbLinks))->FileOffset.QuadPart) {

                                InsertHeadList(TempEntry, BcbListHeadNew);
                                Offset.QuadPart += SIZE_PER_BCB_LIST;
                                BcbListHeadNew += 1;
                            }
                            TempEntry = TempEntry->Blink;
                        }
                    }

                    //
                    // Now insert the rest of the new listhead entries that
                    // were not finished in either loop above.
                    //

                    while (Offset.QuadPart < NextLevelSize.QuadPart) {

                        InsertHeadList(&SharedCacheMap->BcbList, BcbListHeadNew);
                        Offset.QuadPart += SIZE_PER_BCB_LIST;
                        BcbListHeadNew += 1;
                    }
                }

                //
                // These two fields must be changed while still holding the
                // spinlock.
                //

                SharedCacheMap->Vacbs = NewAddresses;
                SharedCacheMap->SectionSize = NextLevelSize;

                //
                // Now we can free the spinlocks ahead of freeing pool.
                //

                if (GrowingBcbListHeads) {
                    CcReleaseVacbLockFromDpcLevel();
                    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
                } else {
                    CcReleaseVacbLock( OldIrql );
                }

                //
                // Free the old block unless it was the embedded InitialVacbs.
                //

                if ((OldAddresses != &SharedCacheMap->InitialVacbs[0]) &&
                    (OldAddresses != NULL)) {
                    ExFreePool( OldAddresses );
                }
            }

            //
            // Make sure SectionSize gets updated.  It is ok to fall through
            // here without a spinlock, so long as either Vacbs was not
            // changed, or it was changed together with SectionSize under the
            // spinlock(s) above.
            //

            SharedCacheMap->SectionSize = NextLevelSize;
        }

        //
        // Handle extends up to and within multi-level Vacb arrays here.  If
        // no additional Vacb levels are required there is no work to do;
        // otherwise we push the root down one or more levels, linked through
        // the first pointer in each new root.
        //

        if (NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart) {

            PVACB *NextVacbArray;
            ULONG NewLevel;
            ULONG Level = 1;
            ULONG Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;

            //
            // Loop to calculate how many levels we currently have.
            //

            while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift)) {

                Level += 1;
                Shift += VACB_LEVEL_SHIFT;
            }

            NewLevel = Level;

            //
            // Loop to calculate how many levels we need.
            //

            while (((NewSectionSize.QuadPart - 1) >> Shift) != 0) {

                NewLevel += 1;
                Shift += VACB_LEVEL_SHIFT;
            }

            //
            // Now see if we have any work to do.
            //

            if (NewLevel > Level) {

                //
                // Remember the maximum level ever seen (which is actually
                // NewLevel + 1).
                //

                if (NewLevel >= CcMaxVacbLevelsSeen) {
                    ASSERT(NewLevel <= VACB_NUMBER_OF_LEVELS);
                    CcMaxVacbLevelsSeen = NewLevel + 1;
                }

                //
                // Raise if we cannot preallocate enough buffers.
                //
                // NOTE(review): on success CcPrefillVacbLevelZone appears to
                // return with the Vacb lock held in OldIrql (it is released
                // at the bottom of this block) -- confirm against vacbsup.c.
                //

                if (!CcPrefillVacbLevelZone( NewLevel - Level, &OldIrql, FALSE )) {

                    ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
                }

                //
                // If the current Level of the file is 1, we have not been
                // maintaining a reference count, so we have to calculate it
                // before pushing.  In the boundary case we have made sure
                // that the reference space is available.
                //

                if (Level == 1) {

                    //
                    // We know this is always a leaf-like level right now.
                    //

                    CcCalculateVacbLevelLockCount( SharedCacheMap, SharedCacheMap->Vacbs, 0 );
                }

                //
                // If there are any active pointers in the top level, create
                // the new levels by repeatedly adding a new root.  If the
                // top-level pointer count is zero we must not push, because
                // we never allow empty leaves!
                //

                if (IsVacbLevelReferenced( SharedCacheMap, SharedCacheMap->Vacbs, Level - 1 )) {

                    while (NewLevel > Level++) {

                        ASSERT(CcVacbLevelEntries != 0);
                        NextVacbArray = CcAllocateVacbLevel(FALSE);

                        NextVacbArray[0] = (PVACB)SharedCacheMap->Vacbs;
                        ReferenceVacbLevel( SharedCacheMap, NextVacbArray, Level, 1, FALSE );

                        SharedCacheMap->Vacbs = NextVacbArray;
                    }

                } else {

                    //
                    // This level has no references but may have Bcb listheads
                    // due to the boundary case where we just expanded up to
                    // the multilevel form.  It cannot remain at the root, so
                    // replace it with one of our prefilled (non-Bcb) levels
                    // and unlink its Bcb listheads from the global list.
                    //

                    if (Level == 1 && FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {

                        PLIST_ENTRY PredecessorListHead, SuccessorListHead;

                        NextVacbArray = SharedCacheMap->Vacbs;
                        SharedCacheMap->Vacbs = CcAllocateVacbLevel(FALSE);

                        //
                        // Bridge the neighbors of this level's listhead run
                        // so the global Bcb list stays consistent.
                        //

                        PredecessorListHead = ((PLIST_ENTRY)((PCHAR)NextVacbArray + VACB_LEVEL_BLOCK_SIZE))->Flink;
                        SuccessorListHead = ((PLIST_ENTRY)((PCHAR)NextVacbArray + (VACB_LEVEL_BLOCK_SIZE * 2) - sizeof(LIST_ENTRY)))->Blink;
                        PredecessorListHead->Blink = SuccessorListHead;
                        SuccessorListHead->Flink = PredecessorListHead;

                        CcDeallocateVacbLevel( NextVacbArray, TRUE );
                    }
                }

                //
                // These two fields (Vacbs and SectionSize) must be changed
                // while still holding the spinlock.
                //

                SharedCacheMap->SectionSize = NewSectionSize;
                CcReleaseVacbLock( OldIrql );
            }

            //
            // Make sure SectionSize gets updated.  It is ok to fall through
            // here without a spinlock, so long as either Vacbs was not
            // changed, or it was changed together with SectionSize under the
            // spinlock(s) above.
            //

            SharedCacheMap->SectionSize = NewSectionSize;
        }
    }
}

VOID CcFreeActiveVacb IN PSHARED_CACHE_MAP  SharedCacheMap,
IN PVACB ActiveVacb  OPTIONAL,
IN ULONG  ActivePage,
IN ULONG  PageIsDirty
 

Definition at line 5885 of file cachesub.c.

References ACTIVE_PAGE_IS_DIRTY, CcAcquireMasterLock, CcFreeVirtualAddress(), CcReleaseMasterLock, CcSetDirtyInMask(), CcTotalDirtyPages, ClearFlag, FlagOn, MmUnlockCachedPage(), NULL, PAGE_SHIFT, PAGE_SIZE, and VACB_MAPPING_GRANULARITY.

Referenced by CcCopyRead(), CcCopyWrite(), CcDeleteSharedCacheMap(), CcFastCopyRead(), CcFastCopyWrite(), CcFlushCache(), CcGetVacbMiss(), CcMapAndCopy(), CcMdlRead(), CcPinFileData(), CcPrepareMdlWrite(), CcPurgeCacheSection(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWriteBehind(), and CcZeroEndOfLastPage().

VOID
CcFreeActiveVacb (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB ActiveVacb OPTIONAL,
    IN ULONG ActivePage,
    IN ULONG PageIsDirty
    )

/*++

Routine Description:

    This routine may be called to zero the end of a locked page or
    free the ActiveVacb for a Shared Cache Map, if there is one.
    Note that some callers are not synchronized with foreground
    activity, and may therefore not have an ActiveVacb.  Examples
    of unsynchronized callers are CcZeroEndOfLastPage (which is
    called by MM) and any flushing done by CcWriteBehind.

Arguments:

    SharedCacheMap - SharedCacheMap to examine for page to be zeroed.

    ActiveVacb - Vacb to free.

    ActivePage - Page that was used.

    PageIsDirty - ACTIVE_PAGE_IS_DIRTY if the active page is dirty.

Return Value:

    None.

--*/

{
    LARGE_INTEGER ActiveOffset;
    PVOID ActiveAddress;
    ULONG BytesLeftInPage;
    KIRQL OldIrql;

    //
    // If the page was locked, then unlock it.
    //

    if (SharedCacheMap->NeedToZero != NULL) {

        PVACB NeedToZeroVacb;

        //
        // Zero the rest of the page under spinlock control, and then
        // clear the address field.  This field makes zero->nonzero
        // transitions only when the file is exclusive, but it can make
        // nonzero->zero transitions any time the spinlock is not held.
        //

        ExAcquireFastLock( &SharedCacheMap->ActiveVacbSpinLock, &OldIrql );

        //
        // The address could already be gone; recheck under the lock.
        //

        ActiveAddress = SharedCacheMap->NeedToZero;
        if (ActiveAddress != NULL) {

            //
            // NeedToZero points one past the last byte written, so back up
            // by one before masking to get the remaining bytes in the page.
            //

            BytesLeftInPage = PAGE_SIZE - ((((ULONG)((ULONG_PTR)ActiveAddress) - 1) & (PAGE_SIZE - 1)) + 1);

            RtlZeroBytes( ActiveAddress, BytesLeftInPage );
            SharedCacheMap->NeedToZero = NULL;
            // NeedToZeroVacb is only assigned on this path; it is read below
            // only when ActiveAddress is non-NULL, so it is never used
            // uninitialized.
            NeedToZeroVacb = SharedCacheMap->NeedToZeroVacb;
        }
        ExReleaseFastLock( &SharedCacheMap->ActiveVacbSpinLock, OldIrql );

        //
        // Now call MM to unlock the address.  Note we will never store the
        // address at the start of the page, but we can sometimes store
        // the start of the next page when we have exactly filled the page,
        // hence the -1 before unlocking.
        //

        if (ActiveAddress != NULL) {
            MmUnlockCachedPage( (PVOID)((PCHAR)ActiveAddress - 1) );
            CcFreeVirtualAddress( NeedToZeroVacb );
        }
    }

    //
    // See if caller actually has an ActiveVacb.
    //

    if (ActiveVacb != NULL) {

        //
        // See if the page is dirty.
        //

        if (PageIsDirty) {

            ActiveOffset.QuadPart = (LONGLONG)ActivePage << PAGE_SHIFT;
            // NOTE(review): ActiveAddress computed here is not referenced
            // again on this path -- it appears to be a dead store.
            ActiveAddress = (PVOID)((PCHAR)ActiveVacb->BaseAddress +
                                    (ActiveOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1)));

            //
            // Tell the Lazy Writer to write the page.
            //

            CcSetDirtyInMask( SharedCacheMap, &ActiveOffset, PAGE_SIZE );

            //
            // Now clear the flag and decrement the dirty counts, but only
            // if no other active Vacb snuck in meanwhile.  Master lock is
            // taken first, then the ActiveVacbSpinLock at DPC level.
            //

            CcAcquireMasterLock( &OldIrql );
            ExAcquireSpinLockAtDpcLevel( &SharedCacheMap->ActiveVacbSpinLock );
            if ((SharedCacheMap->ActiveVacb == NULL) &&
                FlagOn(SharedCacheMap->Flags, ACTIVE_PAGE_IS_DIRTY)) {

                ClearFlag(SharedCacheMap->Flags, ACTIVE_PAGE_IS_DIRTY);
                SharedCacheMap->DirtyPages -= 1;
                CcTotalDirtyPages -= 1;
            }
            ExReleaseSpinLockFromDpcLevel( &SharedCacheMap->ActiveVacbSpinLock );
            CcReleaseMasterLock( OldIrql );
        }

        //
        // Now free the Vacb.
        //

        CcFreeVirtualAddress( ActiveVacb );
    }
}

VOID FASTCALL CcFreeVirtualAddress IN PVACB  Vacb  ) 
 

Definition at line 862 of file vacbsup.c.

References CcAcquireVacbLock, CcMoveVacbToReuseHead, CcMoveVacbToReuseTail, CcReleaseVacbLock, CheckedDec, FALSE, KeSetEvent(), NULL, _SHARED_CACHE_MAP::VacbActiveCount, and _SHARED_CACHE_MAP::WaitOnActiveCount.

Referenced by CcCopyRead(), CcDeleteSharedCacheMap(), CcFastCopyRead(), CcFlushCache(), CcFreeActiveVacb(), CcMapAndCopy(), CcMdlRead(), CcPerformReadAhead(), CcPinFileData(), CcPinMappedData(), CcPrepareMdlWrite(), CcPurgeAndClearCacheSection(), and CcUnpinFileData().

00868 : 00869 00870 This routine must be called once for each call to CcGetVirtualAddress 00871 to free that virtual address. 00872 00873 Arguments: 00874 00875 Vacb - Supplies the Vacb which was returned from CcGetVirtualAddress. 00876 00877 Return Value: 00878 00879 None. 00880 00881 --*/ 00882 00883 { 00884 KIRQL OldIrql; 00885 PSHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap; 00886 00887 CcAcquireVacbLock( &OldIrql ); 00888 00889 CheckedDec(Vacb->Overlay.ActiveCount); 00890 00891 // 00892 // If the count goes to zero, then we want to decrement the global 00893 // Active count. 00894 // 00895 00896 if (Vacb->Overlay.ActiveCount == 0) { 00897 00898 // 00899 // If the SharedCacheMap address is not NULL, then this one is 00900 // in use by a shared cache map, and we have to decrement his 00901 // count and see if anyone is waiting. 00902 // 00903 00904 if (SharedCacheMap != NULL) { 00905 00906 CheckedDec(SharedCacheMap->VacbActiveCount); 00907 00908 // 00909 // If there is someone waiting for this count to go to zero, 00910 // wake them here. 00911 // 00912 00913 if (SharedCacheMap->WaitOnActiveCount != NULL) { 00914 KeSetEvent( SharedCacheMap->WaitOnActiveCount, 0, FALSE ); 00915 } 00916 00917 // 00918 // Go to the back of the LRU to save this range for a bit 00919 // 00920 00921 CcMoveVacbToReuseTail( Vacb ); 00922 00923 } else { 00924 00925 // 00926 // This range is no longer referenced, so make it avaliable 00927 // 00928 00929 CcMoveVacbToReuseHead( Vacb ); 00930 } 00931 00932 } else { 00933 00934 // 00935 // This range is still in use, so move it away from the front 00936 // so that it doesn't consume cycles being checked. 00937 // 00938 00939 CcMoveVacbToReuseTail( Vacb ); 00940 } 00941 00942 CcReleaseVacbLock( OldIrql ); 00943 }

PLIST_ENTRY CcGetBcbListHeadLargeOffset IN PSHARED_CACHE_MAP  SharedCacheMap,
IN LONGLONG  FileOffset,
IN BOOLEAN  FailToSuccessor
 

Definition at line 2107 of file vacbsup.c.

References ASSERT, Index, NULL, TRUE, VACB_LAST_INDEX_FOR_LEVEL, VACB_LEVEL_BLOCK_SIZE, VACB_LEVEL_SHIFT, VACB_NUMBER_OF_LEVELS, VACB_OFFSET_SHIFT, and VACB_SIZE_OF_FIRST_LEVEL.

Referenced by CcSetVacbLargeOffset().

02115 : 02116 02117 This routine may be called to return the Bcb listhead for the specified FileOffset. 02118 It should only be called if the SectionSize is greater than VACB_SIZE_OF_FIRST_LEVEL. 02119 02120 Arguments: 02121 02122 SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the listhead 02123 is desired. 02124 02125 FileOffset - Supplies the fileOffset corresponding to the desired listhead. 02126 02127 FailToSuccessor - Instructs whether not finding the exact listhead should cause us to 02128 return the predecessor or successor Bcb listhead. 02129 02130 Return Value: 02131 02132 Returns the desired Listhead pointer. If the desired listhead does not actually exist 02133 yet, then it returns the appropriate listhead. 02134 02135 Environment: 02136 02137 The BcbSpinlock should be held on entry. 02138 02139 --*/ 02140 02141 { 02142 ULONG Level, Shift; 02143 PVACB *VacbArray, *NextVacbArray; 02144 ULONG Index; 02145 ULONG SavedIndexes[VACB_NUMBER_OF_LEVELS]; 02146 PVACB *SavedVacbArrays[VACB_NUMBER_OF_LEVELS]; 02147 ULONG SavedLevels = 0; 02148 02149 // 02150 // Initialize variables controlling our descent into the hierarchy. 02151 // 02152 02153 Level = 0; 02154 Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT; 02155 VacbArray = SharedCacheMap->Vacbs; 02156 02157 // 02158 // Caller must have verified that we have a hierarchy, otherwise this routine 02159 // would fail. 02160 // 02161 02162 ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL); 02163 02164 // 02165 // Loop to calculate how many levels we have and how much we have to 02166 // shift to index into the first level. 02167 // 02168 02169 do { 02170 02171 Level += 1; 02172 Shift += VACB_LEVEL_SHIFT; 02173 02174 } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift)); 02175 02176 // 02177 // Our caller could be asking for an offset off the end of section size, so if he 02178 // is actually off the size of the level, then return the main listhead. 
02179 // 02180 02181 if (FileOffset >= ((LONGLONG)1 << Shift)) { 02182 return &SharedCacheMap->BcbList; 02183 } 02184 02185 // 02186 // Now descend the tree to the bottom level to get the caller's Bcb ListHead. 02187 // 02188 02189 Shift -= VACB_LEVEL_SHIFT; 02190 do { 02191 02192 // 02193 // Decrement back to the level that describes the size we are within. 02194 // 02195 02196 Level -= 1; 02197 02198 // 02199 // Calculate the index into the Vacb block for this level. 02200 // 02201 02202 Index = (ULONG)(FileOffset >> Shift); 02203 ASSERT(Index <= VACB_LAST_INDEX_FOR_LEVEL); 02204 02205 // 02206 // Get block address for next level. 02207 // 02208 02209 NextVacbArray = (PVACB *)VacbArray[Index]; 02210 02211 // 02212 // If it is NULL then we have to go find the highest Bcb or listhead which 02213 // comes before the guy we are looking for, i.e., its predecessor. 02214 // 02215 02216 if (NextVacbArray == NULL) { 02217 02218 // 02219 // Back up to look for the highest guy earlier in this tree, i.e., the 02220 // predecessor listhead. 02221 // 02222 02223 while (TRUE) { 02224 02225 // 02226 // Scan, if we can, in the current array for a non-null index. 02227 // 02228 02229 if (FailToSuccessor) { 02230 02231 if (Index != VACB_LAST_INDEX_FOR_LEVEL) { 02232 02233 while ((Index != VACB_LAST_INDEX_FOR_LEVEL) && (VacbArray[++Index] == NULL)) { 02234 continue; 02235 } 02236 02237 // 02238 // If we found a non-null index, get out and try to return the 02239 // listhead. 02240 // 02241 02242 if ((NextVacbArray = (PVACB *)VacbArray[Index]) != NULL) { 02243 break; 02244 } 02245 } 02246 02247 } else { 02248 02249 if (Index != 0) { 02250 02251 while ((Index != 0) && (VacbArray[--Index] == NULL)) { 02252 continue; 02253 } 02254 02255 // 02256 // If we found a non-null index, get out and try to return the 02257 // listhead. 
02258 // 02259 02260 if ((NextVacbArray = (PVACB *)VacbArray[Index]) != NULL) { 02261 break; 02262 } 02263 } 02264 } 02265 02266 // 02267 // If there are no saved levels yet, then there is no predecessor or 02268 // successor - it is the main listhead. 02269 // 02270 02271 if (SavedLevels == 0) { 02272 return &SharedCacheMap->BcbList; 02273 } 02274 02275 // 02276 // Otherwise, we can pop up a level in the tree and start scanning 02277 // from that guy for a path to the right listhead. 02278 // 02279 02280 Level += 1; 02281 Index = SavedIndexes[--SavedLevels]; 02282 VacbArray = SavedVacbArrays[SavedLevels]; 02283 } 02284 02285 // 02286 // We have backed up in the hierarchy, so now we are just looking for the 02287 // highest/lowest guy in the level we want, i.e., the level-linking listhead. 02288 // So smash FileOffset accordingly (we mask the high bits out anyway). 02289 // 02290 02291 if (FailToSuccessor) { 02292 FileOffset = 0; 02293 } else { 02294 FileOffset = MAXLONGLONG; 02295 } 02296 } 02297 02298 // 02299 // We save Index and VacbArray at each level, for the case that we 02300 // have to walk back up the tree to find a predecessor. 02301 // 02302 02303 SavedIndexes[SavedLevels] = Index; 02304 SavedVacbArrays[SavedLevels] = VacbArray; 02305 SavedLevels += 1; 02306 02307 // 02308 // Now make this one our current pointer, and mask away the extraneous high-order 02309 // FileOffset bits for this level. 02310 // 02311 02312 VacbArray = NextVacbArray; 02313 FileOffset &= ((LONGLONG)1 << Shift) - 1; 02314 Shift -= VACB_LEVEL_SHIFT; 02315 02316 // 02317 // Loop until we hit the bottom level. 02318 // 02319 02320 } while (Level != 0); 02321 02322 // 02323 // Now calculate the index for the bottom level and return the appropriate listhead. 
02324 // (The normal Vacb index indexes to a pointer to a Vacb for a .25MB view, so dropping 02325 // the low bit gets you to the even-indexed Vacb pointer which is one block size below 02326 // the two-pointer listhead for the Bcbs for that .5MB range...) 02327 // 02328 02329 Index = (ULONG)(FileOffset >> Shift); 02330 return (PLIST_ENTRY)((PCHAR)&VacbArray[Index & ~1] + VACB_LEVEL_BLOCK_SIZE); 02331 }

PVOID CcGetVirtualAddress ( IN PSHARED_CACHE_MAP  SharedCacheMap,
IN LARGE_INTEGER  FileOffset,
OUT PVACB *  Vacb,
OUT PULONG  ReceivedLength
 ) 

PVOID CcGetVirtualAddressIfMapped ( IN PSHARED_CACHE_MAP  SharedCacheMap,
IN LONGLONG  FileOffset,
OUT PVACB *  Vacb,
OUT PULONG  ReceivedLength
 ) 

Definition at line 230 of file vacbsup.c.

References ASSERT, CcAcquireVacbLock, CcMoveVacbToReuseTail, CcReleaseVacbLock, DISPATCH_LEVEL, GetVacb, NULL, and VACB_MAPPING_GRANULARITY.

Referenced by CcFlushCache().

00239 : 00240 00241 This routine returns a virtual address for the specified FileOffset, 00242 iff it is mapped. Otherwise, it informs the caller that the specified 00243 virtual address was not mapped. In the latter case, it still returns 00244 a ReceivedLength, which may be used to advance to the next view boundary. 00245 00246 Arguments: 00247 00248 SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file. 00249 00250 FileOffset - Supplies the desired FileOffset within the file. 00251 00252 Vacb - Returns a Vacb pointer which must be supplied later to free 00253 this virtual address, or NULL if not mapped. 00254 00255 ReceivedLength - Returns the number of bytes to the next view boundary, 00256 whether the desired file offset is mapped or not. 00257 00258 Return Value: 00259 00260 The virtual address at which the desired data is mapped, or NULL if it 00261 is not mapped. 00262 00263 --*/ 00264 00265 { 00266 KIRQL OldIrql; 00267 ULONG VacbOffset = (ULONG)FileOffset & (VACB_MAPPING_GRANULARITY - 1); 00268 PVOID Value = NULL; 00269 00270 ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); 00271 00272 // 00273 // Generate ReceivedLength return right away. 00274 // 00275 00276 *ReceivedLength = VACB_MAPPING_GRANULARITY - VacbOffset; 00277 00278 // 00279 // Acquire the Vacb lock to see if the desired offset is already mapped. 00280 // 00281 00282 CcAcquireVacbLock( &OldIrql ); 00283 00284 ASSERT( FileOffset <= SharedCacheMap->SectionSize.QuadPart ); 00285 00286 if ((*Vacb = GetVacb( SharedCacheMap, *(PLARGE_INTEGER)&FileOffset )) != NULL) { 00287 00288 if ((*Vacb)->Overlay.ActiveCount == 0) { 00289 SharedCacheMap->VacbActiveCount += 1; 00290 } 00291 00292 (*Vacb)->Overlay.ActiveCount += 1; 00293 00294 // 00295 // Move this range away from the front to avoid wasting cycles 00296 // looking at it for reuse. 
00297 // 00298 00299 CcMoveVacbToReuseTail( *Vacb ); 00300 00301 Value = (PVOID)((PCHAR)(*Vacb)->BaseAddress + VacbOffset); 00302 } 00303 00304 CcReleaseVacbLock( OldIrql ); 00305 return Value; 00306 }

VOID CcInitializeVacbs (  ) 
 

Definition at line 188 of file vacbsup.c.

References CcBeyondVacbs, CcNumberVacbs, CcVacbLru, CcVacbs, CcVacbSpinLock, FsRtlAllocatePoolWithTag, KeInitializeSpinLock(), _VACB::LruList, MmSizeOfSystemCacheInPages, NonPagedPool, PAGE_SHIFT, VACB, and VACB_OFFSET_SHIFT.

Referenced by CcInitializeCacheManager().

00193 : 00194 00195 This routine must be called during Cache Manager initialization to 00196 initialize the Virtual Address Control Block structures. 00197 00198 Arguments: 00199 00200 None. 00201 00202 Return Value: 00203 00204 None. 00205 00206 --*/ 00207 00208 { 00209 ULONG VacbBytes; 00210 PVACB NextVacb; 00211 00212 CcNumberVacbs = (MmSizeOfSystemCacheInPages >> (VACB_OFFSET_SHIFT - PAGE_SHIFT)) - 2; 00213 VacbBytes = CcNumberVacbs * sizeof(VACB); 00214 00215 KeInitializeSpinLock( &CcVacbSpinLock ); 00216 CcVacbs = (PVACB)FsRtlAllocatePoolWithTag( NonPagedPool, VacbBytes, 'aVcC' ); 00217 CcBeyondVacbs = (PVACB)((PCHAR)CcVacbs + VacbBytes); 00218 RtlZeroMemory( CcVacbs, VacbBytes ); 00219 00220 InitializeListHead( &CcVacbLru ); 00221 00222 for (NextVacb = CcVacbs; NextVacb < CcBeyondVacbs; NextVacb++) { 00223 00224 InsertTailList( &CcVacbLru, &NextVacb->LruList ); 00225 } 00226 }

VOID CcMapAndCopy ( IN PSHARED_CACHE_MAP  SharedCacheMap,
IN PVOID  UserBuffer,
IN PLARGE_INTEGER  FileOffset,
IN ULONG  Length,
IN ULONG  ZeroFlags,
IN BOOLEAN  WriteThrough
 ) 

Definition at line 6024 of file cachesub.c.

References ACTIVE_PAGE_IS_DIRTY, ASSERT, _VACB::BaseAddress, CcAcquireVacbLock, CcCopyReadExceptionFilter(), CcFreeActiveVacb(), CcFreeVirtualAddress(), CcGetVirtualAddress(), CcMaxDirtyWrite, CcReleaseVacbLock, CcSetDirtyInMask(), DebugTrace, DebugTrace2, ExRaiseStatus(), FlagOn, FsRtlNormalizeNtstatus(), me, MmCopyToCachedPage(), MmFlushSection(), MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetAddressRangeModified(), MmSetPageFaultReadAhead, NT_SUCCESS, NTSTATUS(), NULL, _VACB::Overlay, PAGE_SHIFT, PAGE_SIZE, PsGetCurrentThread, SetActiveVacb, Status, TRUE, try_return, ZERO_FIRST_PAGE, ZERO_LAST_PAGE, and ZERO_MIDDLE_PAGES.

Referenced by CcCopyWrite(), and CcFastCopyWrite().

06035 : 06036 06037 This routine may be called to copy the specified user data to the 06038 cache via a special Mm routine which copies the data to uninitialized 06039 pages and returns. 06040 06041 Arguments: 06042 06043 SharedCacheMap - Supplies the address of the SharedCacheMap for the 06044 data. 06045 06046 UserBuffer - unsafe buffer supplying the user's data to be written 06047 06048 FileOffset - Supplies the file offset to be modified 06049 06050 Length - Supplies the total amount of data 06051 06052 ZeroFlags - Defines which pages may be zeroed if not resident. 06053 06054 WriteThrough - Supplies whether the data is to be written through or not 06055 06056 Return Value: 06057 06058 None 06059 06060 --*/ 06061 06062 { 06063 ULONG ReceivedLength; 06064 ULONG ZeroCase; 06065 PVOID CacheBuffer; 06066 PVOID SavedMappedBuffer; 06067 ULONG SavedMappedLength; 06068 ULONG ActivePage; 06069 KIRQL OldIrql; 06070 LARGE_INTEGER PFileOffset; 06071 IO_STATUS_BLOCK IoStatus; 06072 NTSTATUS Status; 06073 ULONG SavedState; 06074 BOOLEAN MorePages; 06075 ULONG SavedTotalLength = Length; 06076 LARGE_INTEGER LocalOffset = *FileOffset; 06077 ULONG PageOffset = FileOffset->LowPart & (PAGE_SIZE - 1); 06078 PVACB Vacb = NULL; 06079 PETHREAD Thread = PsGetCurrentThread(); 06080 06081 // 06082 // Initialize SavePage to TRUE to skip the finally clause on zero-length 06083 // writes. 06084 // 06085 06086 BOOLEAN SavePage = TRUE; 06087 06088 DebugTrace(+1, me, "CcMapAndCopy:\n", 0 ); 06089 DebugTrace( 0, me, " SharedCacheMap = %08lx\n", SharedCacheMap ); 06090 DebugTrace2(0, me, " FileOffset = %08lx, %08lx\n", FileOffset->LowPart, 06091 FileOffset->HighPart ); 06092 DebugTrace( 0, me, " Length = %08lx\n", Length ); 06093 06094 MmSavePageFaultReadAhead( Thread, &SavedState ); 06095 06096 // 06097 // try around everything for cleanup. 
06098 // 06099 06100 try { 06101 06102 while (Length != 0) { 06103 06104 CacheBuffer = CcGetVirtualAddress( SharedCacheMap, 06105 LocalOffset, 06106 &Vacb, 06107 &ReceivedLength ); 06108 06109 // 06110 // If we got more than we need, make sure to only use 06111 // the right amount. 06112 // 06113 06114 if (ReceivedLength > Length) { 06115 ReceivedLength = Length; 06116 } 06117 SavedMappedBuffer = CacheBuffer; 06118 SavedMappedLength = ReceivedLength; 06119 Length -= ReceivedLength; 06120 06121 // 06122 // Now loop to touch all of the pages, calling MM to insure 06123 // that if we fault, we take in exactly the number of pages 06124 // we need. 06125 // 06126 06127 CacheBuffer = (PVOID)((PCHAR)CacheBuffer - PageOffset); 06128 ReceivedLength += PageOffset; 06129 06130 // 06131 // Loop to touch or zero the pages. 06132 // 06133 06134 ZeroCase = ZERO_FIRST_PAGE; 06135 06136 // 06137 // Set up offset to page for use below. 06138 // 06139 06140 PFileOffset = LocalOffset; 06141 PFileOffset.LowPart -= PageOffset; 06142 06143 while (TRUE) { 06144 06145 // 06146 // Calculate whether we wish to save an active page 06147 // or not. 06148 // 06149 06150 SavePage = ((Length == 0) && 06151 (ReceivedLength < PAGE_SIZE) && 06152 (SavedTotalLength <= (PAGE_SIZE / 2)) && 06153 !WriteThrough); 06154 06155 MorePages = (ReceivedLength > PAGE_SIZE); 06156 06157 // 06158 // Copy the data to the user buffer. 06159 // 06160 06161 try { 06162 06163 // 06164 // It is possible that there is a locked page 06165 // hanging around, and so we need to nuke it here. 06166 // 06167 06168 if (SharedCacheMap->NeedToZero != NULL) { 06169 CcFreeActiveVacb( SharedCacheMap, NULL, 0, 0 ); 06170 } 06171 06172 Status = STATUS_SUCCESS; 06173 if (FlagOn(ZeroFlags, ZeroCase)) { 06174 06175 Status = MmCopyToCachedPage( CacheBuffer, 06176 UserBuffer, 06177 PageOffset, 06178 MorePages ? 
06179 (PAGE_SIZE - PageOffset) : 06180 (ReceivedLength - PageOffset), 06181 SavePage ); 06182 06183 if (!NT_SUCCESS(Status)) { 06184 06185 ExRaiseStatus( FsRtlNormalizeNtstatus( Status, 06186 STATUS_INVALID_USER_BUFFER )); 06187 } 06188 06189 // 06190 // Otherwise, we have to actually copy the data ourselves. 06191 // 06192 06193 } else { 06194 06195 MmSetPageFaultReadAhead( Thread, 06196 (MorePages && FlagOn(ZeroFlags, ZERO_LAST_PAGE)) ? 1 : 0); 06197 06198 RtlCopyBytes( (PVOID)((PCHAR)CacheBuffer + PageOffset), 06199 UserBuffer, 06200 MorePages ? 06201 (PAGE_SIZE - PageOffset) : 06202 (ReceivedLength - PageOffset) ); 06203 06204 MmResetPageFaultReadAhead( Thread, SavedState ); 06205 06206 } 06207 06208 } except( CcCopyReadExceptionFilter( GetExceptionInformation(), 06209 &Status ) ) { 06210 06211 // 06212 // If we got an access violation, then the user buffer went 06213 // away. Otherwise we must have gotten an I/O error trying 06214 // to bring the data in. 06215 // 06216 06217 if (Status == STATUS_ACCESS_VIOLATION) { 06218 ExRaiseStatus( STATUS_INVALID_USER_BUFFER ); 06219 } 06220 else { 06221 ExRaiseStatus( FsRtlNormalizeNtstatus( Status, 06222 STATUS_UNEXPECTED_IO_ERROR )); 06223 } 06224 } 06225 06226 // 06227 // Now get out quickly if it is a small write and we want 06228 // to save the page. 06229 // 06230 06231 if (SavePage) { 06232 06233 ActivePage = (ULONG)( Vacb->Overlay.FileOffset.QuadPart >> PAGE_SHIFT ) + 06234 (ULONG)(((PCHAR)CacheBuffer - (PCHAR)Vacb->BaseAddress) >> 06235 PAGE_SHIFT); 06236 06237 PFileOffset.LowPart += ReceivedLength; 06238 06239 // 06240 // If the cache page was not locked, then clear the address 06241 // to zero from. 06242 // 06243 06244 if (Status == STATUS_CACHE_PAGE_LOCKED) { 06245 06246 // 06247 // We need to guarantee this Vacb for zeroing and calling 06248 // MmUnlockCachedPage, so we increment the active count here 06249 // and remember it for CcFreeActiveVacb. 
06250 // 06251 06252 CcAcquireVacbLock( &OldIrql ); 06253 Vacb->Overlay.ActiveCount += 1; 06254 06255 ExAcquireSpinLockAtDpcLevel( &SharedCacheMap->ActiveVacbSpinLock ); 06256 06257 ASSERT(SharedCacheMap->NeedToZero == NULL); 06258 06259 SharedCacheMap->NeedToZero = (PVOID)((PCHAR)CacheBuffer + 06260 (PFileOffset.LowPart & (PAGE_SIZE - 1))); 06261 SharedCacheMap->NeedToZeroPage = ActivePage; 06262 SharedCacheMap->NeedToZeroVacb = Vacb; 06263 06264 ExReleaseSpinLockFromDpcLevel( &SharedCacheMap->ActiveVacbSpinLock ); 06265 CcReleaseVacbLock( OldIrql ); 06266 06267 } 06268 06269 SetActiveVacb( SharedCacheMap, 06270 OldIrql, 06271 Vacb, 06272 ActivePage, 06273 ACTIVE_PAGE_IS_DIRTY ); 06274 06275 try_return( NOTHING ); 06276 } 06277 06278 // 06279 // If it looks like we may save a page and exit on the next loop, 06280 // then we must make sure to mark the current page dirty. Note 06281 // that Cc[Fast]CopyWrite will finish the last part of any page 06282 // before allowing us to free the Active Vacb above, therefore 06283 // this case only occurs for a small random write. 06284 // 06285 06286 if ((SavedTotalLength <= (PAGE_SIZE / 2)) && !WriteThrough) { 06287 06288 CcSetDirtyInMask( SharedCacheMap, &PFileOffset, ReceivedLength ); 06289 } 06290 06291 UserBuffer = (PVOID)((PCHAR)UserBuffer + (PAGE_SIZE - PageOffset)); 06292 PageOffset = 0; 06293 06294 // 06295 // If there is more than a page to go (including what we just 06296 // copied), then adjust our buffer pointer and counts, and 06297 // determine if we are to the last page yet. 06298 // 06299 06300 if (MorePages) { 06301 06302 CacheBuffer = (PCHAR)CacheBuffer + PAGE_SIZE; 06303 ReceivedLength -= PAGE_SIZE; 06304 06305 // 06306 // Update our offset to the page. Note that 32-bit 06307 // add is ok since we cannot cross a Vacb boundary 06308 // and we reinitialize this offset before entering 06309 // this loop again. 
06310 // 06311 06312 PFileOffset.LowPart += PAGE_SIZE; 06313 06314 if (ReceivedLength > PAGE_SIZE) { 06315 ZeroCase = ZERO_MIDDLE_PAGES; 06316 } else { 06317 ZeroCase = ZERO_LAST_PAGE; 06318 } 06319 06320 } else { 06321 06322 break; 06323 } 06324 } 06325 06326 // 06327 // If there is still more to write (ie. we are going to step 06328 // onto the next vacb) AND we just dirtied more than 64K, then 06329 // do a vicarious MmFlushSection here. This prevents us from 06330 // creating unlimited dirty pages while holding the file 06331 // resource exclusive. We also do not need to set the pages 06332 // dirty in the mask in this case. 06333 // 06334 06335 if (Length > CcMaxDirtyWrite) { 06336 06337 MmSetAddressRangeModified( SavedMappedBuffer, SavedMappedLength ); 06338 MmFlushSection( SharedCacheMap->FileObject->SectionObjectPointer, 06339 &LocalOffset, 06340 SavedMappedLength, 06341 &IoStatus, 06342 TRUE ); 06343 06344 if (!NT_SUCCESS(IoStatus.Status)) { 06345 ExRaiseStatus( FsRtlNormalizeNtstatus( IoStatus.Status, 06346 STATUS_UNEXPECTED_IO_ERROR )); 06347 } 06348 06349 // 06350 // For write through files, call Mm to propagate the dirty bits 06351 // here while we have the view mapped, so we know the flush will 06352 // work below. Again - do not set dirty in the mask. 06353 // 06354 06355 } else if (WriteThrough) { 06356 06357 MmSetAddressRangeModified( SavedMappedBuffer, SavedMappedLength ); 06358 06359 // 06360 // For the normal case, just set the pages dirty for the Lazy Writer 06361 // now. 06362 // 06363 06364 } else { 06365 06366 CcSetDirtyInMask( SharedCacheMap, &LocalOffset, SavedMappedLength ); 06367 } 06368 06369 CcFreeVirtualAddress( Vacb ); 06370 Vacb = NULL; 06371 06372 // 06373 // If we have to loop back to get at least a page, it will be ok to 06374 // zero the first page. If we are not getting at least a page, we 06375 // must make sure we clear the ZeroFlags if we cannot zero the last 06376 // page. 
06377 // 06378 06379 if (Length >= PAGE_SIZE) { 06380 ZeroFlags |= ZERO_FIRST_PAGE; 06381 } else if ((ZeroFlags & ZERO_LAST_PAGE) == 0) { 06382 ZeroFlags = 0; 06383 } 06384 06385 // 06386 // Note that if ReceivedLength (and therefore SavedMappedLength) 06387 // was truncated to the transfer size then the new LocalOffset 06388 // computed below is not correct. This is not an issue since 06389 // in that case (Length == 0) and we would never get here. 06390 // 06391 06392 LocalOffset.QuadPart = LocalOffset.QuadPart + (LONGLONG)SavedMappedLength; 06393 } 06394 try_exit: NOTHING; 06395 } 06396 06397 // 06398 // Cleanup on the way out. 06399 // 06400 06401 finally { 06402 06403 MmResetPageFaultReadAhead( Thread, SavedState ); 06404 06405 // 06406 // We have no work to do if we have squirreled away the Vacb. 06407 // 06408 06409 if (!SavePage || AbnormalTermination()) { 06410 06411 // 06412 // Make sure we do not leave anything mapped or dirty in the PTE 06413 // on the way out. 06414 // 06415 06416 if (Vacb != NULL) { 06417 06418 CcFreeVirtualAddress( Vacb ); 06419 } 06420 06421 // 06422 // Either flush the whole range because of write through, or 06423 // mark it dirty for the lazy writer. 06424 // 06425 06426 if (WriteThrough) { 06427 06428 MmFlushSection ( SharedCacheMap->FileObject->SectionObjectPointer, 06429 FileOffset, 06430 SavedTotalLength, 06431 &IoStatus, 06432 TRUE ); 06433 06434 if (!NT_SUCCESS(IoStatus.Status)) { 06435 ExRaiseStatus( FsRtlNormalizeNtstatus( IoStatus.Status, 06436 STATUS_UNEXPECTED_IO_ERROR )); 06437 } 06438 06439 // 06440 // Advance ValidDataGoal 06441 // 06442 06443 LocalOffset.QuadPart = FileOffset->QuadPart + (LONGLONG)SavedTotalLength; 06444 if (LocalOffset.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart) { 06445 SharedCacheMap->ValidDataGoal = LocalOffset; 06446 } 06447 } 06448 } 06449 } 06450 06451 DebugTrace(-1, me, "CcMapAndCopy -> %02lx\n", Result ); 06452 06453 return; 06454 }

BOOLEAN CcMapAndRead ( IN PSHARED_CACHE_MAP  SharedCacheMap,
IN PLARGE_INTEGER  FileOffset,
IN ULONG  Length,
IN ULONG  ZeroFlags,
IN BOOLEAN  Wait,
IN PVOID  BaseAddress
 ) 

Definition at line 5749 of file cachesub.c.

References COMPUTE_PAGES_SPANNED, FALSE, FlagOn, MmCheckCachedPageState(), MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetPageFaultReadAhead, PAGE_SIZE, PsGetCurrentThread, TRUE, try_return, ZERO_FIRST_PAGE, ZERO_LAST_PAGE, and ZERO_MIDDLE_PAGES.

Referenced by CcPinFileData(), and CcPrepareMdlWrite().

05760 : 05761 05762 This routine may be called to insure that the specified data is mapped, 05763 read into memory and locked. If TRUE is returned, then the 05764 correct I/O status for the transfer is also returned, along with 05765 a system-space address for the data. 05766 05767 Arguments: 05768 05769 SharedCacheMap - Supplies the address of the SharedCacheMap for the 05770 data. 05771 05772 FileOffset - Supplies the file offset of the desired data. 05773 05774 Length - Supplies the total amount of data desired. 05775 05776 ZeroFlags - Defines which pages may be zeroed if not resident. 05777 05778 Wait - Supplies FALSE if the caller is not willing to block for the 05779 data, or TRUE if the caller is willing to block. 05780 05781 BaseAddress - Supplies the system base address at which the data may 05782 be accessed. 05783 05784 Return Value: 05785 05786 FALSE - if the caller supplied Wait = FALSE and the data could not 05787 be returned without blocking. 05788 05789 TRUE - if the data is being returned. 05790 05791 Note: this routine may raise an exception due to a map or read failure, 05792 however, this can only happen if Wait was specified as TRUE, since 05793 mapping and reading will not be performed if the caller cannot wait. 05794 05795 --*/ 05796 05797 { 05798 ULONG ZeroCase; 05799 ULONG SavedState; 05800 BOOLEAN Result = FALSE; 05801 PETHREAD Thread = PsGetCurrentThread(); 05802 05803 MmSavePageFaultReadAhead( Thread, &SavedState ); 05804 05805 // 05806 // try around everything for cleanup. 05807 // 05808 05809 try { 05810 05811 ULONG PagesToGo; 05812 05813 // 05814 // Now loop to touch all of the pages, calling MM to insure 05815 // that if we fault, we take in exactly the number of pages 05816 // we need. 05817 // 05818 05819 PagesToGo = COMPUTE_PAGES_SPANNED( BaseAddress, Length ); 05820 05821 // 05822 // Loop to touch or zero the pages. 
05823 // 05824 05825 ZeroCase = ZERO_FIRST_PAGE; 05826 05827 while (PagesToGo) { 05828 05829 // 05830 // If we cannot zero this page, or Mm failed to return 05831 // a zeroed page, then just fault it in. 05832 // 05833 05834 MmSetPageFaultReadAhead( Thread, (PagesToGo - 1) ); 05835 05836 if (!FlagOn(ZeroFlags, ZeroCase) || 05837 !MmCheckCachedPageState(BaseAddress, TRUE)) { 05838 05839 // 05840 // If we get here, it is almost certainly due to the fact 05841 // that we can not take a zero page. MmCheckCachedPageState 05842 // will so rarely return FALSE, that we will not worry 05843 // about it. We will only check if the page is there if 05844 // Wait is FALSE, so that we can do the right thing. 05845 // 05846 05847 if (!MmCheckCachedPageState(BaseAddress, FALSE) && !Wait) { 05848 try_return( Result = FALSE ); 05849 } 05850 } 05851 05852 BaseAddress = (PCHAR)BaseAddress + PAGE_SIZE; 05853 PagesToGo -= 1; 05854 05855 if (PagesToGo == 1) { 05856 ZeroCase = ZERO_LAST_PAGE; 05857 } else { 05858 ZeroCase = ZERO_MIDDLE_PAGES; 05859 } 05860 } 05861 05862 try_return( Result = TRUE ); 05863 05864 try_exit: NOTHING; 05865 } 05866 05867 // 05868 // Cleanup on the way out. 05869 // 05870 05871 finally { 05872 05873 MmResetPageFaultReadAhead(Thread, SavedState); 05874 } 05875 05876 return Result; 05877 }

VOID FASTCALL CcPerformReadAhead ( IN PFILE_OBJECT  FileObject  ) 
 

Definition at line 1633 of file cachesub.c.

References _CACHE_MANAGER_CALLBACKS::AcquireForReadAhead, _SHARED_CACHE_MAP::Callbacks, CcAcquireMasterLock, CcDecrementOpenCount, CcDirtySharedCacheMapList, CcFreeVirtualAddress(), CcGetVirtualAddress(), CcMissCounter, CcReadAheadIos, CcReleaseMasterLock, CcScheduleLazyWriteScan(), CcThrowAway, COMPUTE_PAGES_SPANNED, DebugTrace, _SHARED_CACHE_MAP::DirtyPages, FALSE, _SHARED_CACHE_MAP::FileSize, FlagOn, _SHARED_CACHE_MAP::Flags, FO_SEQUENTIAL_ONLY, _SHARED_CACHE_MAP::LazyWriteContext, LazyWriter, MAX_READ_AHEAD, me, MmCheckCachedPageState(), MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetPageFaultReadAhead, NULL, ObDereferenceObject, Offset, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SIZE, PsGetCurrentThread, _PRIVATE_CACHE_MAP::ReadAheadActive, _PRIVATE_CACHE_MAP::ReadAheadEnabled, _PRIVATE_CACHE_MAP::ReadAheadLength, _PRIVATE_CACHE_MAP::ReadAheadOffset, _PRIVATE_CACHE_MAP::ReadAheadSpinLock, _CACHE_MANAGER_CALLBACKS::ReleaseFromReadAhead, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::SharedCacheMapLinks, TRUE, try_return, and WRITE_QUEUED.

Referenced by CcWorkerThread().

01639 : 01640 01641 This routine is called by the Lazy Writer to perform read ahead which 01642 has been scheduled for this file by CcScheduleReadAhead. 01643 01644 Arguments: 01645 01646 FileObject - supplies pointer to FileObject on which readahead should be 01647 considered. 01648 01649 Return Value: 01650 01651 None 01652 --*/ 01653 01654 { 01655 KIRQL OldIrql; 01656 PSHARED_CACHE_MAP SharedCacheMap; 01657 PPRIVATE_CACHE_MAP PrivateCacheMap; 01658 ULONG i; 01659 LARGE_INTEGER ReadAheadOffset[2]; 01660 ULONG ReadAheadLength[2]; 01661 PCACHE_MANAGER_CALLBACKS Callbacks; 01662 PVOID Context; 01663 ULONG SavedState; 01664 BOOLEAN Done; 01665 BOOLEAN HitEof = FALSE; 01666 BOOLEAN ReadAheadPerformed = FALSE; 01667 ULONG FaultOccurred = 0; 01668 PETHREAD Thread = PsGetCurrentThread(); 01669 PVACB Vacb = NULL; 01670 01671 BOOLEAN ResourceHeld = FALSE; 01672 01673 DebugTrace(+1, me, "CcPerformReadAhead:\n", 0 ); 01674 DebugTrace( 0, me, " FileObject = %08lx\n", FileObject ); 01675 01676 MmSavePageFaultReadAhead( Thread, &SavedState ); 01677 01678 try { 01679 01680 // 01681 // Since we have the open count biased, we can safely access the 01682 // SharedCacheMap. 01683 // 01684 01685 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 01686 01687 Callbacks = SharedCacheMap->Callbacks; 01688 Context = SharedCacheMap->LazyWriteContext; 01689 01690 // 01691 // After the first time, keep looping as long as there are new 01692 // read ahead requirements. (We will skip out below.) 01693 // 01694 01695 while (TRUE) { 01696 01697 // 01698 // Get SharedCacheMap and PrivateCacheMap. If either are now NULL, get 01699 // out. 01700 // 01701 01702 CcAcquireMasterLock( &OldIrql ); 01703 01704 PrivateCacheMap = FileObject->PrivateCacheMap; 01705 01706 // 01707 // Now capture the information that we need, so that we can drop the 01708 // SharedList Resource. This information is advisory only anyway, and 01709 // the caller must guarantee that the FileObject is referenced. 
01710 // 01711 01712 if (PrivateCacheMap != NULL) { 01713 01714 ExAcquireSpinLockAtDpcLevel( &PrivateCacheMap->ReadAheadSpinLock ); 01715 01716 // 01717 // We are done when the lengths are 0 01718 // 01719 01720 Done = ((PrivateCacheMap->ReadAheadLength[0] | 01721 PrivateCacheMap->ReadAheadLength[1]) == 0); 01722 01723 ReadAheadOffset[0] = PrivateCacheMap->ReadAheadOffset[0]; 01724 ReadAheadOffset[1] = PrivateCacheMap->ReadAheadOffset[1]; 01725 ReadAheadLength[0] = PrivateCacheMap->ReadAheadLength[0]; 01726 ReadAheadLength[1] = PrivateCacheMap->ReadAheadLength[1]; 01727 PrivateCacheMap->ReadAheadLength[0] = 0; 01728 PrivateCacheMap->ReadAheadLength[1] = 0; 01729 01730 ExReleaseSpinLockFromDpcLevel( &PrivateCacheMap->ReadAheadSpinLock ); 01731 } 01732 01733 CcReleaseMasterLock( OldIrql ); 01734 01735 // 01736 // Acquire the file shared. 01737 // 01738 01739 (*Callbacks->AcquireForReadAhead)( Context, TRUE ); 01740 ResourceHeld = TRUE; 01741 01742 if ((PrivateCacheMap == NULL) || Done) { 01743 01744 try_return( NOTHING ); 01745 } 01746 01747 // 01748 // PERFORM READ AHEAD 01749 // 01750 // 01751 // Now loop until everything is read in. The Read ahead is accomplished 01752 // by touching the pages with an appropriate ReadAhead parameter in MM. 
01753 // 01754 01755 i = 0; 01756 01757 do { 01758 01759 LARGE_INTEGER Offset, SavedOffset; 01760 ULONG Length, SavedLength; 01761 01762 Offset = ReadAheadOffset[i]; 01763 Length = ReadAheadLength[i]; 01764 SavedOffset = Offset; 01765 SavedLength = Length; 01766 01767 if ((Length != 0) 01768 01769 && 01770 01771 ( Offset.QuadPart <= SharedCacheMap->FileSize.QuadPart )) { 01772 01773 ReadAheadPerformed = TRUE; 01774 01775 // 01776 // Keep length within file and MAX_READ_AHEAD 01777 // 01778 01779 if ( ( Offset.QuadPart + (LONGLONG)Length ) >= SharedCacheMap->FileSize.QuadPart ) { 01780 01781 Length = (ULONG)( SharedCacheMap->FileSize.QuadPart - Offset.QuadPart ); 01782 HitEof = TRUE; 01783 01784 } 01785 if (Length > MAX_READ_AHEAD) { 01786 Length = MAX_READ_AHEAD; 01787 } 01788 01789 // 01790 // Now loop to read all of the desired data in. This loop 01791 // is more or less like the same loop to read data in 01792 // CcCopyRead, except that we do not copy anything, just 01793 // unmap as soon as it is in. 01794 // 01795 01796 while (Length != 0) { 01797 01798 ULONG ReceivedLength; 01799 PVOID CacheBuffer; 01800 ULONG PagesToGo; 01801 01802 // 01803 // Call local routine to Map or Access the file data. 01804 // If we cannot map the data because of a Wait condition, 01805 // return FALSE. 01806 // 01807 // Since this routine is intended to be called from 01808 // the finally handler from file system read modules, 01809 // it is imperative that it not raise any exceptions. 01810 // Therefore, if any expected exception is raised, we 01811 // will simply get out. 01812 // 01813 01814 CacheBuffer = CcGetVirtualAddress( SharedCacheMap, 01815 Offset, 01816 &Vacb, 01817 &ReceivedLength ); 01818 01819 // 01820 // If we got more than we need, make sure to only transfer 01821 // the right amount. 
01822 // 01823 01824 if (ReceivedLength > Length) { 01825 ReceivedLength = Length; 01826 } 01827 01828 // 01829 // Now loop to touch all of the pages, calling MM to insure 01830 // that if we fault, we take in exactly the number of pages 01831 // we need. 01832 // 01833 01834 PagesToGo = COMPUTE_PAGES_SPANNED( CacheBuffer, 01835 ReceivedLength ); 01836 01837 CcMissCounter = &CcReadAheadIos; 01838 01839 while (PagesToGo) { 01840 01841 MmSetPageFaultReadAhead( Thread, (PagesToGo - 1) ); 01842 FaultOccurred |= !MmCheckCachedPageState(CacheBuffer, FALSE); 01843 01844 CacheBuffer = (PCHAR)CacheBuffer + PAGE_SIZE; 01845 PagesToGo -= 1; 01846 } 01847 CcMissCounter = &CcThrowAway; 01848 01849 // 01850 // Calculate how much data we have left to go. 01851 // 01852 01853 Length -= ReceivedLength; 01854 01855 // 01856 // Assume we did not get all the data we wanted, and set 01857 // Offset to the end of the returned data. 01858 // 01859 01860 Offset.QuadPart = Offset.QuadPart + (LONGLONG)ReceivedLength; 01861 01862 // 01863 // It was only a page, so we can just leave this loop 01864 // After freeing the address. 01865 // 01866 01867 CcFreeVirtualAddress( Vacb ); 01868 Vacb = NULL; 01869 } 01870 } 01871 i += 1; 01872 } while (i <= 1); 01873 01874 // 01875 // Release the file 01876 // 01877 01878 (*Callbacks->ReleaseFromReadAhead)( Context ); 01879 ResourceHeld = FALSE; 01880 } 01881 01882 try_exit: NOTHING; 01883 } 01884 finally { 01885 01886 MmResetPageFaultReadAhead(Thread, SavedState); 01887 CcMissCounter = &CcThrowAway; 01888 01889 // 01890 // If we got an error faulting a single page in, release the Vacb 01891 // here. It is important to free any mapping before dropping the 01892 // resource to prevent purge problems. 
01893 // 01894 01895 if (Vacb != NULL) { 01896 CcFreeVirtualAddress( Vacb ); 01897 } 01898 01899 // 01900 // Release the file 01901 // 01902 01903 if (ResourceHeld) { 01904 (*Callbacks->ReleaseFromReadAhead)( Context ); 01905 } 01906 01907 // 01908 // To show we are done, we must make sure the PrivateCacheMap is 01909 // still there. 01910 // 01911 01912 CcAcquireMasterLock( &OldIrql ); 01913 01914 PrivateCacheMap = FileObject->PrivateCacheMap; 01915 01916 // 01917 // Show readahead is going inactive. 01918 // 01919 01920 if (PrivateCacheMap != NULL) { 01921 01922 ExAcquireSpinLockAtDpcLevel( &PrivateCacheMap->ReadAheadSpinLock ); 01923 PrivateCacheMap->ReadAheadActive = FALSE; 01924 01925 // 01926 // If he said sequential only and we smashed into Eof, then 01927 // let's reset the highwater mark in case he wants to read the 01928 // file sequentially again. 01929 // 01930 01931 if (HitEof && FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) { 01932 PrivateCacheMap->ReadAheadOffset[1].LowPart = 01933 PrivateCacheMap->ReadAheadOffset[1].HighPart = 0; 01934 } 01935 01936 // 01937 // If no faults occurred, turn read ahead off. 01938 // 01939 01940 if (ReadAheadPerformed && !FaultOccurred) { 01941 PrivateCacheMap->ReadAheadEnabled = FALSE; 01942 } 01943 01944 ExReleaseSpinLockFromDpcLevel( &PrivateCacheMap->ReadAheadSpinLock ); 01945 } 01946 01947 // 01948 // Free SharedCacheMap list 01949 // 01950 01951 CcReleaseMasterLock( OldIrql ); 01952 01953 ObDereferenceObject( FileObject ); 01954 01955 // 01956 // Serialize again to decrement the open count. 01957 // 01958 01959 CcAcquireMasterLock( &OldIrql ); 01960 01961 CcDecrementOpenCount( SharedCacheMap, 'adRP' ); 01962 01963 if ((SharedCacheMap->OpenCount == 0) && 01964 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 01965 (SharedCacheMap->DirtyPages == 0)) { 01966 01967 // 01968 // Move to the dirty list. 
01969 // 01970 01971 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01972 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 01973 &SharedCacheMap->SharedCacheMapLinks ); 01974 01975 // 01976 // Make sure the Lazy Writer will wake up, because we 01977 // want him to delete this SharedCacheMap. 01978 // 01979 01980 LazyWriter.OtherWork = TRUE; 01981 if (!LazyWriter.ScanActive) { 01982 CcScheduleLazyWriteScan(); 01983 } 01984 } 01985 01986 CcReleaseMasterLock( OldIrql ); 01987 } 01988 01989 DebugTrace(-1, me, "CcPerformReadAhead -> VOID\n", 0 ); 01990 01991 return; 01992 }

BOOLEAN CcPinFileData IN PFILE_OBJECT  FileObject,
IN PLARGE_INTEGER  FileOffset,
IN ULONG  Length,
IN BOOLEAN  ReadOnly,
IN BOOLEAN  WriteOnly,
IN ULONG  Flags,
OUT PBCB Bcb,
OUT PVOID *  BaseAddress,
OUT PLARGE_INTEGER  BeyondLastByte
 

Definition at line 116 of file cachesub.c.

References ASSERT, _BCB::BaseAddress, _SHARED_CACHE_MAP::BcbSpinLock, _BCB::BeyondLastByte, _BCB::ByteLength, CcAllocateInitializeBcb(), CcDereferenceFileOffset(), CcFindBcb(), CcFreeActiveVacb(), CcFreeVirtualAddress(), CcGetVirtualAddress(), CcMapAndRead(), CcReferenceFileOffset(), CcUnpinFileData(), DebugTrace, DebugTrace2, ExAcquireResourceExclusive, ExAcquireSharedStarveExclusive(), ExRaiseStatus(), FALSE, _BCB::FileOffset, FlagOn, _SHARED_CACHE_MAP::Flags, GetActiveVacb, me, MODIFIED_WRITE_DISABLED, _SHARED_CACHE_MAP::NeedToZero, NULL, PAGE_SIZE, PIN_ACCESS, PIN_EXCLUSIVE, PIN_IF_BCB, PIN_NO_READ, PIN_WAIT, _BCB::PinCount, _BCB::Resource, ROUND_TO_PAGES, _SHARED_CACHE_MAP::SectionSize, TRUE, try_return, UNPIN, _BCB::Vacb, VACB_MAPPING_GRANULARITY, _SHARED_CACHE_MAP::ValidDataGoal, VOID(), ZERO_FIRST_PAGE, ZERO_LAST_PAGE, and ZERO_MIDDLE_PAGES.

Referenced by CcCopyRead(), CcCopyWrite(), CcMapData(), CcPinMappedData(), CcPinRead(), CcPreparePinWrite(), and CcZeroData().

00130 : 00131 00132 This routine locks the specified range of file data into memory. 00133 00134 Note that the data desired by the caller (or the first part of it) 00135 may be in one of three states: 00136 00137 No Bcb exists which describes the data 00138 00139 A Bcb exists describing the data, but it is not mapped 00140 (BcbOut->BaseAddress == NULL) 00141 00142 A Bcb exists describing the data, and it is mapped 00143 00144 Given the above three states, and given that the caller may call 00145 with either Wait == FALSE or Wait == TRUE, this routine has basically 00146 six cases. What has to be done, and the order in which things must be 00147 done varies quite a bit with each of these six cases. The most 00148 straight-forward implementation of this routine, with the least amount 00149 of branching, is achieved by determining which of the six cases applies, 00150 and dispatching fairly directly to that case. The handling of the 00151 cases is summarized in the following table: 00152 00153 Wait == TRUE Wait == FALSE 00154 ------------ ------------- 00155 00156 no Bcb Case 1: Case 2: 00157 00158 CcAllocateInitializeBcb CcMapAndRead (exit if FALSE) 00159 Acquire Bcb Exclusive CcAllocateInitializeBcb 00160 Release BcbList SpinLock Acquire Bcb Shared if not ReadOnly 00161 CcMapAndRead w/ Wait Release BcbList SpinLock 00162 Convert/Release Bcb Resource 00163 00164 Bcb not Case 3: Case 4: 00165 mapped 00166 Increment PinCount Acquire Bcb Exclusive (exit if FALSE) 00167 Release BcbList SpinLock CcMapAndRead (exit if FALSE) 00168 Acquire Bcb Excl. 
w/ Wait Increment PinCount 00169 if still not mapped Convert/Release Bcb Resource 00170 CcMapAndRead w/ Wait Release BcbList SpinLock 00171 Convert/Release Bcb Resource 00172 00173 Bcb mapped Case 5: Case 6: 00174 00175 Increment PinCount if not ReadOnly 00176 Release BcbList SpinLock Acquire Bcb shared (exit if FALSE) 00177 if not ReadOnly Increment PinCount 00178 Acquire Bcb Shared Release BcbList SpinLock 00179 00180 It is important to note that most changes to this routine will affect 00181 multiple cases from above. 00182 00183 Arguments: 00184 00185 FileObject - Pointer to File Object for file 00186 00187 FileOffset - Offset in file at which map should begin 00188 00189 Length - Length of desired map in bytes 00190 00191 ReadOnly - Supplies TRUE if caller will only read the mapped data (i.e., 00192 TRUE for CcCopyRead, CcMapData and CcMdlRead and FALSE for 00193 everyone else) 00194 00195 WriteOnly - The specified range of bytes will only be written. 00196 00197 Flags - (PIN_WAIT, PIN_EXCLUSIVE, PIN_NO_READ, etc. as defined in cache.h) 00198 00199 Bcb - Returns a pointer to the Bcb representing the pinned data. 00200 00201 BaseAddress - Returns base address of desired data 00202 00203 BeyondLastByte - Returns the File Offset of the first byte beyond the 00204 last accessible byte. 00205 00206 Return Value: 00207 00208 FALSE - if PIN_WAIT was set, and it was impossible to lock all 00209 of the data without blocking 00210 TRUE - if the desired data, is being returned 00211 00212 Raises: 00213 00214 STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs. 00215 This can only occur if Wait was specified as TRUE. (If Wait is 00216 specified as FALSE, and an allocation failure occurs, this 00217 routine simply returns FALSE.) 
00218 00219 --*/ 00220 00221 { 00222 PSHARED_CACHE_MAP SharedCacheMap; 00223 LARGE_INTEGER TrialBound; 00224 KIRQL OldIrql; 00225 PBCB BcbOut = NULL; 00226 ULONG ZeroFlags = 0; 00227 BOOLEAN SpinLockAcquired = FALSE; 00228 BOOLEAN Result = FALSE; 00229 00230 ULONG ReceivedLength; 00231 ULONG ActivePage; 00232 ULONG PageIsDirty; 00233 PVACB Vacb = NULL; 00234 00235 DebugTrace(+1, me, "CcPinFileData:\n", 0 ); 00236 DebugTrace( 0, me, " FileObject = %08lx\n", FileObject ); 00237 DebugTrace2(0, me, " FileOffset = %08lx, %08lx\n", FileOffset->LowPart, 00238 FileOffset->HighPart ); 00239 DebugTrace( 0, me, " Length = %08lx\n", Length ); 00240 DebugTrace( 0, me, " Flags = %02lx\n", Flags ); 00241 00242 // 00243 // Get pointer to SharedCacheMap via File Object. 00244 // 00245 00246 SharedCacheMap = *(PSHARED_CACHE_MAP *)((PCHAR)FileObject->SectionObjectPointer 00247 + sizeof(PVOID)); 00248 00249 // 00250 // See if we have an active Vacb, that we need to free. 00251 // 00252 00253 GetActiveVacb( SharedCacheMap, OldIrql, Vacb, ActivePage, PageIsDirty ); 00254 00255 // 00256 // If there is an end of a page to be zeroed, then free that page now, 00257 // so it does not cause our data to get zeroed. If there is an active 00258 // page, free it so we have the correct ValidDataGoal. 00259 // 00260 00261 if ((Vacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) { 00262 00263 CcFreeActiveVacb( SharedCacheMap, Vacb, ActivePage, PageIsDirty ); 00264 Vacb = NULL; 00265 } 00266 00267 // 00268 // Make sure the calling file system is not asking to map beyond the 00269 // end of the section, for example, that it did not forget to do 00270 // CcExtendCacheSection. 
00271 // 00272 00273 ASSERT( ( FileOffset->QuadPart + (LONGLONG)Length ) <= 00274 SharedCacheMap->SectionSize.QuadPart ); 00275 00276 // 00277 // Initially clear output 00278 // 00279 00280 *Bcb = NULL; 00281 *BaseAddress = NULL; 00282 00283 if (!FlagOn(Flags, PIN_NO_READ)) { 00284 00285 *BaseAddress = CcGetVirtualAddress( SharedCacheMap, 00286 *FileOffset, 00287 &Vacb, 00288 &ReceivedLength ); 00289 00290 } else { 00291 00292 // 00293 // In the PIN_NO_READ case, we simply need to make sure that the 00294 // sparse structure containing the Bcb listheads is expanded in the 00295 // region of the file we are interested in. 00296 // 00297 // Fake a ReceivedLength that matches the remaining bytes in the view. 00298 // 00299 00300 ReceivedLength = VACB_MAPPING_GRANULARITY - 00301 (ULONG)(FileOffset->QuadPart & (VACB_MAPPING_GRANULARITY - 1)); 00302 00303 // 00304 // Now simply cause a reference that will expand a multilevel Vacb. 00305 // 00306 00307 CcReferenceFileOffset( SharedCacheMap, *FileOffset ); 00308 } 00309 00310 // 00311 // Acquire Bcb List Exclusive to look for Bcb 00312 // 00313 00314 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 00315 SpinLockAcquired = TRUE; 00316 00317 // 00318 // Use try to guarantee cleanup on the way out. 00319 // 00320 00321 try { 00322 00323 BOOLEAN Found; 00324 LARGE_INTEGER FOffset; 00325 LARGE_INTEGER TLength; 00326 00327 // 00328 // Search for Bcb describing the largest matching "prefix" byte range, 00329 // or where to insert it. 00330 // 00331 00332 TrialBound.QuadPart = FileOffset->QuadPart + (LONGLONG)Length; 00333 Found = CcFindBcb( SharedCacheMap, FileOffset, &TrialBound, &BcbOut ); 00334 00335 00336 // 00337 // Cases 1 and 2 - Bcb was not found. 00338 // 00339 // First caculate data to pin down. 00340 // 00341 00342 if (!Found) { 00343 00344 // 00345 // Get out if the user specified PIN_IF_BCB. 
00346 // 00347 00348 if (FlagOn(Flags, PIN_IF_BCB)) { 00349 00350 // 00351 // We need to zap BcbOut since this is a hint to the cleanup code 00352 // to remove the Bcb if we are returning FALSE. 00353 // 00354 00355 BcbOut = NULL; 00356 try_return( Result = FALSE ); 00357 } 00358 00359 // 00360 // Not found, calculate data to pin down. 00361 // 00362 // Round local copy of FileOffset down to page boundary, and 00363 // round copies of size and minimum size up. Also make sure that 00364 // we keep the length from crossing the end of the SharedCacheMap. 00365 // 00366 00367 FOffset = *FileOffset; 00368 TLength.QuadPart = TrialBound.QuadPart - FOffset.QuadPart; 00369 00370 TLength.LowPart += FOffset.LowPart & (PAGE_SIZE - 1); 00371 ReceivedLength += FOffset.LowPart & (PAGE_SIZE - 1); 00372 00373 // 00374 // At this point we can calculate the ReadOnly flag for 00375 // the purposes of whether to use the Bcb resource, and 00376 // we can calculate the ZeroFlags. 00377 // 00378 00379 if ((!ReadOnly && !FlagOn(SharedCacheMap->Flags, PIN_ACCESS)) || WriteOnly) { 00380 00381 // 00382 // We can always zero middle pages, if any. 00383 // 00384 00385 ZeroFlags = ZERO_MIDDLE_PAGES; 00386 00387 if (((FOffset.LowPart & (PAGE_SIZE - 1)) == 0) && 00388 (Length >= PAGE_SIZE)) { 00389 ZeroFlags |= ZERO_FIRST_PAGE; 00390 } 00391 00392 if ((TLength.LowPart & (PAGE_SIZE - 1)) == 0) { 00393 ZeroFlags |= ZERO_LAST_PAGE; 00394 } 00395 } 00396 00397 // 00398 // We treat Bcbs as ReadOnly (do not acquire resource) if they 00399 // are in sections for which we have not disabled modified writing. 00400 // 00401 00402 if (!FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) { 00403 ReadOnly = TRUE; 00404 } 00405 00406 TLength.LowPart = (ULONG) ROUND_TO_PAGES( TLength.LowPart ); 00407 00408 // 00409 // Round BaseAddress and FOffset down to the bottom of a page. 
00410 // 00411 00412 *BaseAddress = ((PCHAR)*BaseAddress - (FileOffset->LowPart & (PAGE_SIZE - 1))); 00413 FOffset.LowPart &= ~(PAGE_SIZE - 1); 00414 00415 // 00416 // Even if we are readonly, we can still zero pages entirely 00417 // beyond valid data length. 00418 // 00419 00420 if (FOffset.QuadPart >= SharedCacheMap->ValidDataGoal.QuadPart) { 00421 00422 ZeroFlags |= ZERO_FIRST_PAGE | ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE; 00423 00424 } else if ((FOffset.QuadPart + (LONGLONG)PAGE_SIZE) >= 00425 SharedCacheMap->ValidDataGoal.QuadPart) { 00426 00427 ZeroFlags |= ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE; 00428 } 00429 00430 // 00431 // We will get into trouble if we try to read more than we 00432 // can map by one Vacb. So make sure that our lengths stay 00433 // within a Vacb. 00434 // 00435 00436 if (TLength.LowPart > ReceivedLength) { 00437 TLength.LowPart = ReceivedLength; 00438 } 00439 00440 00441 // 00442 // Case 1 - Bcb was not found and Wait is TRUE. 00443 // 00444 // Note that it is important to minimize the time that the Bcb 00445 // List spin lock is held, as well as guarantee we do not take 00446 // any faults while holding this lock. 00447 // 00448 // If we can (and perhaps will) wait, then it is important to 00449 // allocate the Bcb acquire it exclusive and free the Bcb List. 00450 // We then procede to read in the data, and anyone else finding 00451 // our Bcb will have to wait shared to insure that the data is 00452 // in. 00453 // 00454 00455 if (FlagOn(Flags, PIN_WAIT)) { 00456 00457 BcbOut = CcAllocateInitializeBcb( SharedCacheMap, 00458 BcbOut, 00459 &FOffset, 00460 &TLength ); 00461 00462 if (BcbOut == NULL) { 00463 DebugTrace( 0, 0, "Bcb allocation failure\n", 0 ); 00464 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00465 SpinLockAcquired = FALSE; 00466 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 00467 } 00468 00469 // 00470 // Now just acquire the newly-allocated Bcb shared, and 00471 // release the spin lock. 
00472 // 00473 00474 if (!ReadOnly) { 00475 if (FlagOn(Flags, PIN_EXCLUSIVE)) { 00476 (VOID)ExAcquireResourceExclusive( &BcbOut->Resource, TRUE ); 00477 } else { 00478 (VOID)ExAcquireSharedStarveExclusive( &BcbOut->Resource, TRUE ); 00479 } 00480 } 00481 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00482 SpinLockAcquired = FALSE; 00483 00484 // 00485 // Now read in the data. 00486 // 00487 00488 if (!FlagOn(Flags, PIN_NO_READ)) { 00489 00490 (VOID)CcMapAndRead( SharedCacheMap, 00491 &FOffset, 00492 TLength.LowPart, 00493 ZeroFlags, 00494 TRUE, 00495 *BaseAddress ); 00496 00497 // 00498 // Now we have to reacquire the Bcb List spinlock to load 00499 // up the mapping if we are the first one, else we collided 00500 // with someone else who loaded the mapping first, and we 00501 // will just free our mapping. It is guaranteed that the 00502 // data will be mapped to the same place. 00503 // 00504 00505 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 00506 00507 if (BcbOut->BaseAddress == NULL) { 00508 00509 BcbOut->BaseAddress = *BaseAddress; 00510 BcbOut->Vacb = Vacb; 00511 Vacb = NULL; 00512 } 00513 00514 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00515 00516 // 00517 // Calculate Base Address of the data we want. 00518 // 00519 00520 *BaseAddress = (PCHAR)BcbOut->BaseAddress + 00521 (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart ); 00522 } 00523 00524 // 00525 // Success! 00526 // 00527 00528 try_return( Result = TRUE ); 00529 } 00530 00531 00532 // 00533 // Case 2 - Bcb was not found and Wait is FALSE 00534 // 00535 // If we cannot wait, then we go immediately see if the data is 00536 // there (CcMapAndRead), and then only set up the Bcb and release 00537 // the spin lock if the data is there. Note here we call 00538 // CcMapAndRead while holding the spin lock, because we know we 00539 // will not fault and not block before returning. 
00540 // 00541 00542 else { 00543 00544 // 00545 // Now try to allocate and initialize the Bcb. If we 00546 // fail to allocate one, then return FALSE, since we know that 00547 // Wait = FALSE. The caller may get lucky if he calls 00548 // us back with Wait = TRUE. 00549 // 00550 00551 BcbOut = CcAllocateInitializeBcb( SharedCacheMap, 00552 BcbOut, 00553 &FOffset, 00554 &TLength ); 00555 00556 if (BcbOut == NULL) { 00557 00558 try_return( Result = FALSE ); 00559 } 00560 00561 // 00562 // If we are not ReadOnly, we must acquire the newly-allocated 00563 // resource shared, and then we can free the spin lock. 00564 // 00565 00566 if (!ReadOnly) { 00567 ExAcquireSharedStarveExclusive( &BcbOut->Resource, TRUE ); 00568 } 00569 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00570 SpinLockAcquired = FALSE; 00571 00572 // 00573 // Note that since this call has Wait = FALSE, it cannot 00574 // get an exception (see procedure header). 00575 // 00576 00577 ASSERT( !FlagOn(Flags, PIN_NO_READ) ); 00578 if (!CcMapAndRead( SharedCacheMap, 00579 &FOffset, 00580 TLength.LowPart, 00581 ZeroFlags, 00582 FALSE, 00583 *BaseAddress )) { 00584 00585 try_return( Result = FALSE ); 00586 } 00587 00588 // 00589 // Now we have to reacquire the Bcb List spinlock to load 00590 // up the mapping if we are the first one, else we collided 00591 // with someone else who loaded the mapping first, and we 00592 // will just free our mapping. It is guaranteed that the 00593 // data will be mapped to the same place. 00594 // 00595 00596 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 00597 00598 if (BcbOut->BaseAddress == NULL) { 00599 00600 BcbOut->BaseAddress = *BaseAddress; 00601 BcbOut->Vacb = Vacb; 00602 Vacb = NULL; 00603 } 00604 00605 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00606 00607 // 00608 // Calculate Base Address of the data we want. 
00609 // 00610 00611 *BaseAddress = (PCHAR)BcbOut->BaseAddress + 00612 (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart ); 00613 00614 // 00615 // Success! 00616 // 00617 00618 try_return( Result = TRUE ); 00619 } 00620 00621 } else { 00622 00623 // 00624 // We treat Bcbs as ReadOnly (do not acquire resource) if they 00625 // are in sections for which we have not disabled modified writing. 00626 // 00627 00628 if (!FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) { 00629 ReadOnly = TRUE; 00630 } 00631 } 00632 00633 00634 // 00635 // Cases 3 and 4 - Bcb is there but not mapped 00636 // 00637 00638 if (BcbOut->BaseAddress == NULL) { 00639 00640 // 00641 // It is too complicated to attempt to calculate any ZeroFlags in this 00642 // case, because we have to not only do the tests above, but also 00643 // compare to the byte range in the Bcb since we will be passing 00644 // those parameters to CcMapAndRead. Also, the probability of hitting 00645 // some window where zeroing is of any advantage is quite small. 00646 // 00647 00648 // 00649 // Set up to just reread the Bcb exactly as the data in it is 00650 // described. 00651 // 00652 00653 *BaseAddress = ((PCHAR)*BaseAddress - (FileOffset->LowPart - BcbOut->FileOffset.LowPart)); 00654 FOffset = BcbOut->FileOffset; 00655 TLength.QuadPart = (LONGLONG)BcbOut->ByteLength; 00656 00657 // 00658 // Case 3 - Bcb is there but not mapped and Wait is TRUE 00659 // 00660 // Increment the PinCount, and then release the BcbList 00661 // SpinLock so that we can wait to acquire the Bcb exclusive. 00662 // Once we have the Bcb exclusive, map and read it in if no 00663 // one beats us to it. Someone may have beat us to it since 00664 // we had to release the SpinLock above. 00665 // 00666 00667 if (FlagOn(Flags, PIN_WAIT)) { 00668 00669 BcbOut->PinCount += 1; 00670 00671 // 00672 // Now we have to release the BcbList SpinLock in order to 00673 // acquire the Bcb shared. 
00674 // 00675 00676 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00677 SpinLockAcquired = FALSE; 00678 if (!ReadOnly) { 00679 if (FlagOn(Flags, PIN_EXCLUSIVE)) { 00680 (VOID)ExAcquireResourceExclusive( &BcbOut->Resource, TRUE ); 00681 } else { 00682 (VOID)ExAcquireSharedStarveExclusive( &BcbOut->Resource, TRUE ); 00683 } 00684 } 00685 00686 // 00687 // Now procede to map and read the data in. 00688 // 00689 // Now read in the data. 00690 // 00691 00692 if (!FlagOn(Flags, PIN_NO_READ)) { 00693 00694 (VOID)CcMapAndRead( SharedCacheMap, 00695 &FOffset, 00696 TLength.LowPart, 00697 ZeroFlags, 00698 TRUE, 00699 *BaseAddress ); 00700 00701 // 00702 // Now we have to reacquire the Bcb List spinlock to load 00703 // up the mapping if we are the first one, else we collided 00704 // with someone else who loaded the mapping first, and we 00705 // will just free our mapping. It is guaranteed that the 00706 // data will be mapped to the same place. 00707 // 00708 00709 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 00710 00711 if (BcbOut->BaseAddress == NULL) { 00712 00713 BcbOut->BaseAddress = *BaseAddress; 00714 BcbOut->Vacb = Vacb; 00715 Vacb = NULL; 00716 } 00717 00718 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00719 00720 // 00721 // 00722 // Calculate Base Address of the data we want. 00723 // 00724 00725 *BaseAddress = (PCHAR)BcbOut->BaseAddress + 00726 (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart ); 00727 } 00728 00729 // 00730 // Success! 00731 // 00732 00733 try_return( Result = TRUE ); 00734 } 00735 00736 00737 // 00738 // Case 4 - Bcb is there but not mapped, and Wait is FALSE 00739 // 00740 // Since we cannot wait, we go immediately see if the data is 00741 // there (CcMapAndRead), and then only set up the Bcb and release 00742 // the spin lock if the data is there. 
Note here we call 00743 // CcMapAndRead while holding the spin lock, because we know we 00744 // will not fault and not block before returning. 00745 // 00746 00747 else { 00748 00749 if (!ReadOnly && !ExAcquireSharedStarveExclusive( &BcbOut->Resource, FALSE )) { 00750 00751 // 00752 // If we cannot get the resource and have not incremented PinCount, then 00753 // suppress the unpin on cleanup. 00754 // 00755 00756 BcbOut = NULL; 00757 try_return( Result = FALSE ); 00758 } 00759 00760 BcbOut->PinCount += 1; 00761 00762 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00763 SpinLockAcquired = FALSE; 00764 00765 // 00766 // Note that since this call has Wait = FALSE, it cannot 00767 // get an exception (see procedure header). 00768 // 00769 00770 ASSERT( !FlagOn(Flags, PIN_NO_READ) ); 00771 if (!CcMapAndRead( SharedCacheMap, 00772 &BcbOut->FileOffset, 00773 BcbOut->ByteLength, 00774 ZeroFlags, 00775 FALSE, 00776 *BaseAddress )) { 00777 00778 try_return( Result = FALSE ); 00779 } 00780 00781 // 00782 // Now we have to reacquire the Bcb List spinlock to load 00783 // up the mapping if we are the first one, else we collided 00784 // with someone else who loaded the mapping first, and we 00785 // will just free our mapping. It is guaranteed that the 00786 // data will be mapped to the same place. 00787 // 00788 00789 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 00790 00791 if (BcbOut->BaseAddress == NULL) { 00792 00793 BcbOut->BaseAddress = *BaseAddress; 00794 BcbOut->Vacb = Vacb; 00795 Vacb = NULL; 00796 } 00797 00798 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00799 00800 // 00801 // Calculate Base Address of the data we want. 00802 // 00803 00804 *BaseAddress = (PCHAR)BcbOut->BaseAddress + 00805 (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart ); 00806 00807 // 00808 // Success! 
00809 // 00810 00811 try_return( Result = TRUE ); 00812 } 00813 } 00814 00815 00816 // 00817 // Cases 5 and 6 - Bcb is there and it is mapped 00818 // 00819 00820 else { 00821 00822 // 00823 // Case 5 - Bcb is there and mapped, and Wait is TRUE 00824 // 00825 // We can just increment the PinCount, release the SpinLock 00826 // and then acquire the Bcb Shared if we are not ReadOnly. 00827 // 00828 00829 if (FlagOn(Flags, PIN_WAIT)) { 00830 00831 BcbOut->PinCount += 1; 00832 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00833 SpinLockAcquired = FALSE; 00834 00835 // 00836 // Acquire Bcb Resource shared to insure that it is in memory. 00837 // 00838 00839 if (!ReadOnly) { 00840 if (FlagOn(Flags, PIN_EXCLUSIVE)) { 00841 (VOID)ExAcquireResourceExclusive( &BcbOut->Resource, TRUE ); 00842 } else { 00843 (VOID)ExAcquireSharedStarveExclusive( &BcbOut->Resource, TRUE ); 00844 } 00845 } 00846 } 00847 00848 // 00849 // Case 6 - Bcb is there and mapped, and Wait is FALSE 00850 // 00851 // If we are not ReadOnly, we have to first see if we can 00852 // acquire the Bcb shared before incrmenting the PinCount, 00853 // since we will have to return FALSE if we cannot acquire the 00854 // resource. 00855 // 00856 00857 else { 00858 00859 // 00860 // Acquire Bcb Resource shared to insure that it is in memory. 00861 // 00862 00863 if (!ReadOnly && !ExAcquireSharedStarveExclusive( &BcbOut->Resource, FALSE )) { 00864 00865 // 00866 // If we cannot get the resource and have not incremented PinCount, then 00867 // suppress the unpin on cleanup. 00868 // 00869 00870 BcbOut = NULL; 00871 try_return( Result = FALSE ); 00872 } 00873 00874 BcbOut->PinCount += 1; 00875 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00876 SpinLockAcquired = FALSE; 00877 } 00878 00879 // 00880 // Calculate Base Address of the data we want. 
00881 // 00882 00883 *BaseAddress = (PCHAR)BcbOut->BaseAddress + 00884 (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart ); 00885 00886 // 00887 // Success! 00888 // 00889 00890 try_return( Result = TRUE ); 00891 } 00892 00893 00894 try_exit: NOTHING; 00895 00896 if (FlagOn(Flags, PIN_NO_READ) && 00897 FlagOn(Flags, PIN_EXCLUSIVE) && 00898 (BcbOut != NULL) && 00899 (BcbOut->BaseAddress != NULL)) { 00900 00901 // 00902 // Unmap the Vacb and free the resource if the Bcb is still 00903 // dirty. We have to free the resource before dropping the 00904 // spinlock, and we want to hold the resource until the 00905 // virtual address is freed. 00906 // 00907 00908 CcFreeVirtualAddress( BcbOut->Vacb ); 00909 00910 BcbOut->BaseAddress = NULL; 00911 BcbOut->Vacb = NULL; 00912 } 00913 00914 } finally { 00915 00916 // 00917 // Release the spinlock if it is acquired. 00918 // 00919 00920 if (SpinLockAcquired) { 00921 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00922 } 00923 00924 // 00925 // If the Vacb was not used for any reason (error or not needed), then free it here. 00926 // 00927 00928 if (Vacb != NULL) { 00929 CcFreeVirtualAddress( Vacb ); 00930 } 00931 00932 // 00933 // If we referenced a piece of a multilevel structure, release here. 00934 // 00935 00936 if (FlagOn(Flags, PIN_NO_READ)) { 00937 00938 CcDereferenceFileOffset( SharedCacheMap, *FileOffset ); 00939 } 00940 00941 if (Result) { 00942 00943 *Bcb = BcbOut; 00944 *BeyondLastByte = BcbOut->BeyondLastByte; 00945 00946 // 00947 // An abnormal termination can occur on an allocation failure, 00948 // or on a failure to map and read the buffer. 
00949 // 00950 00951 } else { 00952 00953 *BaseAddress = NULL; 00954 if (BcbOut != NULL) { 00955 CcUnpinFileData( BcbOut, ReadOnly, UNPIN ); 00956 } 00957 } 00958 00959 DebugTrace( 0, me, " <Bcb = %08lx\n", *Bcb ); 00960 DebugTrace( 0, me, " <BaseAddress = %08lx\n", *BaseAddress ); 00961 DebugTrace(-1, me, "CcPinFileData -> %02lx\n", Result ); 00962 } 00963 00964 return Result; 00965 }

VOID
CcPostDeferredWrites (
    VOID
    )

/*++

Routine Description:

    This routine may be called to see if any deferred writes should be posted
    now, and to post them.  It should be called any time the status of the
    queue may have changed, such as when a new entry has been added, or the
    Lazy Writer has finished writing out buffers and set them clean.

Arguments:

    None

Return Value:

    None

--*/

{
    PDEFERRED_WRITE DeferredWrite;
    ULONG TotalBytesLetLoose = 0;
    KIRQL OldIrql;

    do {

        //
        //  Initially clear the deferred write structure pointer
        //  and synchronize.
        //

        DeferredWrite = NULL;

        ExAcquireSpinLock( &CcDeferredWriteSpinLock, &OldIrql );

        //
        //  If the list is empty we are done.
        //

        if (!IsListEmpty(&CcDeferredWrites)) {

            PLIST_ENTRY Entry;

            Entry = CcDeferredWrites.Flink;

            while (Entry != &CcDeferredWrites) {

                DeferredWrite = CONTAINING_RECORD( Entry,
                                                   DEFERRED_WRITE,
                                                   DeferredWriteLinks );

                //
                //  Check for a paranoid case here that TotalBytesLetLoose
                //  wraps.  We stop processing the list at this time.
                //

                TotalBytesLetLoose += DeferredWrite->BytesToWrite;

                if (TotalBytesLetLoose < DeferredWrite->BytesToWrite) {

                    DeferredWrite = NULL;
                    break;
                }

                //
                //  If it is now ok to post this write, remove him from
                //  the list.
                //

                if (CcCanIWrite( DeferredWrite->FileObject,
                                 TotalBytesLetLoose,
                                 FALSE,
                                 MAXUCHAR - 1 )) {

                    RemoveEntryList( &DeferredWrite->DeferredWriteLinks );
                    break;

                //
                //  Otherwise, it is time to stop processing the list, so
                //  we clear the pointer again unless we throttled this item
                //  because of a private dirty page limit.
                //

                } else {

                    //
                    //  If this was a private throttle, skip over it and
                    //  remove its byte count from the running total.
                    //

                    if (DeferredWrite->LimitModifiedPages) {

                        Entry = Entry->Flink;
                        TotalBytesLetLoose -= DeferredWrite->BytesToWrite;
                        DeferredWrite = NULL;
                        continue;

                    } else {

                        DeferredWrite = NULL;

                        break;
                    }
                }
            }
        }

        ExReleaseSpinLock( &CcDeferredWriteSpinLock, OldIrql );

        //
        //  If we got something, set the event or call the post routine
        //  and deallocate the structure.
        //
        //  NOTE(review): in the event case the structure is not freed here -
        //  presumably the waiter owns it; verify against CcDeferWrite.
        //

        if (DeferredWrite != NULL) {

            if (DeferredWrite->Event != NULL) {

                KeSetEvent( DeferredWrite->Event, 0, FALSE );

            } else {

                (*DeferredWrite->PostRoutine)( DeferredWrite->Context1,
                                               DeferredWrite->Context2 );
                ExFreePool( DeferredWrite );
            }
        }

    //
    //  Loop until we find no more work to do.
    //

    } while (DeferredWrite != NULL);
}

VOID
FASTCALL
CcPostWorkQueue (
    IN PWORK_QUEUE_ENTRY WorkQueueEntry,
    IN PLIST_ENTRY WorkQueue
    )

/*++

Routine Description:

    This routine queues a WorkQueueEntry, which has been allocated and
    initialized by the caller, to the WorkQueue for FIFO processing by
    the work threads.

Arguments:

    WorkQueueEntry - supplies a pointer to the entry to queue

    WorkQueue - supplies the list the entry is appended to

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    PLIST_ENTRY WorkerThreadEntry = NULL;

    //
    //  The cast of WorkerThreadEntry to PWORK_QUEUE_ITEM below relies on
    //  the List links being the first field of the work item.
    //

    ASSERT(FIELD_OFFSET(WORK_QUEUE_ITEM, List) == 0);

    DebugTrace(+1, me, "CcPostWorkQueue:\n", 0 );
    DebugTrace( 0, me, "    WorkQueueEntry = %08lx\n", WorkQueueEntry );

    //
    //  Queue the entry to the respective work queue.
    //

    ExAcquireFastLock( &CcWorkQueueSpinlock, &OldIrql );
    InsertTailList( WorkQueue, &WorkQueueEntry->WorkQueueLinks );

    //
    //  Now, if we aren't throttled and have any more idle threads we can
    //  use, activate one.
    //

    if (!CcQueueThrottle && !IsListEmpty(&CcIdleWorkerThreadList)) {
        WorkerThreadEntry = RemoveHeadList( &CcIdleWorkerThreadList );
        CcNumberActiveWorkerThreads += 1;
    }
    ExReleaseFastLock( &CcWorkQueueSpinlock, OldIrql );

    if (WorkerThreadEntry != NULL) {

        //
        //  Clear List.Flink before queueing.  (Original author's note: a
        //  peek in the executive sources confirmed ExQueueWorkItem is a
        //  no-op if the Flink is not NULL.)
        //

        ((PWORK_QUEUE_ITEM)WorkerThreadEntry)->List.Flink = NULL;
        ExQueueWorkItem( (PWORK_QUEUE_ITEM)WorkerThreadEntry, CriticalWorkQueue );
    }

    //
    //  And return to our caller.
    //

    DebugTrace(-1, me, "CcPostWorkQueue -> VOID\n", 0 );

    return;
}

ULONG CcPrefillVacbLevelZone IN ULONG  NumberNeeded,
OUT PKIRQL  OldIrql,
IN ULONG  NeedBcbListHeads
 

Definition at line 1931 of file vacbsup.c.

References CcAcquireVacbLock, CcReleaseVacbLock, CcVacbLevelEntries, CcVacbLevelFreeList, CcVacbLevelWithBcbsEntries, CcVacbLevelWithBcbsFreeList, ExAllocatePoolWithTag, FALSE, NonPagedPool, NULL, TRUE, VACB_LEVEL_BLOCK_SIZE, and VACB_LEVEL_REFERENCE.

Referenced by CcExtendVacbArray(), CcGetVacbMiss(), CcReferenceFileOffset(), and CcSetDirtyInMask().

01939 : 01940 01941 This routine may be called to prefill the VacbLevelZone with the number of 01942 entries required, and return with CcVacbSpinLock acquired. This approach is 01943 taken so that the pool allocations and RtlZeroMemory calls can occur without 01944 holding any spinlock, yet the caller may proceed to perform a single indivisible 01945 operation without error handling, since there is a guaranteed minimum number of 01946 entries in the zone. 01947 01948 Arguments: 01949 01950 NumberNeeded - Number of VacbLevel entries needed, not counting the possible 01951 one with Bcb listheads. 01952 01953 OldIrql - supplies a pointer to where OldIrql should be returned upon acquiring 01954 the spinlock. 01955 01956 NeedBcbListHeads - Supplies true if a level is also needed which contains listheads. 01957 01958 Return Value: 01959 01960 FALSE if the buffers could not be preallocated, TRUE otherwise. 01961 01962 Environment: 01963 01964 No spinlocks should be held upon entry. 01965 01966 --*/ 01967 01968 { 01969 PVACB *NextVacbArray; 01970 01971 CcAcquireVacbLock( OldIrql ); 01972 01973 // 01974 // Loop until there are enough entries, else raise... 01975 01976 01977 while ((NumberNeeded > CcVacbLevelEntries) || 01978 (NeedBcbListHeads && (CcVacbLevelWithBcbsFreeList == NULL))) { 01979 01980 01981 // 01982 // Else release the spinlock so we can do the allocate/zero. 01983 // 01984 01985 CcReleaseVacbLock( *OldIrql ); 01986 01987 // 01988 // First handle the case where we need a VacbListHead with Bcb Listheads. 01989 // The pointer test is unsafe but see below. 01990 01991 01992 if (NeedBcbListHeads && (CcVacbLevelWithBcbsFreeList == NULL)) { 01993 01994 // 01995 // Allocate and initialize the Vacb block for this level, and store its pointer 01996 // back into our parent. We do not zero the listhead area. 
01997 // 01998 01999 NextVacbArray = 02000 (PVACB *)ExAllocatePoolWithTag( NonPagedPool, (VACB_LEVEL_BLOCK_SIZE * 2) + sizeof(VACB_LEVEL_REFERENCE), 'lVcC' ); 02001 02002 if (NextVacbArray == NULL) { 02003 return FALSE; 02004 } 02005 02006 RtlZeroMemory( (PCHAR)NextVacbArray, VACB_LEVEL_BLOCK_SIZE ); 02007 RtlZeroMemory( (PCHAR)NextVacbArray + (VACB_LEVEL_BLOCK_SIZE * 2), sizeof(VACB_LEVEL_REFERENCE) ); 02008 02009 CcAcquireVacbLock( OldIrql ); 02010 02011 NextVacbArray[0] = (PVACB)CcVacbLevelWithBcbsFreeList; 02012 CcVacbLevelWithBcbsFreeList = NextVacbArray; 02013 CcVacbLevelWithBcbsEntries += 1; 02014 02015 } else { 02016 02017 // 02018 // Allocate and initialize the Vacb block for this level, and store its pointer 02019 // back into our parent. 02020 // 02021 02022 NextVacbArray = 02023 (PVACB *)ExAllocatePoolWithTag( NonPagedPool, VACB_LEVEL_BLOCK_SIZE + sizeof(VACB_LEVEL_REFERENCE), 'lVcC' ); 02024 02025 if (NextVacbArray == NULL) { 02026 return FALSE; 02027 } 02028 02029 RtlZeroMemory( (PCHAR)NextVacbArray, VACB_LEVEL_BLOCK_SIZE + sizeof(VACB_LEVEL_REFERENCE) ); 02030 02031 CcAcquireVacbLock( OldIrql ); 02032 02033 NextVacbArray[0] = (PVACB)CcVacbLevelFreeList; 02034 CcVacbLevelFreeList = NextVacbArray; 02035 CcVacbLevelEntries += 1; 02036 } 02037 } 02038 02039 return TRUE; 02040 }

VOID CcReferenceFileOffset IN PSHARED_CACHE_MAP  SharedCacheMap,
IN LARGE_INTEGER  FileOffset
 

Definition at line 947 of file vacbsup.c.

References ASSERT, CcMaxVacbLevelsSeen, CcPrefillVacbLevelZone(), CcReleaseVacbLock, DISPATCH_LEVEL, ExRaiseStatus(), FlagOn, MODIFIED_WRITE_DISABLED, SetVacb(), VACB_SIZE_OF_FIRST_LEVEL, and VACB_SPECIAL_REFERENCE.

Referenced by CcPinFileData().

00954 : 00955 00956 This is a special form of reference that insures that the multi-level 00957 Vacb structures are expanded to cover a given file offset. 00958 00959 Arguments: 00960 00961 SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file. 00962 00963 FileOffset - Supplies the desired FileOffset within the file. 00964 00965 Return Value: 00966 00967 None 00968 00969 --*/ 00970 00971 { 00972 KIRQL OldIrql; 00973 00974 ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); 00975 00976 // 00977 // This operation only has meaning if the Vacbs are in the multilevel form. 00978 // 00979 00980 if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) { 00981 00982 // 00983 // Prefill the level zone so that we can expand the tree if required. 00984 // 00985 00986 if (!CcPrefillVacbLevelZone( CcMaxVacbLevelsSeen - 1, 00987 &OldIrql, 00988 FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) )) { 00989 00990 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 00991 } 00992 00993 ASSERT( FileOffset.QuadPart <= SharedCacheMap->SectionSize.QuadPart ); 00994 00995 SetVacb( SharedCacheMap, FileOffset, VACB_SPECIAL_REFERENCE ); 00996 00997 CcReleaseVacbLock( OldIrql ); 00998 } 00999 01000 ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); 01001 01002 return; 01003 }

VOID CcScanDpc IN PKDPC  Dpc,
IN PVOID  DeferredContext,
IN PVOID  SystemArgument1,
IN PVOID  SystemArgument2
 

Definition at line 99 of file lazyrite.c.

References CcAllocateWorkQueueEntry, CcPostWorkQueue(), CcRegularWorkQueue, FALSE, _WORK_QUEUE_ENTRY::Function, LazyWriter, LazyWriteScan, NULL, and _LAZY_WRITER::ScanActive.

Referenced by CcInitializeCacheManager().

00108 : 00109 00110 This is the Dpc routine which runs when the scan timer goes off. It 00111 simply posts an element for an Ex Worker thread to do the scan. 00112 00113 Arguments: 00114 00115 (All are ignored) 00116 00117 Return Value: 00118 00119 None. 00120 00121 --*/ 00122 00123 { 00124 PWORK_QUEUE_ENTRY WorkQueueEntry; 00125 00126 UNREFERENCED_PARAMETER(Dpc); 00127 UNREFERENCED_PARAMETER(DeferredContext); 00128 UNREFERENCED_PARAMETER(SystemArgument1); 00129 UNREFERENCED_PARAMETER(SystemArgument2); 00130 00131 WorkQueueEntry = CcAllocateWorkQueueEntry(); 00132 00133 // 00134 // If we failed to allocate a WorkQueueEntry, things must 00135 // be in pretty bad shape. However, all we have to do is 00136 // say we are not active, and wait for another event to 00137 // wake things up again. 00138 // 00139 00140 if (WorkQueueEntry == NULL) { 00141 00142 LazyWriter.ScanActive = FALSE; 00143 00144 } else { 00145 00146 // 00147 // Otherwise post a work queue entry to do the scan. 00148 // 00149 00150 WorkQueueEntry->Function = (UCHAR)LazyWriteScan; 00151 00152 CcPostWorkQueue( WorkQueueEntry, &CcRegularWorkQueue ); 00153 } 00154 }

VOID CcScheduleLazyWriteScan  ) 
 

Definition at line 49 of file lazyrite.c.

References CcFirstDelay, CcIdleDelay, KeSetTimer(), LazyWriter, _LAZY_WRITER::ScanActive, _LAZY_WRITER::ScanDpc, _LAZY_WRITER::ScanTimer, and TRUE.

Referenced by CcDeferWrite(), CcFlushCache(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheMap(), CcLazyWriteScan(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPurgeCacheSection(), CcSetDirtyInMask(), CcSetDirtyPinnedData(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWaitForCurrentLazyWriterActivity(), and CcZeroEndOfLastPage().

00054 : 00055 00056 This routine may be called to schedule the next lazy writer scan, 00057 during which lazy write and lazy close activity is posted to other 00058 worker threads. Callers should acquire the lazy writer spin lock 00059 to see if the scan is currently active, and then call this routine 00060 still holding the spin lock if not. One special call is used at 00061 the end of the lazy write scan to propagate lazy write active once 00062 we go active. This call is "the" scan thread, and it can therefore 00063 safely schedule the next scan without taking out the spin lock. 00064 00065 Arguments: 00066 00067 None 00068 00069 Return Value: 00070 00071 None. 00072 00073 --*/ 00074 00075 { 00076 // 00077 // It is important to set the active flag TRUE first for the propagate 00078 // case, because it is conceivable that once the timer is set, another 00079 // thread could actually run and make the scan go idle before we then 00080 // jam the flag TRUE. 00081 // 00082 // When going from idle to active, we delay a little longer to let the 00083 // app finish saving its file. 00084 // 00085 00086 if (LazyWriter.ScanActive) { 00087 00088 KeSetTimer( &LazyWriter.ScanTimer, CcIdleDelay, &LazyWriter.ScanDpc ); 00089 00090 } else { 00091 00092 LazyWriter.ScanActive = TRUE; 00093 KeSetTimer( &LazyWriter.ScanTimer, CcFirstDelay, &LazyWriter.ScanDpc ); 00094 } 00095 }

VOID CcSetDirtyInMask IN PSHARED_CACHE_MAP  SharedCacheMap,
IN PLARGE_INTEGER  FileOffset,
IN ULONG  Length
 

Definition at line 2242 of file cachesub.c.

References ASSERT, _BITMAP_RANGE::BasePage, _BITMAP_RANGE::Bitmap, _MBCB::BitmapRange1, _MBCB::BitmapRange2, _MBCB::BitmapRange3, _MBCB::BitmapRanges, CACHE_NTC_MBCB, CACHE_NTC_MBCB_GRANDE, CcAcquireMasterLockAtDpcLevel, CcAcquireVacbLockAtDpcLevel, CcAllocateInitializeBcb(), CcAllocateVacbLevel(), CcDeallocateVacbLevel(), CcDirtySharedCacheMapList, CcFindBitmapRangeToDirty(), CcPrefillVacbLevelZone(), CcReleaseMasterLockFromDpcLevel, CcReleaseVacbLock, CcReleaseVacbLockFromDpcLevel, CcScheduleLazyWriteScan(), CcTotalDirtyPages, _MBCB::DirtyPages, _BITMAP_RANGE::DirtyPages, FALSE, _BITMAP_RANGE::FirstDirtyPage, _BITMAP_RANGE::LastDirtyPage, LazyWriter, _BITMAP_RANGE::Links, MBCB_BITMAP_INITIAL_SIZE, MBCB_BITMAP_RANGE, _MBCB::NodeTypeCode, NULL, PAGE_SHIFT, PAGE_SIZE, QuadAlign, _MBCB::ResumeWritePage, _LAZY_WRITER::ScanActive, and _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks.

Referenced by CcFreeActiveVacb(), CcMapAndCopy(), CcMdlWriteComplete2(), CcPrepareMdlWrite(), CcPurgeAndClearCacheSection(), and CcReleaseByteRangeFromWrite().

02250 : 02251 02252 This routine may be called to set a range of pages dirty in a user data 02253 file, by just setting the corresponding bits in the mask bcb. 02254 02255 IMPORTANT NOTE: 02256 02257 If this routine fails to set any bits due to an allocation failure, 02258 it just returns quietly without informing the caller. (Note that this 02259 routine is never called for no modified write sections.) The reason 02260 for this behavior is that this routine is sometimes called as part of 02261 error recovery (CcFreeActiveVacb, CcMdlWriteComplete, etc.) when it is 02262 essential to just keep on moving. Note that if an allocation failure does 02263 occur, this only means that MM will have to flush the modified page in 02264 time, since the Lazy Writer will not do it. 02265 02266 Arguments: 02267 02268 SharedCacheMap - SharedCacheMap where the pages are to be set dirty. 02269 02270 FileOffset - FileOffset of first page to set dirty 02271 02272 Length - Used in conjunction with FileOffset to determine how many pages 02273 to set dirty. 02274 02275 Return Value: 02276 02277 None 02278 02279 --*/ 02280 02281 { 02282 KIRQL OldIrql; 02283 PMBCB Mbcb; 02284 PBITMAP_RANGE BitmapRange; 02285 LONGLONG FirstPage; 02286 LONGLONG LastPage; 02287 PULONG MaskPtr; 02288 ULONG Mask = 0; 02289 PULONG Bitmap = NULL; 02290 ULONG AllocationError = FALSE; 02291 02292 // 02293 // We assume no caller can cross a bitmap range boundary (currently not even 02294 // a view boundary!), so we do not want to loop through bitmap ranges. 02295 // 02296 02297 ASSERT((FileOffset->QuadPart / MBCB_BITMAP_RANGE) == 02298 ((FileOffset->QuadPart + Length - 1) / MBCB_BITMAP_RANGE)); 02299 02300 // 02301 // Initialize our locals. 02302 // 02303 02304 FirstPage = FileOffset->QuadPart >> PAGE_SHIFT; 02305 LastPage = ((FileOffset->QuadPart + Length - 1) >> PAGE_SHIFT); 02306 02307 // 02308 // If we have to convert to an Mbcb grande, we will loop back here to 02309 // preallocate another buffer. 
02310 // 02311 02312 do { 02313 02314 // 02315 // For large streams, we need to preallocate a block we use for 02316 // we use for bitmaps. We allocate one, then loop back in the rare 02317 // case where we will need another. We free it at the bottom if we 02318 // don't need one. 02319 // 02320 02321 if (SharedCacheMap->SectionSize.QuadPart > (MBCB_BITMAP_INITIAL_SIZE * 8 * PAGE_SIZE)) { 02322 02323 // 02324 // If we could not preallocate, break out into common cleanup code and 02325 // return quietly. 02326 // 02327 02328 if (!CcPrefillVacbLevelZone( 1, &OldIrql, FALSE )) { 02329 return; 02330 } 02331 02332 Bitmap = (PULONG)CcAllocateVacbLevel( FALSE ); 02333 CcReleaseVacbLock( OldIrql ); 02334 } 02335 02336 // 02337 // Acquire the Mbcb spinlock. 02338 // 02339 02340 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 02341 02342 // 02343 // If there is no Mbcb, we will have to allocate one. 02344 // 02345 02346 Mbcb = SharedCacheMap->Mbcb; 02347 if (Mbcb == NULL) { 02348 02349 // 02350 // Since we use the Bcb zone, we must assume that Bcbs are big enough. 02351 // 02352 02353 ASSERT(QuadAlign(sizeof(MBCB)) <= QuadAlign(sizeof(BCB))); 02354 02355 // 02356 // Allocate the Mbcb from the Bcb zone. 02357 // 02358 02359 Mbcb = (PMBCB)CcAllocateInitializeBcb( NULL, NULL, NULL, NULL ); 02360 02361 // 02362 // If we could not allocate an Mbcb, break out to clean up and return 02363 // 02364 02365 if (Mbcb == NULL) { 02366 break; 02367 } 02368 02369 // 02370 // Set in the node type, and initialize the listhead of ranges. 02371 // 02372 02373 Mbcb->NodeTypeCode = CACHE_NTC_MBCB; 02374 InitializeListHead( &Mbcb->BitmapRanges ); 02375 02376 // 02377 // Insert and initialize the first range. 02378 // 02379 02380 InsertTailList( &Mbcb->BitmapRanges, &Mbcb->BitmapRange1.Links ); 02381 Mbcb->BitmapRange1.FirstDirtyPage = MAXULONG; 02382 02383 // 02384 // Use the rest of the Mbcb as the initial bitmap. 
02385 // 02386 02387 Mbcb->BitmapRange1.Bitmap = (PULONG)&Mbcb->BitmapRange2; 02388 02389 // 02390 // Now set to use our new Mbcb. 02391 // 02392 02393 SharedCacheMap->Mbcb = Mbcb; 02394 } 02395 02396 // 02397 // Now see if we need to switch to the Mbcb grande format. 02398 // 02399 02400 if ((LastPage >= (MBCB_BITMAP_INITIAL_SIZE * 8)) && 02401 (Mbcb->NodeTypeCode != CACHE_NTC_MBCB_GRANDE)) { 02402 02403 // 02404 // If there are any dirty pages, copy the initial bitmap over, and zero 02405 // out the original end of the Mbcb for reuse. 02406 // 02407 02408 if (Mbcb->BitmapRange1.DirtyPages != 0) { 02409 RtlCopyMemory( Bitmap, Mbcb->BitmapRange1.Bitmap, MBCB_BITMAP_INITIAL_SIZE ); 02410 RtlZeroMemory( Mbcb->BitmapRange1.Bitmap, MBCB_BITMAP_INITIAL_SIZE ); 02411 } 02412 02413 // 02414 // Store the new bitmap pointer and show we have consumed this one. 02415 // 02416 02417 Mbcb->BitmapRange1.Bitmap = Bitmap; 02418 Bitmap = NULL; 02419 02420 // 02421 // Insert and initialize the first range. 02422 // 02423 02424 InsertTailList( &Mbcb->BitmapRanges, &Mbcb->BitmapRange2.Links ); 02425 Mbcb->BitmapRange2.BasePage = MAXLONGLONG; 02426 Mbcb->BitmapRange2.FirstDirtyPage = MAXULONG; 02427 InsertTailList( &Mbcb->BitmapRanges, &Mbcb->BitmapRange3.Links ); 02428 Mbcb->BitmapRange3.BasePage = MAXLONGLONG; 02429 Mbcb->BitmapRange3.FirstDirtyPage = MAXULONG; 02430 Mbcb->NodeTypeCode = CACHE_NTC_MBCB_GRANDE; 02431 02432 // 02433 // This is a one-time event - converting to the large Mbcb. Continue back 02434 // to preallocate another buffer for CcFindBitmapRangeToDirty. 02435 // 02436 02437 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 02438 continue; 02439 } 02440 02441 // 02442 // Now find the Bitmap range we are setting bits in. 02443 // 02444 02445 BitmapRange = CcFindBitmapRangeToDirty( Mbcb, FirstPage, &Bitmap ); 02446 02447 // 02448 // If we could not allocate this dinky structure, break out quietly. 
02449 // 02450 02451 if (BitmapRange == NULL) { 02452 break; 02453 } 02454 02455 // 02456 // Now update the first and last dirty page indices and the bitmap. 02457 // 02458 02459 if (FirstPage < (BitmapRange->BasePage + BitmapRange->FirstDirtyPage)) { 02460 BitmapRange->FirstDirtyPage = (ULONG)(FirstPage - BitmapRange->BasePage); 02461 } 02462 02463 if (LastPage > (BitmapRange->BasePage + BitmapRange->LastDirtyPage)) { 02464 BitmapRange->LastDirtyPage = (ULONG)(LastPage - BitmapRange->BasePage); 02465 } 02466 02467 // 02468 // We have to acquire the shared cache map list, because we 02469 // may be changing lists. 02470 // 02471 02472 CcAcquireMasterLockAtDpcLevel(); 02473 02474 // 02475 // If this is the first dirty page for this cache map, there is some work 02476 // to do. 02477 // 02478 02479 if (SharedCacheMap->DirtyPages == 0) { 02480 02481 // 02482 // If the lazy write scan is not active, then start it. 02483 // 02484 02485 if (!LazyWriter.ScanActive) { 02486 CcScheduleLazyWriteScan(); 02487 } 02488 02489 // 02490 // Move to the dirty list. 02491 // 02492 02493 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 02494 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 02495 &SharedCacheMap->SharedCacheMapLinks ); 02496 02497 Mbcb->ResumeWritePage = FirstPage; 02498 } 02499 02500 MaskPtr = &BitmapRange->Bitmap[(ULONG)(FirstPage - BitmapRange->BasePage) / 32]; 02501 Mask = 1 << ((ULONG)FirstPage % 32); 02502 02503 // 02504 // Loop to set all of the bits and adjust the DirtyPage totals. 
02505 // 02506 02507 for ( ; FirstPage <= LastPage; FirstPage++) { 02508 02509 if ((*MaskPtr & Mask) == 0) { 02510 02511 CcTotalDirtyPages += 1; 02512 SharedCacheMap->DirtyPages += 1; 02513 Mbcb->DirtyPages += 1; 02514 BitmapRange->DirtyPages += 1; 02515 *MaskPtr |= Mask; 02516 } 02517 02518 Mask <<= 1; 02519 02520 if (Mask == 0) { 02521 02522 MaskPtr += 1; 02523 Mask = 1; 02524 } 02525 } 02526 02527 // 02528 // See if we need to advance our goal for ValidDataLength. 02529 // 02530 02531 LastPage = FileOffset->QuadPart + Length; 02532 02533 if (LastPage > SharedCacheMap->ValidDataGoal.QuadPart) { 02534 SharedCacheMap->ValidDataGoal.QuadPart = (LONGLONG)LastPage; 02535 } 02536 02537 CcReleaseMasterLockFromDpcLevel(); 02538 02539 // 02540 // Continue until we have actually set the bits (there is a continue 02541 // which just wants to loop back and allocate another buffer). 02542 // 02543 02544 } while (Mask == 0); 02545 02546 // 02547 // Now if we preallocated a bitmap buffer, free it on the way out. 02548 // 02549 02550 if (Bitmap != NULL) { 02551 CcAcquireVacbLockAtDpcLevel(); 02552 CcDeallocateVacbLevel( (PVACB *)Bitmap, FALSE ); 02553 CcReleaseVacbLockFromDpcLevel(); 02554 } 02555 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 02556 }

VOID CcStartLazyWriter IN PVOID  NotUsed  ) 
 

BOOLEAN FASTCALL CcUnmapVacbArray IN PSHARED_CACHE_MAP  SharedCacheMap,
IN PLARGE_INTEGER FileOffset  OPTIONAL,
IN ULONG  Length,
IN BOOLEAN  UnmapBehind
 

Definition at line 1784 of file vacbsup.c.

References CcAcquireVacbLock, CcDrainVacbLevelZone(), CcMoveVacbToReuseHead, CcReleaseVacbLock, CcUnmapVacb(), FALSE, GetVacb, NULL, _VACB::Overlay, SetVacb(), _VACB::SharedCacheMap, TRUE, and VACB_MAPPING_GRANULARITY.

Referenced by CcGetVacbMiss(), CcPurgeCacheSection(), and CcUnmapAndPurge().

01793 : 01794 01795 This routine must be called to do any unmapping and associated 01796 cleanup for a shared cache map, just before it is deleted. 01797 01798 Arguments: 01799 01800 SharedCacheMap - Supplies a pointer to the shared cache map 01801 which is about to be deleted. 01802 01803 FileOffset - If supplied, only unmap the specified offset and length 01804 01805 Length - Completes range to unmap if FileOffset specified. If FileOffset 01806 is specified, Length of 0 means unmap to the end of the section. 01807 01808 UnmapBehind - If this is a result of our unmap behind logic 01809 01810 Return Value: 01811 01812 FALSE -- if the unmap was not done due to an active vacb 01813 TRUE -- if the unmap was done 01814 01815 --*/ 01816 01817 { 01818 PVACB Vacb; 01819 KIRQL OldIrql; 01820 LARGE_INTEGER StartingFileOffset = {0,0}; 01821 LARGE_INTEGER EndingFileOffset = SharedCacheMap->SectionSize; 01822 01823 // 01824 // We could be just cleaning up for error recovery. 01825 // 01826 01827 if (SharedCacheMap->Vacbs == NULL) { 01828 return TRUE; 01829 } 01830 01831 // 01832 // See if a range was specified. Align it to the VACB boundaries so it 01833 // works in the loop below 01834 // 01835 01836 if (ARGUMENT_PRESENT(FileOffset)) { 01837 StartingFileOffset.QuadPart = ((FileOffset->QuadPart) & (~((LONGLONG)VACB_MAPPING_GRANULARITY - 1))); 01838 if (Length != 0) { 01839 01840 EndingFileOffset.QuadPart = FileOffset->QuadPart + Length; 01841 01842 } 01843 } 01844 01845 // 01846 // Acquire the spin lock. 01847 // 01848 01849 CcAcquireVacbLock( &OldIrql ); 01850 01851 while (StartingFileOffset.QuadPart < EndingFileOffset.QuadPart) { 01852 01853 // 01854 // Note that the caller with an explicit range may be off the 01855 // end of the section (example CcPurgeCacheSection for cache 01856 // coherency). That is the reason for the first part of the 01857 // test below. 
01858 // 01859 // Check the next cell once without the spin lock, it probably will 01860 // not change, but we will handle it if it does not. 01861 // 01862 01863 if ((StartingFileOffset.QuadPart < SharedCacheMap->SectionSize.QuadPart) && 01864 ((Vacb = GetVacb( SharedCacheMap, StartingFileOffset )) != NULL)) { 01865 01866 // 01867 // Return here if we are unlucky and see an active 01868 // Vacb. It could be Purge calling, and the Lazy Writer 01869 // may have done a CcGetVirtualAddressIfMapped! 01870 // 01871 01872 if (Vacb->Overlay.ActiveCount != 0) { 01873 01874 CcReleaseVacbLock( OldIrql ); 01875 return FALSE; 01876 } 01877 01878 // 01879 // Unlink it from the other SharedCacheMap, so the other 01880 // guy will not try to use it when we free the spin lock. 01881 // 01882 01883 SetVacb( SharedCacheMap, StartingFileOffset, NULL ); 01884 Vacb->SharedCacheMap = NULL; 01885 01886 // 01887 // Increment the open count so that no one else will 01888 // try to unmap or reuse until we are done. 01889 // 01890 01891 Vacb->Overlay.ActiveCount += 1; 01892 01893 // 01894 // Release the spin lock. 01895 // 01896 01897 CcReleaseVacbLock( OldIrql ); 01898 01899 // 01900 // Unmap and free it if we really got it above. 01901 // 01902 01903 CcUnmapVacb( Vacb, SharedCacheMap, UnmapBehind ); 01904 01905 // 01906 // Reacquire the spin lock so that we can decrment the count. 01907 // 01908 01909 CcAcquireVacbLock( &OldIrql ); 01910 Vacb->Overlay.ActiveCount -= 1; 01911 01912 // 01913 // Place this VACB at the head of the LRU 01914 // 01915 01916 CcMoveVacbToReuseHead( Vacb ); 01917 } 01918 01919 StartingFileOffset.QuadPart = StartingFileOffset.QuadPart + VACB_MAPPING_GRANULARITY; 01920 } 01921 01922 CcReleaseVacbLock( OldIrql ); 01923 01924 CcDrainVacbLevelZone(); 01925 01926 return TRUE; 01927 }

VOID FASTCALL CcUnpinFileData IN OUT PBCB  Bcb,
IN BOOLEAN  ReadOnly,
IN UNMAP_ACTIONS  UnmapAction
 

Definition at line 974 of file cachesub.c.

References ASSERT, _SHARED_CACHE_MAP::BcbSpinLock, CACHE_NTC_BCB, CACHE_NTC_SHARED_CACHE_MAP, CcAcquireMasterLockAtDpcLevel, CcAcquireVacbLockAtDpcLevel, CcBcbSpinLock, CcBeyondVacbs, CcBugCheck, CcCleanSharedCacheMapList, CcDeallocateBcb(), CcFreeVirtualAddress(), CcPagesYetToWrite, CcReleaseMasterLockFromDpcLevel, CcReleaseVacbLockFromDpcLevel, CcTotalDirtyPages, CcUnlockVacbLevel, CcVacbs, DebugTrace, _SHARED_CACHE_MAP::DirtyPages, ExReleaseResource, FALSE, FlagOn, _SHARED_CACHE_MAP::Flags, me, MODIFIED_WRITE_DISABLED, _SHARED_CACHE_MAP::NodeTypeCode, NULL, _SHARED_CACHE_MAP::OpenCount, PAGE_SHIFT, PVACB, SET_CLEAN, _SHARED_CACHE_MAP::SharedCacheMapLinks, TRUE, UNPIN, and UNREF.

Referenced by CcAcquireByteRangeForWrite(), CcCopyRead(), CcCopyWrite(), CcGetDirtyPages(), CcMapData(), CcPinFileData(), CcReleaseByteRangeFromWrite(), CcUnpinData(), CcUnpinDataForThread(), CcUnpinRepinnedBcb(), and CcZeroData().

00982 : 00983 00984 This routine unmaps and unlocks the specified buffer, which was previously 00985 locked and mapped by calling CcPinFileData. 00986 00987 Arguments: 00988 00989 Bcb - Pointer previously returned from CcPinFileData. As may be 00990 seen above, this pointer may be either a Bcb or a Vacb. 00991 00992 ReadOnly - must specify same value as when data was mapped 00993 00994 UnmapAction - UNPIN or SET_CLEAN 00995 00996 Return Value: 00997 00998 None 00999 01000 --*/ 01001 01002 { 01003 KIRQL OldIrql; 01004 PSHARED_CACHE_MAP SharedCacheMap; 01005 01006 DebugTrace(+1, me, "CcUnpinFileData >Bcb = %08lx\n", Bcb ); 01007 01008 // 01009 // Note, since we have to allocate so many Vacbs, we do not use 01010 // a node type code. However, the Vacb starts with a BaseAddress, 01011 // so we assume that the low byte of the Bcb node type code has 01012 // some bits set, which a page-aligned Base Address cannot. 01013 // 01014 01015 ASSERT( (CACHE_NTC_BCB & 0xFF) != 0 ); 01016 01017 if (Bcb->NodeTypeCode != CACHE_NTC_BCB) { 01018 01019 ASSERT(((PVACB)Bcb >= CcVacbs) && ((PVACB)Bcb < CcBeyondVacbs)); 01020 ASSERT(((PVACB)Bcb)->SharedCacheMap->NodeTypeCode == CACHE_NTC_SHARED_CACHE_MAP); 01021 01022 CcFreeVirtualAddress( (PVACB)Bcb ); 01023 01024 DebugTrace(-1, me, "CcUnpinFileData -> VOID (simple release)\n", 0 ); 01025 01026 return; 01027 } 01028 01029 SharedCacheMap = Bcb->SharedCacheMap; 01030 01031 // 01032 // We treat Bcbs as ReadOnly (do not acquire resource) if they 01033 // are in sections for which we have not disabled modified writing. 
01034 // 01035 01036 if (!FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) || 01037 UnmapAction == UNREF) { 01038 ReadOnly = TRUE; 01039 } 01040 01041 // 01042 // Synchronize 01043 // 01044 01045 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 01046 01047 switch (UnmapAction) { 01048 01049 case UNPIN: 01050 case UNREF: 01051 01052 ASSERT( Bcb->PinCount > 0 ); 01053 01054 Bcb->PinCount -= 1; 01055 break; 01056 01057 case SET_CLEAN: 01058 01059 if (Bcb->Dirty) { 01060 01061 ULONG Pages = Bcb->ByteLength >> PAGE_SHIFT; 01062 01063 // 01064 // Reverse the rest of the actions taken when the Bcb was set dirty. 01065 // 01066 01067 Bcb->Dirty = FALSE; 01068 01069 CcAcquireMasterLockAtDpcLevel(); 01070 01071 SharedCacheMap->DirtyPages -= Pages; 01072 CcTotalDirtyPages -= Pages; 01073 01074 // 01075 // Normally we need to reduce CcPagesYetToWrite appropriately. 01076 // 01077 01078 if (CcPagesYetToWrite > Pages) { 01079 CcPagesYetToWrite -= Pages; 01080 } else { 01081 CcPagesYetToWrite = 0; 01082 } 01083 01084 // 01085 // Remove SharedCacheMap from dirty list if nothing more dirty, 01086 // and someone still has the cache map opened. 01087 // 01088 01089 if ((SharedCacheMap->DirtyPages == 0) && 01090 (SharedCacheMap->OpenCount != 0)) { 01091 01092 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01093 InsertTailList( &CcCleanSharedCacheMapList, 01094 &SharedCacheMap->SharedCacheMapLinks ); 01095 } 01096 01097 CcReleaseMasterLockFromDpcLevel(); 01098 } 01099 01100 break; 01101 01102 default: 01103 CcBugCheck( UnmapAction, 0, 0 ); 01104 } 01105 01106 // 01107 // If we brought it to 0, then we have to kill it. 01108 // 01109 01110 if (Bcb->PinCount == 0) { 01111 01112 // 01113 // If the Bcb is Dirty, we only release the resource and unmap now. 01114 // 01115 01116 if (Bcb->Dirty) { 01117 01118 if (Bcb->BaseAddress != NULL) { 01119 01120 // 01121 // Unmap the Vacb and free the resource if the Bcb is still 01122 // dirty. 
We have to free the resource before dropping the 01123 // spinlock, and we want to hold the resource until the 01124 // virtual address is freed. 01125 // 01126 01127 CcFreeVirtualAddress( Bcb->Vacb ); 01128 01129 Bcb->BaseAddress = NULL; 01130 Bcb->Vacb = NULL; 01131 } 01132 01133 if (!ReadOnly) { 01134 ExReleaseResource( &Bcb->Resource ); 01135 } 01136 01137 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 01138 } 01139 01140 // 01141 // Otherwise, we also delete the Bcb. 01142 // 01143 01144 else { 01145 01146 // 01147 // Since CcCalculateVacbLockCount has to be able to walk 01148 // the BcbList with only the VacbSpinLock, we take that one 01149 // out to change the list and decrement the level. 01150 // 01151 01152 CcAcquireVacbLockAtDpcLevel(); 01153 RemoveEntryList( &Bcb->BcbLinks ); 01154 01155 // 01156 // For large metadata streams we unlock the Vacb level. 01157 // 01158 01159 CcUnlockVacbLevel( SharedCacheMap, Bcb->FileOffset.QuadPart ); 01160 CcReleaseVacbLockFromDpcLevel(); 01161 01162 // 01163 // Debug routines used to remove Bcbs from the global list 01164 // 01165 01166 #if LIST_DBG 01167 01168 ExAcquireSpinLockAtDpcLevel( &CcBcbSpinLock ); 01169 01170 if (Bcb->CcBcbLinks.Flink != NULL) { 01171 01172 RemoveEntryList( &Bcb->CcBcbLinks ); 01173 CcBcbCount -= 1; 01174 } 01175 01176 ExReleaseSpinLockFromDpcLevel( &CcBcbSpinLock ); 01177 01178 #endif 01179 01180 if (Bcb->BaseAddress != NULL) { 01181 01182 CcFreeVirtualAddress( Bcb->Vacb ); 01183 } 01184 #if DBG 01185 if (!ReadOnly) { 01186 ExReleaseResource( &Bcb->Resource ); 01187 } 01188 01189 // 01190 // ASSERT that the resource is unowned. 01191 // 01192 01193 ASSERT( Bcb->Resource.ActiveCount == 0 ); 01194 #endif 01195 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 01196 CcDeallocateBcb( Bcb ); 01197 } 01198 } 01199 01200 // 01201 // Else we just have to release our Shared access, if we are not 01202 // readonly. 
We don't need to do this above, since we deallocate 01203 // the entire Bcb there. 01204 // 01205 01206 else { 01207 01208 if (!ReadOnly) { 01209 ExReleaseResource( &Bcb->Resource ); 01210 } 01211 01212 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 01213 } 01214 01215 DebugTrace(-1, me, "CcUnpinFileData -> VOID\n", 0 ); 01216 01217 return; 01218 }

VOID CcWaitOnActiveCount IN PSHARED_CACHE_MAP  SharedCacheMap  ) 
 

Definition at line 1062 of file vacbsup.c.

References CcAcquireVacbLock, CcReleaseVacbLock, Event(), ExAllocatePoolWithTag, Executive, FALSE, KeInitializeEvent, KernelMode, KeWaitForSingleObject(), NonPagedPoolMustSucceed, and NULL.

Referenced by CcDeleteSharedCacheMap(), and CcPurgeCacheSection().

01068 : 01069 01070 This routine may be called to wait for outstanding mappings for 01071 a given SharedCacheMap to go inactive. It is intended to be called 01072 from CcUninitializeCacheMap, which is called by the file systems 01073 during cleanup processing. In that case this routine only has to 01074 wait if the user closed a handle without waiting for all I/Os on the 01075 handle to complete. 01076 01077 This routine returns each time the active count is decremented. The 01078 caller must recheck his wait conditions on return, either waiting for 01079 the ActiveCount to go to 0, or for specific views to go inactive 01080 (CcPurgeCacheSection case). 01081 01082 Arguments: 01083 01084 SharedCacheMap - Supplies the Shared Cache Map on whose VacbActiveCount 01085 we wish to wait. 01086 01087 Return Value: 01088 01089 None. 01090 01091 --*/ 01092 01093 { 01094 KIRQL OldIrql; 01095 PKEVENT Event; 01096 01097 // 01098 // In the unusual case that we get a cleanup while I/O is still going 01099 // on, we can wait here. The caller must test the count for nonzero 01100 // before calling this routine. 01101 // 01102 // Since we are being called from cleanup, we cannot afford to 01103 // fail here. 01104 // 01105 01106 CcAcquireVacbLock( &OldIrql ); 01107 01108 // 01109 // It is possible that the count went to zero before we acquired the 01110 // spinlock, so we must handle two cases here. 01111 // 01112 01113 if (SharedCacheMap->VacbActiveCount != 0) { 01114 01115 if ((Event = SharedCacheMap->WaitOnActiveCount) == NULL) { 01116 01117 // 01118 // If the local event is not being used then we take it. 
01119 // 01120 01121 Event = InterlockedExchangePointer( &SharedCacheMap->LocalEvent, NULL ); 01122 01123 if (Event == NULL) { 01124 01125 Event = (PKEVENT)ExAllocatePoolWithTag( NonPagedPoolMustSucceed, 01126 sizeof(KEVENT), 01127 'vEcC' ); 01128 } 01129 } 01130 01131 KeInitializeEvent( Event, 01132 NotificationEvent, 01133 FALSE ); 01134 01135 SharedCacheMap->WaitOnActiveCount = Event; 01136 01137 CcReleaseVacbLock( OldIrql ); 01138 01139 KeWaitForSingleObject( Event, 01140 Executive, 01141 KernelMode, 01142 FALSE, 01143 (PLARGE_INTEGER)NULL); 01144 } else { 01145 01146 CcReleaseVacbLock( OldIrql ); 01147 } 01148 }

VOID
CcWorkerThread (
    PVOID ExWorkQueueItem
    )

/*++

Routine Description:

    This is the worker thread routine for processing cache manager work
    queue entries.  It drains the express queue, then the regular queue,
    dispatching each workitem (read ahead, write behind, event set, lazy
    write scan), and finally requeues its own work item on the idle list
    before returning.

    (Doxygen: defined at line 738 of lazyrite.c; referenced by
    CcInitializeCacheManager().)

Arguments:

    ExWorkQueueItem - The work item used for this thread.

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    PLIST_ENTRY WorkQueue;
    PWORK_QUEUE_ENTRY WorkQueueEntry;
    BOOLEAN RescanOk = FALSE;
    BOOLEAN DropThrottle = FALSE;
    IO_STATUS_BLOCK IoStatus;

    IoStatus.Status = STATUS_SUCCESS;
    IoStatus.Information = 0;

    //
    //  RemoveHeadList/CONTAINING_RECORD below rely on the queue links
    //  being the first field of the workitem.
    //

    ASSERT(FIELD_OFFSET(WORK_QUEUE_ENTRY, WorkQueueLinks) == 0);

    while (TRUE) {

        ExAcquireFastLock( &CcWorkQueueSpinlock, &OldIrql );

        //
        //  If we just processed a throttled operation (EventSet), drop
        //  the throttle flag so other worker threads may run again.
        //

        if (DropThrottle) {

            DropThrottle = CcQueueThrottle = FALSE;
        }

        //
        //  If the previous workitem asked to be requeued (CC_REQUEUE,
        //  from CcWriteBehind), reinsert it at the tail of the queue it
        //  was taken from.  Note WorkQueue/WorkQueueEntry still refer to
        //  the previous iteration's item here; on the first pass
        //  Information is 0 so this branch cannot be taken.
        //

        if (IoStatus.Information == CC_REQUEUE) {

            InsertTailList( WorkQueue, &WorkQueueEntry->WorkQueueLinks );
            IoStatus.Information = 0;
        }

        //
        //  First see if there is something in the express queue.
        //

        if (!IsListEmpty(&CcExpressWorkQueue)) {
            WorkQueue = &CcExpressWorkQueue;

        //
        //  If there was nothing there, then try the regular queue.
        //

        } else if (!IsListEmpty(&CcRegularWorkQueue)) {
            WorkQueue = &CcRegularWorkQueue;

        //
        //  Else we can break and go idle.  (The spinlock is deliberately
        //  still held here; it is released after the loop.)
        //

        } else {

            break;
        }

        WorkQueueEntry = CONTAINING_RECORD( WorkQueue->Flink, WORK_QUEUE_ENTRY, WorkQueueLinks );

        //
        //  If this is an EventSet, throttle down to a single thread to be
        //  sure that this event fires after all preceding workitems have
        //  completed.  The item is left on the queue (not popped) for the
        //  last active worker to process.
        //

        if (WorkQueueEntry->Function == EventSet && CcNumberActiveWorkerThreads > 1) {

            CcQueueThrottle = TRUE;
            break;
        }

        //
        //  Pop the workitem off: we will execute it now.
        //

        RemoveHeadList( WorkQueue );

        ExReleaseFastLock( &CcWorkQueueSpinlock, OldIrql );

        //
        //  Process the entry within a try-except clause, so that any errors
        //  will cause us to continue after the called routine has unwound.
        //

        try {

            switch (WorkQueueEntry->Function) {

            //
            //  Perform read ahead
            //

            case ReadAhead:

                DebugTrace( 0, me, "CcWorkerThread Read Ahead FileObject = %08lx\n",
                            WorkQueueEntry->Parameters.Read.FileObject );

                CcPerformReadAhead( WorkQueueEntry->Parameters.Read.FileObject );

                break;

            //
            //  Perform write behind.  Remember whether it succeeded so we
            //  can decide below whether a rescan is worthwhile.
            //

            case WriteBehind:

                DebugTrace( 0, me, "CcWorkerThread WriteBehind SharedCacheMap = %08lx\n",
                            WorkQueueEntry->Parameters.Write.SharedCacheMap );

                CcWriteBehind( WorkQueueEntry->Parameters.Write.SharedCacheMap, &IoStatus );
                RescanOk = (BOOLEAN)NT_SUCCESS(IoStatus.Status);
                break;

            //
            //  Perform set event, and arrange to drop the single-thread
            //  throttle on the next pass.
            //

            case EventSet:

                DebugTrace( 0, me, "CcWorkerThread SetEvent Event = %08lx\n",
                            WorkQueueEntry->Parameters.Event.Event );

                KeSetEvent( WorkQueueEntry->Parameters.Event.Event, 0, FALSE );
                DropThrottle = TRUE;
                break;

            //
            //  Perform Lazy Write Scan
            //

            case LazyWriteScan:

                DebugTrace( 0, me, "CcWorkerThread Lazy Write Scan\n", 0 );

                CcLazyWriteScan();
                break;
            }

        }
        except( CcExceptionFilter( GetExceptionCode() )) {

            NOTHING;
        }

        //
        //  If not a requeue request, free the workitem.  (A requeued item
        //  is reinserted at the top of the next iteration instead.)
        //

        if (IoStatus.Information != CC_REQUEUE) {

            CcFreeWorkQueueEntry( WorkQueueEntry );
        }
    }

    //
    //  No more work.  Requeue our worker thread entry and get out.
    //  We still hold CcWorkQueueSpinlock from the final loop pass.
    //

    InsertTailList( &CcIdleWorkerThreadList,
                    &((PWORK_QUEUE_ITEM)ExWorkQueueItem)->List );
    CcNumberActiveWorkerThreads -= 1;

    ExReleaseFastLock( &CcWorkQueueSpinlock, OldIrql );

    //
    //  If writes are being deferred and dirty pages have accumulated
    //  (20 is the original hard-coded trigger) while the last write
    //  behind succeeded, kick one more scan before going idle.
    //

    if (!IsListEmpty(&CcDeferredWrites) && (CcTotalDirtyPages >= 20) && RescanOk) {
        CcLazyWriteScan();
    }

    return;
}

VOID
FASTCALL
CcWriteBehind (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PIO_STATUS_BLOCK IoStatus
    )

/*++

Routine Description:

    This routine performs write behind (lazy writing) for a single
    SharedCacheMap: it acquires the file for lazy write, establishes a
    flush target for the mask Bcb, flushes dirty data via CcFlushCache,
    reports lost-write errors, pushes ValidDataLength updates back to the
    file system, and tears the SharedCacheMap down if it is no longer in
    use.

    (Doxygen: defined at line 3880 of cachesub.c; referenced by
    CcWorkerThread().  The previous header described a Wait parameter and
    a BOOLEAN return; this routine is VOID and reports its result through
    IoStatus instead.)

Arguments:

    SharedCacheMap - Pointer to SharedCacheMap to be written.

    IoStatus - Receives the completion status.  Status is
        STATUS_FILE_LOCK_CONFLICT if the file system deferred the lazy
        write, otherwise the status of the flush.  Information may come
        back as CC_REQUEUE when the workitem should be requeued --
        TODO(review): confirm CC_REQUEUE is set by CcFlushCache; this
        routine only tests it.

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PMBCB Mbcb;
    NTSTATUS Status;
    PVACB ActiveVacb = NULL;

    DebugTrace(+1, me, "CcWriteBehind\n", 0 );
    DebugTrace( 0, me, "    SharedCacheMap = %08lx\n", SharedCacheMap );

    //
    //  First we have to acquire the file for LazyWrite, to avoid
    //  deadlocking with writers to the file.  We do this via the
    //  CallBack procedure specified to CcInitializeCacheMap.
    //

    if (!(*SharedCacheMap->Callbacks->AcquireForLazyWrite)
                              ( SharedCacheMap->LazyWriteContext, TRUE )) {

        //
        //  The filesystem is hinting that it doesn't think that it can
        //  service the write without significant delay, so we will defer
        //  and come back later.  Simply drop the queued flag ... note that
        //  we do not modify CcPagesYetToWrite, in the hope that we can make
        //  up the difference in some other cache map on this pass.
        //

        CcAcquireMasterLock( &OldIrql );
        ClearFlag(SharedCacheMap->Flags, WRITE_QUEUED);
        CcReleaseMasterLock( OldIrql );

        IoStatus->Status = STATUS_FILE_LOCK_CONFLICT;
        return;
    }

    //
    //  See if there is a previous active page to clean up, but only
    //  do so now if it is the last dirty page or no users have the
    //  file open.  We will free it below after dropping the spinlock.
    //

    ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
    CcAcquireMasterLockAtDpcLevel();

    if ((SharedCacheMap->DirtyPages <= 1) || (SharedCacheMap->OpenCount == 0)) {
        GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    //
    //  Increment open count so that our caller's views stay available
    //  for CcGetVacbMiss.  We could be tying up all of the views, and
    //  still need to write file sizes.
    //

    CcIncrementOpenCount( SharedCacheMap, 'brWS' );

    //
    //  If there is a mask bcb, then we need to establish a target for
    //  it to flush.
    //

    if ((Mbcb = SharedCacheMap->Mbcb) != 0) {

        //
        //  Set a target of pages to write, assuming that any Active
        //  Vacb will increase the number, capped by the global
        //  CcPagesYetToWrite budget.
        //

        Mbcb->PagesToWrite = Mbcb->DirtyPages + ((ActiveVacb != NULL) ? 1 : 0);

        if (Mbcb->PagesToWrite > CcPagesYetToWrite) {

            Mbcb->PagesToWrite = CcPagesYetToWrite;
        }
    }

    CcReleaseMasterLockFromDpcLevel();
    ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql );

    //
    //  Now free the active Vacb, if we found one.
    //

    if (ActiveVacb != NULL) {

        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    //
    //  Now perform the lazy writing for this file via a special call
    //  to CcFlushCache.  He recognizes us by the &CcNoDelay input to
    //  FileOffset, which signifies a Lazy Write, but is subsequently
    //  ignored.
    //

    CcFlushCache( SharedCacheMap->FileObject->SectionObjectPointer,
                  &CcNoDelay,
                  1,
                  IoStatus );

    //
    //  No need for the Lazy Write resource now.
    //

    (*SharedCacheMap->Callbacks->ReleaseFromLazyWrite)
                              ( SharedCacheMap->LazyWriteContext );

    //
    //  Check if we need to put up a popup.
    //

    if (!NT_SUCCESS(IoStatus->Status) && !RetryError(IoStatus->Status)) {

        //
        //  We lost writebehind data.  Try to get the filename.  If we can't,
        //  then just raise the error returned by the failing write.
        //

        POBJECT_NAME_INFORMATION FileNameInfo;
        NTSTATUS QueryStatus;
        ULONG whocares;

        FileNameInfo = ExAllocatePoolWithTag( PagedPool, 1024, 'nFcC' );

        if ( FileNameInfo ) {
            QueryStatus = ObQueryNameString( SharedCacheMap->FileObject,
                                             FileNameInfo,
                                             1024,
                                             &whocares );

            if ( !NT_SUCCESS(QueryStatus) ) {
                ExFreePool(FileNameInfo);
                FileNameInfo = NULL;
            }
        }

        //
        //  Give checked builds something to look at.  This should also be
        //  event logged for after-the-fact analysis.
        //

        KdPrint(("CACHE MANAGER: Lost delayed write FileOb %08x status %08x\n", SharedCacheMap->FileObject, IoStatus->Status));

        if ( FileNameInfo ) {
            IoRaiseInformationalHardError( STATUS_LOST_WRITEBEHIND_DATA,&FileNameInfo->Name, NULL );
            ExFreePool(FileNameInfo);
        } else {
            if ( SharedCacheMap->FileObject->FileName.Length &&
                 SharedCacheMap->FileObject->FileName.MaximumLength &&
                 SharedCacheMap->FileObject->FileName.Buffer ) {

                IoRaiseInformationalHardError( STATUS_LOST_WRITEBEHIND_DATA,&SharedCacheMap->FileObject->FileName, NULL );
            }
        }

        CcLogError( SharedCacheMap->FileObject->DeviceObject,
                    IO_LOST_DELAYED_WRITE,
                    IoStatus->Status,
                    &(SharedCacheMap->FileObject->FileName) );

    //
    //  See if there are any deferred writes we can post.
    //

    } else if (!IsListEmpty(&CcDeferredWrites)) {
        CcPostDeferredWrites();
    }

    //
    //  Now acquire BcbSpinLock again to check for ValidData updates.
    //

    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

    //
    //  If the current ValidDataGoal is greater than (or equal to)
    //  ValidDataLength, then we must see if we have advanced beyond the
    //  current ValidDataLength.
    //
    //  If we have NEVER written anything out from this shared cache map,
    //  then there is no need to check anything associated with valid data
    //  length here.  We will come by here again when, and if, anybody
    //  actually modifies the file and we lazy write some data.
    //

    Status = STATUS_SUCCESS;
    if (FlagOn(SharedCacheMap->Flags, LAZY_WRITE_OCCURRED) &&
        (SharedCacheMap->ValidDataGoal.QuadPart >= SharedCacheMap->ValidDataLength.QuadPart) &&
        (SharedCacheMap->ValidDataLength.QuadPart != MAXLONGLONG) &&
        (SharedCacheMap->FileSize.QuadPart != 0)) {

        LARGE_INTEGER NewValidDataLength;

        NewValidDataLength = CcGetFlushedValidData( SharedCacheMap->FileObject->SectionObjectPointer,
                                                    TRUE );

        //
        //  If New ValidDataLength has been written, then we have to
        //  call the file system back to update it.  We must temporarily
        //  drop our spinlock while we do this, which is safe to do since
        //  we have not cleared WRITE_QUEUED.
        //
        //  Note we keep calling any time we wrote the last page of the
        //  file, to solve the "famous" AFS Server problem.  The file system
        //  will truncate our valid data call to whatever is currently
        //  valid.  But then if he writes a little more, we do not want to
        //  stop calling back.
        //

        if ( NewValidDataLength.QuadPart >= SharedCacheMap->ValidDataLength.QuadPart ) {

            ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            //
            //  Call file system to set new valid data.  We have no
            //  one to tell if this doesn't work.
            //

            Status = CcSetValidData( SharedCacheMap->FileObject,
                                     &NewValidDataLength );

            ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            if (NT_SUCCESS(Status)) {
                SharedCacheMap->ValidDataLength = NewValidDataLength;
#ifdef TOMM
            } else if ((Status != STATUS_INSUFFICIENT_RESOURCES) && !RetryError(Status)) {
                DbgPrint("Unexpected status from CcSetValidData: %08lx, FileObject: %08lx\n",
                         Status,
                         SharedCacheMap->FileObject);
                DbgBreakPoint();
#endif // TOMM
            }
        }
    }

    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

    //
    //  Show we are done.
    //

    CcAcquireMasterLock( &OldIrql );
    CcDecrementOpenCount( SharedCacheMap, 'brWF' );

    //
    //  Make an approximate guess about whether we will call
    //  CcDeleteSharedCacheMap or not to truncate the file.
    //
    //  Also do not delete the SharedCacheMap if we got an error on the
    //  ValidDataLength callback.  If we get a resource allocation failure
    //  or a retryable error (due to log file full?), we have no one to
    //  tell, so we must just loop back and try again.  Of course all I/O
    //  errors are just too bad.
    //

    if ((SharedCacheMap->OpenCount == 0)

            &&

        (NT_SUCCESS(Status) || ((Status != STATUS_INSUFFICIENT_RESOURCES) && !RetryError(Status)))) {

        CcReleaseMasterLock( OldIrql );
        FsRtlAcquireFileExclusive( SharedCacheMap->FileObject );
        CcAcquireMasterLock( &OldIrql );

        //
        //  Now really see if we are to delete this SharedCacheMap.  By
        //  having released first we avoid a deadlock with the file system
        //  when the FileObject is dereferenced.  Note that
        //  CcDeleteSharedCacheMap requires that the CcMasterSpinLock
        //  already be acquired, and it releases it.
        //
        //  Note that we must retest since we dropped and reacquired the
        //  master lock.
        //

        if ((SharedCacheMap->OpenCount == 0)

                &&

            ((SharedCacheMap->DirtyPages == 0) || ((SharedCacheMap->FileSize.QuadPart == 0) &&
                                                   !FlagOn(SharedCacheMap->Flags, PIN_ACCESS)))) {

            //
            //  Make sure to drop the requeue flag in case the write hit the
            //  timeout at the same time it finished everything up.
            //

            CcDeleteSharedCacheMap( SharedCacheMap, OldIrql, TRUE );
            IoStatus->Information = 0;
            SharedCacheMap = NULL;

        } else {

            CcReleaseMasterLock( OldIrql );
            FsRtlReleaseFile( SharedCacheMap->FileObject );
            CcAcquireMasterLock( &OldIrql );
        }
    }

    //
    //  In the normal case, we just clear the flag on the way out if
    //  we will not requeue the workitem.
    //

    if (SharedCacheMap != NULL) {

        if (IoStatus->Information != CC_REQUEUE) {
            ClearFlag(SharedCacheMap->Flags, WRITE_QUEUED);
        }
        CcReleaseMasterLock( OldIrql );
    }

    DebugTrace(-1, me, "CcWriteBehind->VOID\n", 0 );

    return;
}

_inline
ULONG
IsVacbLevelReferenced (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB *VacbArray,
    IN ULONG Level
    )

/*++

Routine Description:

    This routine indicates whether a Vacb level block carries any
    outstanding references, normal or special.

    (Doxygen: defined at line 2110 of cc.h; referenced by
    CcAdjustVacbLevelLockCount, CcDeleteSharedCacheMap,
    CcExtendVacbArray, CcSetVacbLargeOffset, and SetVacb.)

Arguments:

    SharedCacheMap - The cache map owning the level.

    VacbArray - The Vacb level block to examine.

    Level - The level number of the block.

Return Value:

    Nonzero if either reference count on the level is nonzero,
    zero otherwise.

--*/

{
    PVACB_LEVEL_REFERENCE RefBlock;
    ULONG Normal;
    ULONG Special;

    RefBlock = VacbLevelReference( SharedCacheMap, VacbArray, Level );

    Normal = RefBlock->Reference;
    Special = RefBlock->SpecialReference;

    //
    //  The result is nonzero exactly when either count is nonzero.
    //

    return Normal | Special;
}

_inline
PVACB_LEVEL_REFERENCE
VacbLevelReference (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB *VacbArray,
    IN ULONG Level
    )

/*++

Routine Description:

    This routine locates the reference structure stored after a Vacb
    level block.

    (Doxygen: defined at line 2094 of cc.h; referenced by
    CcCalculateVacbLevelLockCount, IsVacbLevelReferenced, and
    ReferenceVacbLevel.)

Arguments:

    SharedCacheMap - The cache map owning the level.

    VacbArray - The Vacb level block whose reference structure is wanted.

    Level - The level number of the block.

Return Value:

    Pointer to the VACB_LEVEL_REFERENCE for this block.

--*/

{
    PCHAR Base;

    //
    //  The reference structure always sits one VACB_LEVEL_BLOCK_SIZE
    //  past the start of the level block ...
    //

    Base = (PCHAR)VacbArray + VACB_LEVEL_BLOCK_SIZE;

    //
    //  ... except that level-zero blocks of modified-write-disabled
    //  files carry one additional VACB_LEVEL_BLOCK_SIZE block ahead
    //  of it.
    //

    if ((Level == 0) &&
        FlagOn( SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED )) {

        Base += VACB_LEVEL_BLOCK_SIZE;
    }

    return (PVACB_LEVEL_REFERENCE)Base;
}


Variable Documentation

LONG CcAggressiveZeroCount
 

Definition at line 2033 of file cc.h.

Referenced by CcInitializeCacheManager(), and CcZeroData().

LONG CcAggressiveZeroThreshold
 

Definition at line 2034 of file cc.h.

Referenced by CcInitializeCacheManager(), and CcZeroData().

ULONG CcAvailablePagesThreshold
 

Definition at line 2030 of file cc.h.

KSPIN_LOCK CcBcbSpinLock
 

Definition at line 1998 of file cc.h.

Referenced by CcAllocateInitializeBcb(), CcDeallocateBcb(), CcDeleteSharedCacheMap(), CcInitializeCacheManager(), CcMapData(), CcPinMappedData(), CcPinRead(), CcPreparePinWrite(), and CcUnpinFileData().

PVACB CcBeyondVacbs
 

Definition at line 2021 of file cc.h.

Referenced by CcGetVacbLargeOffset(), CcInitializeVacbs(), CcRemapBcb(), and CcUnpinFileData().

MM_SYSTEMSIZE CcCapturedSystemSize
 

Definition at line 2036 of file cc.h.

Referenced by CcInitializeCacheManager(), and CcLazyWriteScan().

LIST_ENTRY CcCleanSharedCacheMapList
 

Definition at line 1999 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcInitializeCacheManager(), CcInitializeCacheMap(), and CcUnpinFileData().

LARGE_INTEGER CcCollisionDelay
 

Definition at line 2015 of file cc.h.

Referenced by CcPurgeCacheSection().

LIST_ENTRY CcDeferredWrites
 

Definition at line 2024 of file cc.h.

Referenced by CcCanIWrite(), CcDeferWrite(), CcFlushCache(), CcInitializeCacheManager(), CcLazyWriteScan(), CcPostDeferredWrites(), CcUnpinRepinnedBcb(), CcWorkerThread(), and CcWriteBehind().

KSPIN_LOCK CcDeferredWriteSpinLock
 

Definition at line 2023 of file cc.h.

ULONG CcDirtyPagesLastScan
 

Definition at line 2027 of file cc.h.

Referenced by CcLazyWriteScan().

ULONG CcDirtyPageTarget
 

Definition at line 2026 of file cc.h.

Referenced by CcInitializeCacheManager(), and CcLazyWriteScan().

ULONG CcDirtyPageThreshold
 

Definition at line 2025 of file cc.h.

Referenced by CcCanIWrite(), and CcInitializeCacheManager().

SHARED_CACHE_MAP_LIST_CURSOR CcDirtySharedCacheMapList
 

Definition at line 2000 of file cc.h.

Referenced by CcFlushCache(), CcGetDirtyPages(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheManager(), CcInitializeCacheMap(), CcIsThereDirtyData(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPurgeCacheSection(), CcSetDirtyInMask(), CcSetDirtyPinnedData(), CcSetFileSizes(), CcUninitializeCacheMap(), and CcZeroEndOfLastPage().

LIST_ENTRY CcExpressWorkQueue
 

Definition at line 2007 of file cc.h.

Referenced by CcInitializeCacheManager(), CcScheduleReadAhead(), and CcWorkerThread().

LARGE_INTEGER CcFirstDelay
 

Definition at line 2013 of file cc.h.

Referenced by CcScheduleLazyWriteScan().

LARGE_INTEGER CcIdleDelay
 

Definition at line 2014 of file cc.h.

Referenced by CcCanIWrite(), and CcScheduleLazyWriteScan().

ULONG CcIdleDelayTick
 

Definition at line 2011 of file cc.h.

Referenced by CcFlushCache(), and CcInitializeCacheManager().

LIST_ENTRY CcIdleWorkerThreadList
 

Definition at line 2006 of file cc.h.

Referenced by CcInitializeCacheManager(), CcPostWorkQueue(), and CcWorkerThread().

ULONG CcLazyWriteHotSpots
 

Definition at line 2035 of file cc.h.

Referenced by CcFlushCache().

SHARED_CACHE_MAP_LIST_CURSOR CcLazyWriterCursor
 

Definition at line 2001 of file cc.h.

Referenced by CcInitializeCacheManager(), and CcLazyWriteScan().

KSPIN_LOCK CcMasterSpinLock
 

Definition at line 1997 of file cc.h.

ULONG CcMaxVacbLevelsSeen
 

Definition at line 2037 of file cc.h.

Referenced by CcCreateVacbArray(), CcDrainVacbLevelZone(), CcExtendVacbArray(), CcGetVacbMiss(), and CcReferenceFileOffset().

LARGE_INTEGER CcNoDelay
 

Definition at line 2012 of file cc.h.

Referenced by CcCanIWrite(), CcFlushCache(), and CcWriteBehind().

ULONG CcNumberActiveWorkerThreads
 

Definition at line 2005 of file cc.h.

Referenced by CcPostWorkQueue(), and CcWorkerThread().

ULONG CcNumberVacbs
 

Definition at line 2019 of file cc.h.

Referenced by CcInitializeVacbs().

ULONG CcNumberWorkerThreads
 

Definition at line 2004 of file cc.h.

Referenced by CcInitializeCacheManager().

ULONG CcPagesWrittenLastTime
 

Definition at line 2029 of file cc.h.

Referenced by CcLazyWriteScan().

ULONG CcPagesYetToWrite
 

Definition at line 2028 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcLazyWriteScan(), CcUnpinFileData(), and CcWriteBehind().

LIST_ENTRY CcPostTickWorkQueue
 

Definition at line 2009 of file cc.h.

Referenced by CcInitializeCacheManager(), CcLazyWriteScan(), and CcWaitForCurrentLazyWriterActivity().

BOOLEAN CcQueueThrottle
 

Definition at line 2010 of file cc.h.

Referenced by CcPostWorkQueue(), and CcWorkerThread().

LIST_ENTRY CcRegularWorkQueue
 

Definition at line 2008 of file cc.h.

Referenced by CcInitializeCacheManager(), CcLazyWriteScan(), CcScanDpc(), and CcWorkerThread().

LARGE_INTEGER CcTargetCleanDelay
 

Definition at line 2016 of file cc.h.

ULONG CcTotalDirtyPages
 

Definition at line 2031 of file cc.h.

Referenced by CcAcquireByteRangeForWrite(), CcCanIWrite(), CcDeleteMbcb(), CcDeleteSharedCacheMap(), CcFreeActiveVacb(), CcLazyWriteScan(), CcSetDirtyInMask(), CcSetDirtyPinnedData(), CcUnpinFileData(), and CcWorkerThread().

ULONG CcTune
 

Definition at line 2032 of file cc.h.

NPAGED_LOOKASIDE_LIST CcTwilightLookasideList
 

Definition at line 2002 of file cc.h.

Referenced by CcInitializeCacheManager().

ULONG CcVacbLevelEntries
 

Definition at line 2038 of file cc.h.

Referenced by CcAllocateVacbLevel(), CcDeallocateVacbLevel(), CcDrainVacbLevelZone(), CcExtendVacbArray(), and CcPrefillVacbLevelZone().

PVACB* CcVacbLevelFreeList
 

Definition at line 2039 of file cc.h.

Referenced by CcAllocateVacbLevel(), CcDeallocateVacbLevel(), CcDrainVacbLevelZone(), and CcPrefillVacbLevelZone().

ULONG CcVacbLevelWithBcbsEntries
 

Definition at line 2040 of file cc.h.

Referenced by CcAllocateVacbLevel(), CcDeallocateVacbLevel(), CcDrainVacbLevelZone(), and CcPrefillVacbLevelZone().

PVACB* CcVacbLevelWithBcbsFreeList
 

Definition at line 2041 of file cc.h.

Referenced by CcAllocateVacbLevel(), CcDeallocateVacbLevel(), CcDrainVacbLevelZone(), and CcPrefillVacbLevelZone().

LIST_ENTRY CcVacbLru
 

Definition at line 2022 of file cc.h.

Referenced by CcGetVacbMiss(), and CcInitializeVacbs().

PVACB CcVacbs
 

Definition at line 2020 of file cc.h.

Referenced by CcGetVacbLargeOffset(), CcInitializeVacbs(), CcRemapBcb(), and CcUnpinFileData().

KSPIN_LOCK CcVacbSpinLock
 

Definition at line 2018 of file cc.h.

KSPIN_LOCK CcWorkQueueSpinlock
 

Definition at line 2003 of file cc.h.

LAZY_WRITER LazyWriter
 

Definition at line 2017 of file cc.h.

Referenced by CcAllocateInitializeBcb(), CcCanIWrite(), CcDeallocateBcb(), CcDeferWrite(), CcFlushCache(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheManager(), CcInitializeCacheMap(), CcLazyWriteScan(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPurgeCacheSection(), CcScanDpc(), CcScheduleLazyWriteScan(), CcSetDirtyInMask(), CcSetDirtyPinnedData(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWaitForCurrentLazyWriterActivity(), and CcZeroEndOfLastPage().

PFN_COUNT MmAvailablePages
 

Definition at line 134 of file cc.h.

Referenced by CcZeroData(), ExAllocatePool(), MiAddWorkingSetPage(), MiAddWsleHash(), MiAllocatePoolPages(), MiAllocateSpecialPool(), MiCheckAndSetSystemTrimCriteria(), MiCheckProcessTrimCriteria(), MiCheckSystemTrimEndCriteria(), MiDetermineWsTrimAmount(), MiDoReplacement(), MiEnsureAvailablePageOrWait(), MiGatherMappedPages(), MiInitializeSessionPool(), MiInsertPageInList(), MiInsertStandbyListAtFront(), MiModifiedPageWriterWorker(), MiObtainFreePages(), MiRemoveAnyPage(), MiRemovePageByColor(), MiRemovePageFromList(), MiRemoveZeroPage(), MiResolveMappedFileFault(), MiUnlinkFreeOrZeroedPage(), MiUnlinkPageFromList(), MmAccessFault(), MmAddPhysicalMemory(), MmAdjustWorkingSetSize(), MmCheckCachedPageState(), MmGatherMemoryForHibernate(), MmInitSystem(), MmRaisePoolQuota(), MmSetMemoryPriorityProcess(), MmShutdownSystem(), MmWorkingSetManager(), and NtQuerySystemInformation().


Generated on Sat May 15 19:43:03 2004 for test by doxygen 1.3.7