Implementation changes for odb.ng (SegmentedFileStorage, CachingStorage, etc.)

dev_timestamp
Harald Wolff 2019-09-19 08:46:19 +02:00
parent 514dda09aa
commit 0ffdfb59cf
19 changed files with 1220 additions and 795 deletions

View File

@@ -83,11 +83,8 @@
<Compile Include="odb\ng\Document.cs" />
<Compile Include="odb\ng\storage\OrganizedFile.cs" />
<Compile Include="odb\ng\storage\OrganizedFileType.cs" />
<Compile Include="odb\ng\storage\FSStorageContainer.cs" />
<Compile Include="odb\ng\storage\FSStorage.cs" />
<Compile Include="odb\ng\storage\StorageAreaContainer.cs" />
<Compile Include="odb\ng\storage\StorageArea.cs" />
<Compile Include="odb\ng\Session.cs" />
<Compile Include="odb\ng\Mapper.API.cs" />
<Compile Include="odb\ng\index\Index.cs" />
<Compile Include="odb\ng\index\SimpleIndex.cs" />
@@ -100,14 +97,21 @@
<Compile Include="odb\ng\DocumentChanges.cs" />
<Compile Include="net\IPv6.cs" />
<Compile Include="arithmetics\Words.cs" />
<Compile Include="odb\ng\Events.cs" />
<Compile Include="odb\ng\storage\bases\StorageBase.cs" />
<Compile Include="odb\ng\storage\bases\CachingStorageBase.cs" />
<Compile Include="odb\ng\storage\session\SessionStorageContainer.cs" />
<Compile Include="odb\ng\storage\session\SessionStorage.cs" />
<Compile Include="odb\ng\storage\cache\CachingStorage.cs" />
<Compile Include="odb\ng\storage\SegmentedFile.cs" />
<Compile Include="odb\ng\storage\fs\SegmentedFileStorage.cs" />
<Compile Include="odb\ng\storage\IStorage.cs" />
<Compile Include="odb\ng\storage\IStorageContainer.cs" />
<Compile Include="cache\LinkedListItem.cs" />
<Compile Include="cache\Cache.cs" />
<Compile Include="collections\LinkedList.cs" />
<Compile Include="odb\ng\storage\fs\FSStorageContainer.cs" />
<Compile Include="threads\LockingException.cs" />
<Compile Include="odb\ng\storage\bases\ChainedStorage.cs" />
</ItemGroup>
<ItemGroup>
<Folder Include="odb\" />
@@ -124,6 +128,11 @@
<Folder Include="odb\ng\index\" />
<Folder Include="arithmetics\" />
<Folder Include="odb\ng\storage\bases\" />
<Folder Include="odb\ng\storage\session\" />
<Folder Include="odb\ng\storage\cache\" />
<Folder Include="odb\ng\storage\fs\" />
<Folder Include="cache\" />
<Folder Include="collections\" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\ln.logging\ln.logging.csproj">

View File

@@ -81,6 +81,8 @@ namespace ln.types.odb.ng
public override ODBValue Clone()
{
Document clone = new Document(ID);
clone.StorageTimeStamp = StorageTimeStamp;
foreach (ODBValue fieldName in properties.Keys)
{
clone[fieldName] = this[fieldName].Clone();
@@ -88,12 +90,21 @@ namespace ln.types.odb.ng
return clone;
}
public void CloneTo(Document target)
{
target.properties.Clear();
target.StorageTimeStamp = StorageTimeStamp;
foreach (ODBValue fieldName in properties.Keys)
{
target[fieldName] = this[fieldName].Clone();
}
}
public override byte[] ToStorage()
{
MemoryStream stream = new MemoryStream();
BinaryWriter writer = new BinaryWriter(stream);
writer.Write(ID.ToByteArray());
writer.Write(properties.Count);
foreach (ODBValue propName in properties.Keys)

View File

@@ -1,24 +0,0 @@
using System;
namespace ln.types.odb.ng
{
public enum StorageChangeType { INSERT, UPDATE, DELETE }
public delegate void StorageChanged(StoredDocumentChangedEventArgs args);
public class StoredDocumentChangedEventArgs
{
public IStorage Storage { get; }
public StorageChangeType ChangeType { get; }
public Guid DocumentID { get; }
public DocumentChanges DocumentChanges { get; }
public StoredDocumentChangedEventArgs(IStorage storage, StorageChangeType changeType, Guid documentID, DocumentChanges documentChanges)
{
Storage = storage;
ChangeType = changeType;
DocumentID = documentID;
DocumentChanges = documentChanges;
}
}
}

View File

@@ -5,6 +5,7 @@ using ln.types.odb.values;
using ln.types.odb.ng.index;
using System.Linq;
using System.Collections;
using ln.types.odb.ng.storage;
namespace ln.types.odb.ng
{

View File

@@ -8,6 +8,7 @@ using ln.types.net;
using ln.types.odb.ng.storage;
using ln.types.btree;
using ln.types.odb.ng.mappings;
using ln.types.odb.ng.storage.fs;
namespace ln.types.odb.ng
{
@@ -18,7 +19,8 @@ namespace ln.types.odb.ng
public partial class Mapper : IDisposable
{
public static Mapper Default { get; set; }
public static Mapper Default { get; set; } = new Mapper((IStorageContainer)null);
public IStorageContainer StorageContainer { get; private set; }
Dictionary<Type, IODBMapping> mappings = new Dictionary<Type, IODBMapping>();
@@ -30,7 +32,7 @@ namespace ln.types.odb.ng
{ }
public Mapper(IStorageContainer storageContainer)
{
if (Default == null)
if (Default?.StorageContainer == null)
Default = this;
this.StorageContainer = storageContainer;
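
Taken together, the two hunks above bootstrap Mapper.Default with a placeholder instance (StorageContainer == null) and let the first Mapper constructed on a real container promote itself to Default. A minimal sketch of the resulting behavior, with a hypothetical path, assuming the ln.types assemblies are referenced:

using ln.types.odb.ng;
using ln.types.odb.ng.storage.fs;

class MapperBootstrapSketch
{
    static void Main()
    {
        // At startup, Default is the placeholder whose StorageContainer is null.
        Mapper placeholder = Mapper.Default;

        // The first Mapper built on a real container replaces the placeholder,
        // because Default?.StorageContainer == null still holds at that point.
        Mapper mapper = new Mapper(new FSStorageContainer("/tmp/odb"));
        bool promoted = ReferenceEquals(Mapper.Default, mapper);   // true
    }
}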

View File

@@ -13,6 +13,7 @@ using ln.types.odb.values;
using ln.types.btree;
using System.Linq;
using ln.types.odb.ng.index;
using ln.types.odb.ng.storage;
namespace ln.types.odb.ng
{
public abstract class Query

View File

@@ -1,250 +0,0 @@
using System;
using System.Collections.Generic;
using ln.types.odb.values;
using ln.logging;
using System.Linq;
namespace ln.types.odb.ng
{
public enum SessionSynchronisationStrategy
{
BIDIRECTIONAL,
FORWARDONLY,
REJECTCHANGED
}
public class Session : IStorageContainer
{
public IStorageContainer StorageContainer { get; private set; }
public Mapper ODBMapper { get; private set; }
public SessionSynchronisationStrategy SynchronisationStrategy { get; set; } = SessionSynchronisationStrategy.REJECTCHANGED;
public bool IsOpen => StorageContainer.IsOpen;
public Session(IStorageContainer storageContainer)
{
StorageContainer = storageContainer;
ODBMapper = Mapper.Default;
}
private Dictionary<string, IStorage> storages = new Dictionary<string, IStorage>();
public IStorage GetStorage(string storageName)
{
if (storages.ContainsKey(storageName))
return storages[storageName];
IStorage storage = StorageContainer.GetStorage(storageName);
storages.Add(storageName, new SessionCache(this,storage));
if (!storage.IsOpen)
storage.Open();
return storages[storageName];
}
public IStorageContainer Open()
{
StorageContainer.Open();
return this;
}
public void Close()
{
StorageContainer.Close();
}
public IEnumerable<string> GetStorageNames()
{
return StorageContainer.GetStorageNames();
}
public void Dispose()
{
}
class SessionCache : IStorage
{
public Session Session { get; }
public IStorage Storage { get; }
public bool IsOpen => Storage.IsOpen;
Dictionary<Guid, CachedDocument> documentCache = new Dictionary<Guid, CachedDocument>();
public event StorageChanged StorageChanged;
public SessionCache(Session session,IStorage storage)
{
Session = session;
Storage = storage;
}
public bool Open()
{
if (Storage.Open())
{
Storage.StorageChanged += Storage_StorageChanged;
return true;
}
return false;
}
void Storage_StorageChanged(StoredDocumentChangedEventArgs args)
{
if (documentCache.ContainsKey(args.DocumentID))
{
documentCache[args.DocumentID].ConcurrentlyChanged = true;
}
StorageChanged?.Invoke(args);
}
public void Close()
{
Storage.StorageChanged -= Storage_StorageChanged;
Storage.Close();
}
public Document Load(Guid documentID)
{
lock (this)
{
if (!documentCache.ContainsKey(documentID))
{
Document cacheDocument = Storage.Load(documentID);
documentCache.Add(documentID, new CachedDocument(cacheDocument));
}
return documentCache[documentID].WorkingCopy;
}
}
public void Save(Document document)
{
lock (this)
{
if (!documentCache.ContainsKey(document.ID))
{
//Logging.Log(LogLevel.DEBUG, "SessionCache: Save(): saving new Document {0}",document.ID);
Storage.Save(document);
documentCache.Add(document.ID, new CachedDocument(document.Clone() as Document,document));
}
else
{
lock (Storage)
{
Document storageDocument = Storage.Load(document.ID);
Document cacheDocument = documentCache[document.ID].CachedCopy;
DocumentChanges storageChanges = new DocumentChanges(cacheDocument, storageDocument);
DocumentChanges sessionChanges = new DocumentChanges(cacheDocument, document);
switch (Session.SynchronisationStrategy)
{
case SessionSynchronisationStrategy.BIDIRECTIONAL:
throw new NotImplementedException();
case SessionSynchronisationStrategy.FORWARDONLY:
throw new NotImplementedException();
case SessionSynchronisationStrategy.REJECTCHANGED:
if (storageChanges.Changes.Count()>0)
throw new NotSupportedException("concurrent document change");
Storage.Save(document);
documentCache[document.ID] = new CachedDocument(document.Clone() as Document,document);
break;
}
}
}
}
}
public void Delete(Guid documentID)
{
lock (this)
{
if (documentCache.ContainsKey(documentID))
{
documentCache.Remove(documentID);
}
Storage.Delete(documentID);
}
}
public IEnumerable<Guid> GetDocumentIDs()
{
return Storage.GetDocumentIDs();
}
public IEnumerable<Guid> GetDocumentIDs(string path, Predicate<ODBValue> predicate)
{
lock (this)
{
return Storage.GetDocumentIDs(path, predicate);
}
}
public void EnsureIndex(params string[] path)
{
Storage.EnsureIndex(path);
}
public void Dispose()
{
Close();
}
private bool SyncDocument(Document src,Document dst)
{
Boolean changedFlag = false;
foreach (ODBValue propertyName in src.Keys)
{
if (!src[propertyName].Equals(dst[propertyName]))
{
//Logging.Log(LogLevel.DEBUG, "SessionCache: Save(): found changed field for Document {0}: {1}={2}", document.ID, propertyName, document[propertyName]);
dst[propertyName] = src[propertyName];
changedFlag = true;
}
}
foreach (ODBValue propertyName in dst.Keys)
{
if (!src.Contains(propertyName))
{
dst[propertyName] = ODBNull.Instance;
changedFlag = true;
}
}
return changedFlag;
}
class CachedDocument
{
public Document CachedCopy;
public Document WorkingCopy;
public bool ConcurrentlyChanged;
public CachedDocument(Document cachedCopy)
{
CachedCopy = cachedCopy;
WorkingCopy = cachedCopy.Clone() as Document;
ConcurrentlyChanged = false;
}
public CachedDocument(Document cachedCopy,Document workingCopy)
{
CachedCopy = cachedCopy;
WorkingCopy = workingCopy;
ConcurrentlyChanged = false;
}
}
}
}
}

View File

@@ -1,458 +0,0 @@
// /**
// * File: FSStorage.cs
// * Author: haraldwolff
// *
// * This file and its content are copyrighted by the Author and / or copyright holder.
// * Any use without proper permission is illegal and may lead to legal actions.
// *
// *
// **/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using ln.logging;
using ln.types.odb.values;
using ln.types.odb.ng.index;
namespace ln.types.odb.ng.storage
{
/**
* FSStorage
*
* Directory Layout:
*
* <BasePath>/<StorageName>
* /data.odb Serialized Document Data
* /data.idx Serialized Lookup Index for Documents and Free Areas
*
* data.odb
* ----------
* 0000 4 MAGIC Bytes
* 0004 4 Version
* 0008 8 LastCloseTimestamp
* 0010 4 FirstOffset
* 0014 4 GranularWidth
* 0018 8 Reserved 0
*
**/
public class FSStorage : IStorage
{
public byte[] MagicBytes { get; } = new byte[] { 0x0F, 0x0E, 0x0D, 0x0A };
public String StoragePath { get; }
public String StorageName { get; }
public int FileVersion { get; private set; }
public long LastCloseTimestamp { get; private set; }
public int FirstOffset { get; private set; }
public int GranularWidth { get; private set; } = 12;
public int GranularityMask => (1 << GranularWidth) - 1;
public int AppendOffset { get; private set; }
public event StorageChanged StorageChanged;
StorageAreaContainer storageAreas = new StorageAreaContainer();
Dictionary<Guid, StorageArea> documentAreas = new Dictionary<Guid, StorageArea>();
IndexPath.DocumentPath documentPath = new IndexPath.DocumentPath();
FileStream fileStream;
public FSStorage(string storagePath)
{
StoragePath = storagePath;
StorageName = System.IO.Path.GetFileName(storagePath);
}
public FSStorage(string storagePath,int granularWidth)
:this(storagePath)
{
GranularWidth = granularWidth;
}
private void AssertOpen()
{
if (fileStream == null)
throw new IOException("FSStorage not opened");
}
public bool IsOpen => (fileStream != null);
public bool Open()
{
if (!IsOpen)
{
try
{
if (!Directory.Exists(StoragePath))
Directory.CreateDirectory(StoragePath);
fileStream = new FileStream(System.IO.Path.Combine(StoragePath, "data.odb"), FileMode.OpenOrCreate);
if (fileStream.Length == 0)
{
FileVersion = 0;
LastCloseTimestamp = 0;
FirstOffset = (1 << GranularWidth);
if (FirstOffset < 0x20)
throw new NotSupportedException("Granularity too small");
AppendOffset = FirstOffset;
Close();
return Open();
}
else
{
if (!fileStream.ReadBytes(4).SequenceEqual(MagicBytes))
throw new IOException("Magic bytes do not match");
FileVersion = fileStream.ReadInteger();
LastCloseTimestamp = fileStream.ReadLong();
FirstOffset = fileStream.ReadInteger();
GranularWidth = fileStream.ReadInteger();
Scan();
fileStream.Position = 8;
fileStream.WriteLong(0);
fileStream.Flush();
if (File.Exists(System.IO.Path.Combine(StoragePath, "indeces.lst")))
{
bool needsRebuild = false;
using (FileStream indexLst = new FileStream(System.IO.Path.Combine(StoragePath, "indeces.lst"), FileMode.Open))
{
byte[] indexLstBytes = indexLst.ReadBytes((int)indexLst.Length);
ODBList idxList = new ODBList(indexLstBytes, 0, indexLstBytes.Length);
foreach (ODBValue indexName in idxList)
{
documentPath.Ensure(IndexPath.SplitPath(indexName.AsString));
}
}
foreach (Index index in documentPath.GetIndeces())
{
if (!index.LoadIndex(StoragePath,LastCloseTimestamp))
needsRebuild = true;
}
if (needsRebuild)
RebuildIndeces();
}
}
}
catch (Exception e)
{
Logging.Log(e);
if (fileStream != null)
{
fileStream.Close();
fileStream.Dispose();
fileStream = null;
}
return false;
}
return true;
}
return false;
}
private void Scan()
{
int offset = FirstOffset;
while (offset < fileStream.Length)
{
fileStream.Position = offset;
StorageArea storageArea = new StorageArea(offset, fileStream.ReadInteger());
Guid documentID = new Guid(fileStream.ReadBytes(16));
if (Guid.Empty.Equals(documentID))
storageAreas.Push(storageArea);
else
{
if (documentAreas.ContainsKey(documentID))
{
Document previousDoc = LoadDocument(documentAreas[documentID]);
Document currentDoc = LoadDocument(storageArea);
if (previousDoc.StorageTimeStamp < currentDoc.StorageTimeStamp)
{
WriteStorageArea(documentAreas[documentID]);
storageAreas.Push(documentAreas[documentID]);
documentAreas[documentID] = storageArea;
}
else
{
WriteStorageArea(storageArea);
storageAreas.Push(storageArea);
}
} else
{
documentAreas.Add(documentID, storageArea);
}
}
offset = storageArea.NextOffset;
}
AppendOffset = offset;
}
public void Close()
{
lock (this){
AssertOpen();
fileStream.Position = 0;
fileStream.WriteBytes(MagicBytes);
fileStream.WriteInteger(FileVersion);
LastCloseTimestamp = (long)DateTime.Now.ToUnixTimeMilliseconds();
fileStream.WriteLong(LastCloseTimestamp);
fileStream.WriteInteger(FirstOffset);
fileStream.WriteInteger(GranularWidth);
fileStream.Close();
fileStream.Dispose();
fileStream = null;
List<String> indexNames = new List<string>();
foreach (Index index in documentPath.GetIndeces())
{
indexNames.Add(index.IndexName);
index.SaveIndex(StoragePath, LastCloseTimestamp);
}
ODBList indexList = new ODBList();
indexList.AddRange(indexNames.Select((x) => ODBValue.FromNative(x)));
FileStream indexLst = new FileStream(System.IO.Path.Combine(StoragePath, "indeces.lst"), FileMode.Create);
indexLst.WriteBytes(indexList.ToStorage());
indexLst.Close();
indexLst.Dispose();
}
}
public IEnumerable<Guid> GetDocumentIDs()
{
lock (this)
{
return documentAreas.Keys.ToArray();
}
}
public Document Load(Guid documentID)
{
lock (this)
{
if (!documentAreas.ContainsKey(documentID))
throw new KeyNotFoundException();
StorageArea storageArea = documentAreas[documentID];
return LoadDocument(storageArea);
}
}
private Document LoadDocument(StorageArea storageArea)
{
fileStream.Position = storageArea.Offset + 4;
Guid documentID = new Guid(fileStream.ReadBytes(16));
byte[] storageBytes = fileStream.ReadBytes(storageArea.Size - 20);
try
{
return new Document(documentID, storageBytes);
} catch (Exception e)
{
Logging.Log(LogLevel.DEBUG, "Exception while Deserializing Document from FSStorage: {1} ID={0}",documentID,StorageName);
Logging.Log(LogLevel.DEBUG, "StorageArea: {0}", storageArea);
throw;
}
}
public void Save(Document document)
{
lock (this)
{
LastCloseTimestamp = 0;
if (Guid.Empty.Equals(document.ID))
document.ID = Guid.NewGuid();
document.StorageTimeStamp = DateTime.Now;
byte[] storageBytes = document.ToStorage();
StorageArea storageArea = storageAreas.Pop(storageBytes.Length + 20);
if (storageArea == null)
storageArea = AppendStorageArea(storageBytes.Length + 20);
int neededSize = storageBytes.Length + 20;
CheckGranularity(ref neededSize);
if (storageArea.Size > neededSize)
{
StorageArea splitArea = storageArea.Split(storageArea.Size - neededSize);
ReleaseStorageArea(splitArea);
storageAreas.Push(splitArea);
}
WriteStorageArea(storageArea, storageBytes);
documentPath.Replace(document.ID, document);
if (documentAreas.ContainsKey(document.ID))
{
StorageArea oldStorageArea = documentAreas[document.ID];
Document previousDocument = LoadDocument(oldStorageArea);
WriteStorageArea(oldStorageArea);
storageAreas.Push(oldStorageArea);
fireStorageChanged(StorageChangeType.UPDATE, document.ID,new DocumentChanges(previousDocument,document));
}
else
{
fireStorageChanged(StorageChangeType.INSERT, document.ID,null);
}
documentAreas[document.ID] = storageArea;
}
}
public void Delete(Guid documentID)
{
lock (this)
{
LastCloseTimestamp = 0;
if (documentAreas.ContainsKey(documentID))
{
StorageArea storageArea = documentAreas[documentID];
documentAreas.Remove(documentID);
storageArea = storageAreas.Push(storageArea);
WriteStorageArea(storageArea);
documentPath.Remove(documentID);
fireStorageChanged(StorageChangeType.DELETE, documentID, null);
}
}
}
private StorageArea AppendStorageArea(int size)
{
CheckGranularity(ref size);
StorageArea storageArea = new StorageArea(AppendOffset, size);
AppendOffset = storageArea.NextOffset;
return storageArea;
}
private void WriteStorageArea(StorageArea storageArea) => ReleaseStorageArea(storageArea);
private void WriteStorageArea(StorageArea storageArea,byte[] data)
{
AssertOpen();
if (data.Length > (storageArea.Size - 4))
throw new ArgumentOutOfRangeException(nameof(data));
fileStream.Position = storageArea.Offset;
fileStream.WriteInteger(storageArea.Size);
fileStream.WriteBytes(data);
fileStream.WriteBytes(new byte[ storageArea.Size - 4 - data.Length]);
}
private void ReleaseStorageArea(StorageArea storageArea)
{
if (storageArea.Size < 20)
throw new NotSupportedException();
fileStream.Position = storageArea.Offset;
fileStream.WriteInteger(storageArea.Size);
fileStream.WriteBytes(new byte[16]);
}
private void CheckGranularity(ref int i)
{
i = (i + GranularityMask) & ~GranularityMask;
}
private void fireStorageChanged(StorageChangeType changeType,Guid documentID,DocumentChanges documentChanges)
{
try
{
StorageChanged?.Invoke(
new StoredDocumentChangedEventArgs(this, changeType, documentID, documentChanges)
);
} catch (Exception e)
{
Logging.Log(LogLevel.ERROR, "FSStorage: StorageChanged event threw exception: {0}",e.ToString());
Logging.Log(e);
}
}
public IEnumerable<Guid> GetDocumentIDs(string path, Predicate<ODBValue> predicate)
{
lock (this)
{
index.Path p = index.IndexPath.SplitPath(path);
if (documentPath.Indexed(p))
{
return documentPath.GetDocumentIDs(p, predicate);
}
else
{
HashSet<Guid> documentIDs = new HashSet<Guid>();
IEnumerable<Guid> ids = GetDocumentIDs();
foreach (Guid documentID in ids)
{
Document document = Load(documentID);
if (predicate(document[path]))
documentIDs.Add(documentID);
}
return documentIDs;
}
}
}
public void EnsureIndex(params string[] paths)
{
lock (this)
{
bool needsRebuild = false;
foreach (String path in paths)
{
if (documentPath.Ensure(IndexPath.SplitPath(path)))
needsRebuild = true;
}
if (needsRebuild)
RebuildIndeces();
}
}
public void RebuildIndeces()
{
Logging.Log(LogLevel.INFO, "FSStorage: RebuildIndeces()");
foreach (Guid documentID in GetDocumentIDs())
{
Document document = Load(documentID);
documentPath.Replace(documentID, document);
}
}
public void Dispose()
{
if (IsOpen)
Close();
}
}
}

View File

@@ -0,0 +1,299 @@
// /**
// * File: SegmentedFile.cs
// * Author: haraldwolff
// *
// * This file and its content are copyrighted by the Author and / or copyright holder.
// * Any use without proper permission is illegal and may lead to legal actions.
// *
// *
// **/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using ln.logging;
using ln.types.btree;
namespace ln.types.odb.ng.storage
{
/**
* SegmentedFile
* ----------
* 0000 4 MAGIC Bytes
* 0004 4 Version
* 0008 8 LastCloseTimestamp
* 0010 4 FirstOffset
* 0014 4 GranularWidth
* 0018 8 Reserved 0
*
**/
public class SegmentedFile
{
public byte[] MagicBytes { get; } = new byte[] { 0x0F, 0x0E, 0x0D, 0x0A };
public String FileName { get; }
public int FileVersion { get; private set; }
public long LastCloseTimestamp { get; private set; }
public int FirstOffset { get; private set; }
public int GranularWidth { get; private set; } = 12;
public int GranularityMask => (1 << GranularWidth) - 1;
public int AppendOffset { get; private set; }
public IEnumerable<Segment> Segments => segments;
MappingBTree<long, Segment> segments = new MappingBTree<long, Segment>((s) => s.Offset);
FileStream fileStream;
public SegmentedFile(string fileName)
{
FileName = fileName;
}
public SegmentedFile(string fileName,int granularWidth)
:this(fileName)
{
GranularWidth = granularWidth;
}
private void AssertOpen()
{
if (fileStream == null)
throw new IOException("FSStorage not opened");
}
private void CheckGranularity(ref int i){ i = (i + GranularityMask) & ~GranularityMask; }
public bool IsOpen => (fileStream != null);
public bool Open()
{
if (!IsOpen)
{
try
{
fileStream = new FileStream(FileName, FileMode.OpenOrCreate);
if (fileStream.Length == 0)
{
FileVersion = 0;
LastCloseTimestamp = 0;
FirstOffset = (1 << GranularWidth);
if (FirstOffset < 0x20)
throw new NotSupportedException("Granularity too small");
AppendOffset = FirstOffset;
Close();
return Open();
}
else
{
if (!fileStream.ReadBytes(4).SequenceEqual(MagicBytes))
throw new IOException("Magic bytes do not match");
FileVersion = fileStream.ReadInteger();
LastCloseTimestamp = fileStream.ReadLong();
FirstOffset = fileStream.ReadInteger();
GranularWidth = fileStream.ReadInteger();
Scan();
fileStream.Position = 8;
fileStream.WriteLong(0);
fileStream.Flush();
}
}
catch (Exception e)
{
Logging.Log(e);
if (fileStream != null)
{
fileStream.Close();
fileStream.Dispose();
fileStream = null;
}
return false;
}
return true;
}
return false;
}
public Segment Append(Guid id,byte[] payload) => Append(id, payload.Length, payload);
public Segment Append(Guid id, int dataSize) => Append(id, dataSize, new byte[0]);
public Segment Append(Guid id, int dataSize, byte[] payload)
{
dataSize += Segment.HeaderSize;
CheckGranularity(ref dataSize);
Segment segment = new Segment(AppendOffset, dataSize) { ID = id, };
Write(segment, payload);
segments.Add(segment);
AppendOffset = segment.NextOffset;
return segment;
}
public Segment Join(Segment a,Segment b)
{
if (a.NextOffset != b.Offset)
throw new ArgumentException("Segments to join are not siblings");
a.Size += b.Size;
WriteSegmentHead(a);
segments.Remove(b);
return a;
}
public Segment Split(Segment segment,int dataSize)
{
int requestedSize = dataSize + Segment.HeaderSize;
CheckGranularity(ref requestedSize);
if (requestedSize < segment.Size)
{
Segment split = new Segment(segment.Offset + requestedSize,segment.Size - requestedSize);
segment.Size = requestedSize;
segments.Add(split);
WriteSegmentHead(split);
WriteSegmentHead(segment);
return split;
}
return null;
}
public byte[] Read(Segment segment)
{
fileStream.Position = segment.PayloadOffset;
return fileStream.ReadBytes(segment.PayloadSize);
}
private void WriteSegmentHead(Segment segment)
{
fileStream.Position = segment.Offset;
fileStream.WriteInteger(segment.Size);
fileStream.WriteBytes(segment.ID.ToByteArray());
fileStream.WriteDouble(segment.TimeStamp.ToUnixTimeMilliseconds());
}
public void Write(Segment segment,byte[] bytes)
{
AssertOpen();
if (bytes.Length > (segment.PayloadSize))
throw new ArgumentOutOfRangeException(nameof(bytes));
segment.TimeStamp = DateTime.Now;
WriteSegmentHead(segment);
fileStream.Position = segment.PayloadOffset;
fileStream.WriteBytes(bytes);
fileStream.WriteBytes(new byte[segment.PayloadSize - bytes.Length]);
}
/**
* Position fileStream to offset, read Segment Header and construct a Segment instance to return
**/
private Segment ScanSegment(int offset)
{
fileStream.Position = offset;
int size = fileStream.ReadInteger();
byte[] id = fileStream.ReadBytes(16);
double timestamp = fileStream.ReadDouble();
return new Segment(offset, size, DateTimeExtensions.FromUnixTimeMilliseconds(timestamp)) { ID = new Guid(id), };
}
/**
* Start at First Segment Offset and scan for all Segments in file
**/
private void Scan()
{
int offset = FirstOffset;
Segment segment = null;
while (offset < fileStream.Length)
{
segment = ScanSegment(offset);
segments.Add(segment);
offset = segment.NextOffset;
}
AppendOffset = offset;
}
public void Close()
{
lock (this){
AssertOpen();
fileStream.Position = 0;
fileStream.WriteBytes(MagicBytes);
fileStream.WriteInteger(FileVersion);
LastCloseTimestamp = (long)DateTime.Now.ToUnixTimeMilliseconds();
fileStream.WriteLong(LastCloseTimestamp);
fileStream.WriteInteger(FirstOffset);
fileStream.WriteInteger(GranularWidth);
fileStream.Close();
fileStream.Dispose();
fileStream = null;
}
}
public class Segment
{
public static readonly int HeaderSize = 32;
public int Offset { get; }
public int Size { get; set; }
public int PayloadOffset => Offset + HeaderSize;
public int PayloadSize => Size - HeaderSize;
public Guid ID { get; set; }
public DateTime TimeStamp { get; set; }
public int NextOffset => Offset + Size;
public Segment(int offset, int size)
{
Offset = offset;
Size = size;
}
public Segment(int offset, int size,DateTime timestamp)
:this(offset,size)
{
TimeStamp = timestamp;
}
public Segment Split(int splitSize)
{
if (splitSize >= Size)
throw new ArgumentOutOfRangeException(nameof(splitSize));
Segment splitArea = new Segment(Offset + Size - splitSize, splitSize);
Size -= splitSize;
return splitArea;
}
public override string ToString()
{
return string.Format("[StorageArea Offset=0x{0:x8} Size=0x{1:x8}]", Offset, Size);
}
}
}
}
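
A note on the arithmetic: CheckGranularity rounds every segment (32-byte header plus payload) up to the next multiple of 1 << GranularWidth, so with the default width of 12 all segments occupy whole 4 KiB granules. A stand-alone sketch of that rounding, with constants copied from the class above (it does not touch the file format itself):

using System;

class GranularitySketch
{
    const int HeaderSize = 32;                            // SegmentedFile.Segment.HeaderSize
    const int GranularWidth = 12;                         // default above: 4 KiB granules
    const int GranularityMask = (1 << GranularWidth) - 1;

    // Same rounding as SegmentedFile.CheckGranularity.
    static int RoundUp(int size) => (size + GranularityMask) & ~GranularityMask;

    static void Main()
    {
        Console.WriteLine(RoundUp(100 + HeaderSize));    // 4096: header + 100 payload bytes fit one granule
        Console.WriteLine(RoundUp(4065 + HeaderSize));   // 8192: 4097 bytes spill into a second granule
    }
}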

View File

@@ -15,7 +15,10 @@ namespace ln.types.odb.ng.storage
public int Offset { get; }
public int Size { get; set; }
public int NextOffset => Offset + Size;
public Guid ID { get; set; }
public DateTime TimeStamp { get; set; }
public int NextOffset => Offset + Size;
public StorageArea(int offset,int size)
{

View File

@@ -15,7 +15,20 @@ namespace ln.types.odb.ng.storage.bases
storageTimestamps[documentID] = dateTime;
}
protected void ReleaseStorageTimestamp(Guid documentID)
{
storageTimestamps.Remove(documentID);
}
public override DateTime GetStorageTimestamp(Guid documentID)
{
return storageTimestamps[documentID];
}
}
public override void Dispose()
{
storageTimestamps = null;
base.Dispose();
}
}
}

View File

@@ -0,0 +1,93 @@
using System;
using System.Collections.Generic;
using ln.types.odb.values;
namespace ln.types.odb.ng.storage.bases
{
public abstract class ChainedStorage : IStorage
{
public IStorage Storage { get; }
public bool IsOpen => Storage.IsOpen;
public ChainedStorage(IStorage storage)
{
Storage = storage;
}
public virtual bool Open()
{
return Storage.Open();
}
public virtual void Close()
{
Storage.Close();
}
public virtual Document Load(Guid documentID)
{
return Storage.Load(documentID);
}
public virtual void Save(Document document)
{
Storage.Save(document);
}
public virtual bool LockDocument(Guid documentID)
{
return Storage.LockDocument(documentID);
}
public virtual bool ReleaseDocument(Guid documentID)
{
return Storage.ReleaseDocument(documentID);
}
public virtual void SaveRelease(Document document)
{
Storage.SaveRelease(document);
}
public virtual void Delete(Guid documentID)
{
Storage.Delete(documentID);
}
public virtual bool Contains(Guid documentID)
{
return Storage.Contains(documentID);
}
public virtual IEnumerable<Guid> GetDocumentIDs()
{
return Storage.GetDocumentIDs();
}
public virtual IEnumerable<Guid> GetDocumentIDs(string path, Predicate<ODBValue> predicate)
{
return Storage.GetDocumentIDs(path, predicate);
}
public virtual DateTime GetStorageTimestamp(Guid documentID)
{
return Storage.GetStorageTimestamp(documentID);
}
public virtual void EnsureIndex(params string[] path)
{
Storage.EnsureIndex(path);
}
public virtual void Dispose()
{
Storage.Dispose();
}
public bool GetDocumentLocked(Guid documentID)
{
return Storage.GetDocumentLocked(documentID);
}
}
}
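
ChainedStorage is a plain decorator over IStorage: every member delegates to the wrapped Storage, so subclasses (such as SessionStorage below) override only the calls they intercept. A hypothetical wrapper as a minimal sketch, not part of this commit:

using System;
using ln.types.odb.ng;
using ln.types.odb.ng.storage;
using ln.types.odb.ng.storage.bases;

class LoggingStorage : ChainedStorage
{
    public LoggingStorage(IStorage storage) : base(storage) { }

    public override Document Load(Guid documentID)
    {
        Console.WriteLine("Load {0}", documentID);   // intercept reads only
        return base.Load(documentID);                // everything else falls through
    }
}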

View File

@@ -1,50 +1,119 @@
using System;
using System.Collections.Generic;
using ln.types.odb.values;
using System.Linq;
using ln.types.btree;
using System.Threading;
namespace ln.types.odb.ng.storage.bases
{
public abstract class StorageBase
{
public StorageBase()
{
}
public abstract class StorageBase : IStorage
{
BTree<Guid> lockedDocuments = new BTree<Guid>();
public abstract bool Open();
public abstract void Close();
public abstract bool IsOpen { get; }
/* Load */
public abstract Document Load(Guid documentID, out DateTime storageTimestamp);
public virtual Document Load(Guid documentID)
{
return Load(documentID, out DateTime timestamp);
}
public virtual DateTime GetStorageTimestamp(Guid documentID)
{
Load(documentID, out DateTime timestamp);
return timestamp;
}
/* Store */
public abstract void Save(Document document, out DateTime storageTimestamp);
public virtual void Save(Document document)
{
Save(document, out DateTime timestamp);
}
/* Delete */
public abstract void Delete(Guid documentID);
/* Enumeration */
public abstract IEnumerable<Guid> GetDocumentIDs();
public abstract IEnumerable<Guid> GetDocumentIDs(string path, Predicate<ODBValue> predicate);
/* Indeces */
public abstract void EnsureIndex(params string[] path);
public StorageBase()
{
}
public abstract bool Open();
public abstract void Close();
public abstract bool IsOpen { get; }
public abstract Document Load(Guid documentID);
public abstract void Save(Document document);
public abstract void Delete(Guid documentID);
}
public virtual bool GetDocumentLocked(Guid documentID)
{
lock (this)
{
return lockedDocuments.ContainsKey(documentID);
}
}
public virtual bool LockDocument(Guid documentID)
{
lock (this)
{
if (lockedDocuments.ContainsKey(documentID))
return false;
lockedDocuments.Add(documentID);
return true;
}
}
public virtual bool ReleaseDocument(Guid documentID)
{
lock (this)
{
lockedDocuments.Remove(documentID);
return true;
}
}
public virtual void SaveRelease(Document document)
{
lock (this)
{
ReleaseDocument(document.ID);
Save(document);
}
}
public virtual DateTime GetStorageTimestamp(Guid documentID)
{
Document doc = Load(documentID);
return doc.StorageTimeStamp;
}
/* Enumeration */
public abstract IEnumerable<Guid> GetDocumentIDs();
public abstract IEnumerable<Guid> GetDocumentIDs(string path, Predicate<ODBValue> predicate);
/* Indeces */
public abstract void EnsureIndex(params string[] path);
public virtual void Dispose()
{
if (IsOpen)
Close();
}
public virtual bool Contains(Guid documentID)
{
return GetDocumentIDs().Contains(documentID);
}
class LockedDocumentID
{
public Guid DocumentID { get; }
int waiting;
public LockedDocumentID(Guid documentID)
{
DocumentID = documentID;
}
public void Lock()
{
waiting++;
Monitor.Enter(this);
}
public bool Release()
{
waiting--;
Monitor.Exit(this);
return waiting > 0;
}
public override bool Equals(object obj)
{
return (obj is LockedDocumentID) && DocumentID.Equals((obj as LockedDocumentID).DocumentID);
}
public override int GetHashCode() => DocumentID.GetHashCode();
}
}
}
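
The lockedDocuments bookkeeping gives callers a lock/save/release protocol: LockDocument marks an ID, GetDocumentLocked lets implementations refuse concurrent writes, and SaveRelease drops the mark and persists in one call. A sketch of the intended sequence, assuming storage is any StorageBase subclass and doc an already loaded Document:

if (storage.LockDocument(doc.ID))
{
    // ... mutate doc while other writers are refused ...
    storage.SaveRelease(doc);   // drops the mark, then saves
}
else
{
    // another caller holds the mark; implementations like SegmentedFileStorage
    // throw LockingException from Save/Delete in this state
}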

View File

@@ -0,0 +1,96 @@
using System;
using System.Collections.Generic;
using ln.types.odb.ng.storage.bases;
using ln.types.odb.values;
using ln.types.btree;
using ln.types.cache;
using ln.types.threads;
namespace ln.types.odb.ng.storage.cache
{
public class CachingStorage : StorageBase
{
public int CacheSize => cache.Count;
public int MaxCacheSize
{
get => cache.MaxCacheSize;
set => cache.MaxCacheSize = value;
}
IStorage storage;
Cache<Guid, Document> cache = new Cache<Guid, Document>();
public CachingStorage(IStorage storage)
{
this.storage = storage;
}
public override bool IsOpen => storage.IsOpen;
public override void Close()
{
if (IsOpen)
{
cache.Clear();
storage.Close();
}
}
public override void Delete(Guid documentID)
{
lock (this)
{
if (GetDocumentLocked(documentID))
throw new LockingException();
storage.Delete(documentID);
cache.Forget(documentID);
}
}
public override void EnsureIndex(params string[] path) => storage.EnsureIndex(path);
public override IEnumerable<Guid> GetDocumentIDs() => storage.GetDocumentIDs();
public override IEnumerable<Guid> GetDocumentIDs(string path, Predicate<ODBValue> predicate) => storage.GetDocumentIDs(path, predicate);
public override Document Load(Guid documentID)
{
if (!Contains(documentID))
throw new KeyNotFoundException();
Document document = null;
if (!cache.TryGet(documentID,out document))
document = storage.Load(documentID);
cache.Ensure(documentID, document);
return document;
}
public override bool Open()
{
return storage.Open();
}
public override void Save(Document document)
{
lock (this)
{
if (GetDocumentLocked(document.ID))
throw new LockingException();
storage.Save(document);
}
}
public override bool Contains(Guid documentID) => storage.Contains(documentID);
public override void Dispose()
{
storage.Dispose();
cache.Clear();
cache = null;
}
}
}
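
Reads populate a size-bounded Cache<Guid, Document>, while Save and Delete pass through to the wrapped storage after the lock check (Delete also evicts the cached copy). A minimal composition sketch with a hypothetical path:

using ln.types.odb.ng.storage;
using ln.types.odb.ng.storage.cache;

class CachingSketch
{
    static void Main()
    {
        IStorage backing = new SegmentedFileStorage("/var/data/odb/customers");
        IStorage cached = new CachingStorage(backing) { MaxCacheSize = 1024 };
        cached.Open();
        // The first Load of an ID hits the file; repeated Loads come from the cache.
    }
}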

View File

@@ -12,15 +12,17 @@ using System.Collections.Generic;
using System.IO;
using System.Diagnostics;
using ln.logging;
using ln.types.odb.ng.storage.cache;
namespace ln.types.odb.ng.storage
namespace ln.types.odb.ng.storage.fs
{
public class FSStorageContainer : IStorageContainer,IDisposable
{
public string BasePath { get; }
public int DefaultCacheSize { get; set; }
FileStream lockFile;
Dictionary<string, FSStorage> storages = new Dictionary<string, FSStorage>();
FileStream lockFile;
Dictionary<string, IStorage> storages = new Dictionary<string, IStorage>();
public FSStorageContainer(string basePath)
{
@@ -31,7 +33,7 @@ namespace ln.types.odb.ng.storage
private void AssertOpen()
{
if (!IsOpen)
throw new IOException("FSSTorage not open");
throw new IOException("FSStorage not open");
}
public void Close()
@@ -40,7 +42,7 @@ namespace ln.types.odb.ng.storage
{
AssertOpen();
foreach (FSStorage storage in storages.Values)
foreach (IStorage storage in storages.Values)
{
if (storage.IsOpen)
storage.Close();
@@ -62,8 +64,15 @@ namespace ln.types.odb.ng.storage
{
AssertOpen();
if (!storages.ContainsKey(storageName))
storages.Add(storageName, new FSStorage(Path.Combine(BasePath, storageName)));
if (!storages.ContainsKey(storageName))
{
IStorage storage = new SegmentedFileStorage(Path.Combine(BasePath, storageName));
if (DefaultCacheSize > 0)
storage = new CachingStorage(storage) { MaxCacheSize = DefaultCacheSize, };
storages.Add(storageName, storage);
}
return storages[storageName];
}
}
@@ -118,13 +127,6 @@ namespace ln.types.odb.ng.storage
lockFile.Flush();
}
}
foreach (String storagePath in Directory.EnumerateDirectories(BasePath))
{
FSStorage storage = new FSStorage(storagePath);
storages.Add(storage.StorageName, storage);
}
}
}
return this;
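
With these changes the container lazily creates one SegmentedFileStorage per storage name and, whenever DefaultCacheSize is positive, transparently wraps it in a CachingStorage. A sketch of the resulting setup, assuming a base directory:

using ln.types.odb.ng.storage;
using ln.types.odb.ng.storage.fs;

class ContainerSketch
{
    static void Main()
    {
        FSStorageContainer container = new FSStorageContainer("/var/data/odb");
        container.DefaultCacheSize = 512;   // 0 leaves storages unwrapped
        container.Open();
        IStorage customers = container.GetStorage("customers");
        // customers is now a CachingStorage around a SegmentedFileStorage
    }
}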

View File

@@ -0,0 +1,357 @@
// /**
// * File: SegmentedFileStorage.cs
// * Author: haraldwolff
// *
// * This file and its content are copyrighted by the Author and / or copyright holder.
// * Any use without proper permission is illegal and may lead to legal actions.
// *
// *
// **/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using ln.logging;
using ln.types.odb.values;
using ln.types.odb.ng.index;
using ln.types.odb.ng.storage.bases;
using ln.types.btree;
using ln.types.threads;
namespace ln.types.odb.ng.storage
{
/**
* SegmentedFileStorage
*
* Directory Layout:
*
* <BasePath>/<StorageName>
* /data.odb Serialized Document Data
* /data.idx Serialized Lookup Index for Documents and Free Areas
*
* data.odb
* ----------
* 0000 4 MAGIC Bytes
* 0004 4 Version
* 0008 8 LastCloseTimestamp
* 0010 4 FirstOffset
* 0014 4 GranularWidth
* 0018 8 Reserved 0
*
**/
public class SegmentedFileStorage : StorageBase
{
public String StoragePath { get; }
public String DataFileName => System.IO.Path.Combine(StoragePath, "data.odb");
SegmentedFile segmentedFile;
MappingBTree<int, SegmentedFile.Segment> unusedSegments = new MappingBTree<int, SegmentedFile.Segment>((s)=>s.Offset);
MappingBTree<Guid, SegmentedFile.Segment> usedSegments = new MappingBTree<Guid, SegmentedFile.Segment>((s)=>s.ID);
IndexPath.DocumentPath indexRoot = new IndexPath.DocumentPath();
public SegmentedFileStorage(string storagePath)
{
StoragePath = storagePath;
}
private void AssertOpen()
{
if (!IsOpen)
throw new IOException("Not open");
}
public override bool IsOpen => ((segmentedFile != null) && segmentedFile.IsOpen);
public override bool Open()
{
if (!IsOpen)
{
try
{
if (!Directory.Exists(StoragePath))
Directory.CreateDirectory(StoragePath);
segmentedFile = new SegmentedFile(DataFileName);
segmentedFile.Open();
foreach (SegmentedFile.Segment segment in segmentedFile.Segments)
{
if (Guid.Empty.Equals(segment.ID))
{
unusedSegments.Add(segment);
}
else
{
SegmentedFile.Segment existing = null;
if (usedSegments.TryGet(segment.ID, ref existing))
{
if (existing.TimeStamp < segment.TimeStamp)
{
usedSegments.RemoveKey(existing.ID); // remove while the key is still valid
existing.ID = Guid.Empty;
segmentedFile.Write(existing, new byte[0]);
unusedSegments.Add(existing);
usedSegments.Add(segment); // keep the newer segment reachable
}
else
{
segment.ID = Guid.Empty;
segmentedFile.Write(segment, new byte[0]);
unusedSegments.Add(segment);
}
}
else
{
usedSegments.Add(segment);
}
}
}
if (File.Exists(System.IO.Path.Combine(StoragePath, "indeces.lst")))
{
bool needsRebuild = false;
using (FileStream indexLst = new FileStream(System.IO.Path.Combine(StoragePath, "indeces.lst"), FileMode.Open))
{
byte[] indexLstBytes = indexLst.ReadBytes((int)indexLst.Length);
ODBList idxList = new ODBList(indexLstBytes, 0, indexLstBytes.Length);
foreach (ODBValue indexName in idxList)
{
indexRoot.Ensure(IndexPath.SplitPath(indexName.AsString));
}
}
foreach (Index index in indexRoot.GetIndeces())
{
if (!index.LoadIndex(StoragePath, segmentedFile.LastCloseTimestamp))
needsRebuild = true;
}
if (needsRebuild)
RebuildIndeces();
}
return true;
} catch (Exception)
{
segmentedFile?.Close();
segmentedFile = null;
usedSegments.Clear();
unusedSegments.Clear();
throw;
}
}
return false;
}
public override void Close()
{
lock (this){
AssertOpen();
segmentedFile.Close();
List<String> indexNames = new List<string>();
foreach (Index index in indexRoot.GetIndeces())
{
indexNames.Add(index.IndexName);
index.SaveIndex(StoragePath, segmentedFile.LastCloseTimestamp);
}
ODBList indexList = new ODBList();
indexList.AddRange(indexNames.Select((x) => ODBValue.FromNative(x)));
FileStream indexLst = new FileStream(System.IO.Path.Combine(StoragePath, "indeces.lst"), FileMode.Create);
indexLst.WriteBytes(indexList.ToStorage());
indexLst.Close();
indexLst.Dispose();
}
}
public override IEnumerable<Guid> GetDocumentIDs()
{
lock (this)
{
return usedSegments.Keys.ToArray();
}
}
public override Document Load(Guid documentID)
{
lock (this)
{
SegmentedFile.Segment segment = null;
if (!usedSegments.TryGet(documentID,ref segment))
throw new KeyNotFoundException();
return LoadDocument(segment);
}
}
public override bool Contains(Guid documentID) => usedSegments.ContainsKey(documentID);
private Document LoadDocument(SegmentedFile.Segment segment)
{
byte[] storageBytes = segmentedFile.Read(segment);
try
{
return new Document(segment.ID, storageBytes) { StorageTimeStamp = segment.TimeStamp, };
} catch (Exception e)
{
Logging.Log(LogLevel.DEBUG, "Exception while Deserializing Document from FSStorage: {1} ID={0}",segment.ID,StoragePath);
Logging.Log(LogLevel.DEBUG, "StorageArea: {0}", segment);
Logging.Log(e);
throw;
}
}
public override void Save(Document document)
{
lock (this)
{
if (GetDocumentLocked(document.ID))
throw new LockingException();
byte[] storageBytes = document.ToStorage();
SegmentedFile.Segment segment = PopUnusedSegment(storageBytes.Length);
if (segment == null)
{
segment = segmentedFile.Append(document.ID,storageBytes);
}
else
{
segment.ID = document.ID;
segmentedFile.Write(segment,storageBytes);
}
indexRoot.Replace(document.ID, document);
SegmentedFile.Segment previousSegment = null;
if (usedSegments.TryGet(document.ID,ref previousSegment))
{
usedSegments.RemoveKey(document.ID);
previousSegment.ID = Guid.Empty;
segmentedFile.Write(previousSegment,new byte[0]);
PushUnusedSegment(previousSegment);
}
document.StorageTimeStamp = segment.TimeStamp;
usedSegments.Add(segment);
}
}
public override void Delete(Guid documentID)
{
lock (this)
{
if (GetDocumentLocked(documentID))
throw new LockingException();
SegmentedFile.Segment segment = null;
if (usedSegments.TryGet(documentID, ref segment))
{
usedSegments.RemoveKey(documentID);
segment.ID = Guid.Empty;
segmentedFile.Write(segment, new byte[0]);
indexRoot.Remove(documentID);
PushUnusedSegment(segment);
}
}
}
private SegmentedFile.Segment PopUnusedSegment(int payloadSize)
{
foreach (SegmentedFile.Segment segment in unusedSegments)
{
if (segment.PayloadSize >= payloadSize)
{
unusedSegments.Remove(segment);
return segment;
}
}
return null;
}
private void PushUnusedSegment(SegmentedFile.Segment segment)
{
unusedSegments.Add(segment);
}
public override DateTime GetStorageTimestamp(Guid documentID)
{
if (usedSegments.ContainsKey(documentID))
return usedSegments[documentID].TimeStamp;
return default(DateTime);
}
public override IEnumerable<Guid> GetDocumentIDs(string path, Predicate<ODBValue> predicate)
{
lock (this)
{
index.Path p = index.IndexPath.SplitPath(path);
if (indexRoot.Indexed(p))
{
return indexRoot.GetDocumentIDs(p, predicate);
}
else
{
HashSet<Guid> documentIDs = new HashSet<Guid>();
IEnumerable<Guid> ids = GetDocumentIDs();
foreach (Guid documentID in ids)
{
Document document = Load(documentID);
if (predicate(document[path]))
documentIDs.Add(documentID);
}
return documentIDs;
}
}
}
public override void EnsureIndex(params string[] paths)
{
lock (this)
{
bool needsRebuild = false;
foreach (String path in paths)
{
if (indexRoot.Ensure(IndexPath.SplitPath(path)))
needsRebuild = true;
}
if (needsRebuild)
RebuildIndeces();
}
}
public void RebuildIndeces()
{
Logging.Log(LogLevel.INFO, "FSStorage: RebuildIndeces()");
foreach (Guid documentID in GetDocumentIDs())
{
Document document = Load(documentID);
indexRoot.Replace(documentID, document);
}
}
}
}

View File

@@ -0,0 +1,124 @@
using System;
using System.Collections.Generic;
using ln.types.odb.values;
using System.Linq;
using ln.types.odb.ng.storage.bases;
using ln.types.threads;
namespace ln.types.odb.ng.storage.session
{
class SessionStorage : ChainedStorage
{
public SessionStorageContainer SessionContainer { get; }
Dictionary<Guid, CachedDocument> documentCache = new Dictionary<Guid, CachedDocument>();
public SessionStorage(SessionStorageContainer session, IStorage storage)
:base(storage)
{
SessionContainer = session;
}
public override Document Load(Guid documentID)
{
lock (this)
{
if (!Storage.Contains(documentID))
throw new KeyNotFoundException();
if (!documentCache.ContainsKey(documentID))
{
Document cacheDocument = Storage.Load(documentID);
documentCache.Add(documentID, new CachedDocument(cacheDocument));
}
else
{
DateTime storageTimeStamp = GetStorageTimestamp(documentID);
if (!storageTimeStamp.Equals(documentCache[documentID].CachedCopy.StorageTimeStamp))
{
Document storageDocument = Storage.Load(documentID);
CachedDocument cd = documentCache[documentID];
storageDocument.CloneTo(cd.CachedCopy);
storageDocument.CloneTo(cd.WorkingCopy);
}
}
return documentCache[documentID].WorkingCopy;
}
}
public override void Save(Document document)
{
lock (this)
{
if (GetDocumentLocked(document.ID))
throw new LockingException();
if (!documentCache.ContainsKey(document.ID))
{
Storage.Save(document);
documentCache.Add(document.ID, new CachedDocument(document.Clone() as Document, document));
}
else
{
if (Storage.LockDocument(document.ID))
{
DateTime storageTimestamp = Storage.GetStorageTimestamp(document.ID);
Document cacheDocument = documentCache[document.ID].CachedCopy;
if (cacheDocument.StorageTimeStamp.Equals(storageTimestamp))
{
Storage.SaveRelease(document);
documentCache[document.ID] = new CachedDocument(document.Clone() as Document, document);
}
else
{
throw new NotImplementedException();
}
}
else
{
throw new LockingException();
}
}
}
}
public override void Delete(Guid documentID)
{
lock (this)
{
if (GetDocumentLocked(documentID))
throw new LockingException();
if (documentCache.ContainsKey(documentID))
{
documentCache.Remove(documentID);
}
Storage.Delete(documentID);
}
}
class CachedDocument
{
public Document CachedCopy;
public Document WorkingCopy;
public bool ConcurrentlyChanged;
public CachedDocument(Document cachedCopy)
{
CachedCopy = cachedCopy;
WorkingCopy = cachedCopy.Clone() as Document;
ConcurrentlyChanged = false;
}
public CachedDocument(Document cachedCopy, Document workingCopy)
{
CachedCopy = cachedCopy;
WorkingCopy = workingCopy;
ConcurrentlyChanged = false;
}
}
}
}
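
Save above is optimistic concurrency control: under a document lock it compares the cached copy's StorageTimeStamp against the backing storage's current timestamp and writes only if nothing changed underneath; merging a concurrent change is still a NotImplementedException. A sketched flow, assuming storageA and storageB are two SessionStorage instances over one backing store (see the container sketch after the next file):

Document a = storageA.Load(id);   // caches CachedCopy + WorkingCopy with a timestamp
Document b = storageB.Load(id);

storageB.Save(b);                 // timestamps still match: written, cache refreshed

storageA.Save(a);                 // backing timestamp has moved on: currently
                                  // surfaces as NotImplementedException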

View File

@@ -0,0 +1,67 @@
using System;
using System.Collections.Generic;
using ln.types.odb.values;
using ln.logging;
using System.Linq;
namespace ln.types.odb.ng.storage.session
{
public enum SessionSynchronisationStrategy
{
BIDIRECTIONAL,
FORWARDONLY,
REJECTCHANGED
}
public class SessionStorageContainer : IStorageContainer
{
public IStorageContainer StorageContainer { get; private set; }
public Mapper ODBMapper { get; private set; }
public SessionSynchronisationStrategy SynchronisationStrategy { get; set; } = SessionSynchronisationStrategy.REJECTCHANGED;
public bool IsOpen => StorageContainer.IsOpen;
public SessionStorageContainer(IStorageContainer storageContainer)
{
StorageContainer = storageContainer;
ODBMapper = Mapper.Default;
}
private Dictionary<string, IStorage> storages = new Dictionary<string, IStorage>();
public IStorage GetStorage(string storageName)
{
if (storages.ContainsKey(storageName))
return storages[storageName];
IStorage storage = StorageContainer.GetStorage(storageName);
storages.Add(storageName, new SessionStorage(this,storage));
if (!storage.IsOpen)
storage.Open();
return storages[storageName];
}
public IStorageContainer Open()
{
StorageContainer.Open();
return this;
}
public void Close()
{
StorageContainer.Close();
}
public IEnumerable<string> GetStorageNames()
{
return StorageContainer.GetStorageNames();
}
public void Dispose()
{
}
}
}
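
Composing the layers: each SessionStorageContainer hands out SessionStorage wrappers over whatever the inner container provides, so several sessions can share one file-backed store. A minimal sketch, assuming a base directory:

using ln.types.odb.ng.storage;
using ln.types.odb.ng.storage.fs;
using ln.types.odb.ng.storage.session;

class SessionSketch
{
    static void Main()
    {
        IStorageContainer fs = new FSStorageContainer("/var/data/odb");
        IStorageContainer sessionA = new SessionStorageContainer(fs).Open();   // opens fs too
        IStorageContainer sessionB = new SessionStorageContainer(fs);          // shares the open container

        IStorage a = sessionA.GetStorage("customers");   // independent working copies,
        IStorage b = sessionB.GetStorage("customers");   // same backing SegmentedFileStorage
    }
}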

View File

@@ -0,0 +1,10 @@
using System;
namespace ln.types.threads
{
public class LockingException : Exception
{
public LockingException()
{
}
}
}