package net.sourceforge.pain.db;
import net.sourceforge.pain.util.*;
import javax.xml.parsers.*;
import java.io.*;
import java.nio.channels.*;
import java.text.*;
import java.util.*;
/**
* PAiN Db is a not-thread-safe, semi-object-oriented, main-memory — and very buggy — database.<br>
* It's used by <a href="http://pain.sf.net">PAiN Mud Codebase</a> as persistence engine.<br>
* However, PAiN DB is a <b>general purpose database</b>: it has great performance,<br>
* is simple and open source, and can be used in any Java-based open-source project.<br>
* <i>Distributed under the GPL licence</i>
*/
public final class PainDB {
public static final String DB_VERSION = "0.24";
private static final int[] zeroPageNumsStub = new int[0];
private static final Object[] constructParams = new Object[0];
private DbObject[] objects = null;
private final Map dbClassByClass = new HashMap();
private final Map dbClassByName = new HashMap();
private final DbIntBuffer dirty = new DbIntBuffer(1024); //indexIds of dirties
private final DbIntBuffer freeIndexIds = new DbIntBuffer();
private int maxUsedIndexId = 0; // used only during startup, as optimization param
private long currentVersionId;
private final DbObjectMapper objectMapper;
private DbPageMapper pageMapper;
private DbRuntimeMetaClass meta;
private int rootIndex;
private boolean active;
DbTransactionContext activeTrans;
private int transNo = 0;
/**
* allows using setters outside of transactions -> such changes cannot be rolled back (a performance trade-off),
* but if the database is closed before a flush, these changes will be lost
*/
public boolean ALLOW_PLAIN_WRITE = true;
/**
* MANUAL_FLUSH mode is a kind of delayed commit: the user should manually call flush() (outside of a transaction)
* to write all data to disk. If MANUAL_FLUSH_MODE is false, every time a T1 (upper level) transaction
* is committed, PainDB will automatically call the flush method.
* 'plain writes' must always be flushed manually
*/
public boolean MANUAL_FLUSH_MODE = true;
/**
* Opens the specified database file. Creates it if the file does not exist.
* @param fileName - name of database file
* @throws Exception - if the file is corrupted or is not a paindb database file
*/
public PainDB(final String fileName) throws Exception {
try {
// create the page <-> file mapping; fails fast on an unreadable file
pageMapper = new DbPageMapper(fileName);
// objectMapper serializes/deserializes objects to/from pages
objectMapper = new DbObjectMapper(this, pageMapper);
// meta is the built-in "class of classes" used for class images
meta = new DbRuntimeMetaClass(this);
// load all classes and objects from the file (initializes a new db when the file is empty)
readDB();
active = true;
} catch (Exception e) {
// constructor failed: null out partially initialized state before rethrowing
objects = null;
pageMapper = null;
meta = null;
throw e;
}
}
/**
 * flushes all changes done after the previous flush to disk.
 * Called automatically after each upper-level transaction commit if
 * MANUAL_FLUSH_MODE is false.
 * @throws IOException if any IO error occurs.
 * @throws IllegalStateException if there is an active transaction
 *         (flushing mid-transaction would persist uncommitted state)
 */
public void flush() throws IOException {
    checkDbState();
    if (activeTrans != null) {
        // IllegalStateException (a RuntimeException) — consistent with commitTransaction()
        throw new IllegalStateException("active transaction found!");
    }
    _flush();
}
// writes the db meta info (page 0) and all dirty objects to the page mapper,
// then flushes the mapper to disk. Does not check transaction state: the
// public flush() is responsible for that.
private synchronized void _flush() throws IOException {
// flushing db metainfo: page 0 layout is versionId(8) + maxUsedIndexId(4) + rootIndex(4)
final byte[] firstPage = pageMapper.getPageImage();
DbPacker.pack8(firstPage, 0, currentVersionId);
DbPacker.pack4(firstPage, 8, maxUsedIndexId);
DbPacker.pack4(firstPage, 12, rootIndex);
// zero-fill the rest of the first page
System.arraycopy(pageMapper.ZERO_PAGE, 0, firstPage, 16, pageMapper.pageSize - 16);
pageMapper.writePage(0, firstPage);
// flushing all dirty objects
final long time = System.currentTimeMillis();
final int size = dirty.getSize();
for (int i = 0; i < size; i++) {
final DbObject obj = objects[dirty.data[i]];
if (obj == null || obj.globalState == DbConstants.STATE_OBJ_CLEAN) {
// obj was deleted, or this is a duplicate dirty entry for the same indexId (old deleted / new created)
continue;
}
objectMapper.writeObject(obj);
obj.globalState = DbConstants.STATE_OBJ_CLEAN;
}
dirty.clear();
if (Log.isDebugEnabled()) {
Log.debug("PAINDB:flush without disk flush time:" + (System.currentTimeMillis() - time));
}
pageMapper.flush();
}
/**
 * Resolves a DbClass by its serialized id.
 * @param classId - OID of the DbClass
 * @return DbClass instance or null if no DbClass with the specified classId is found
 */
public DbClass getClass(final Object classId) {
    final DbOid oid = (DbOid) classId;
    if (!isIdInRange(oid.indexId)) {
        return null;
    }
    final DbObject image = objects[oid.indexId];
    if (image == null
            || image.dbClass != meta // slot does not hold a class image
            || image.versionId != oid.versionId // stale oid: the index slot was reused
            || (image.transContext != null && image.transContext.state == DbConstants.STATE_OBJ_DELETED)) {
        return null;
    }
    return ((DbClassImage) image).getDbRuntimeClass();
}
/**
 * Resolves a DbObject by its serialized id.
 * @param objectId - serialized unique object id
 * @return DbObject for the specified objectId or null if no object is found
 */
public DbObject getObject(final Object objectId) {
    final DbOid oid = (DbOid) objectId;
    if (!isIdInRange(oid.indexId)) {
        return null;
    }
    final DbObject candidate = objects[oid.indexId];
    if (candidate == null
            || candidate.dbClass == meta // class images are not returned as plain objects
            || candidate.versionId != oid.versionId // stale oid: the index slot was reused
            || (candidate.transContext != null && candidate.transContext.state == DbConstants.STATE_OBJ_DELETED)) {
        return null;
    }
    return candidate;
}
/**
 * closes the database. All database objects are DETACHED after this method call.
 * Flushes all pending changes first when MANUAL_FLUSH_MODE is false (auto-flush mode).
 * The database must not have an active transaction during this call.
 * @throws IllegalStateException if a transaction is still active
 */
public void close() {
    checkDbState();
    if (activeTrans != null) {
        // was: throw new RuntimeException() with no message at all
        throw new IllegalStateException("active transaction found!");
    }
    if (!MANUAL_FLUSH_MODE) {
        // auto-flush mode: persist pending changes before tearing down
        try {
            flush();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    // detach all classes so their objects report a closed-db state
    for (Iterator it = dbClassByClass.values().iterator(); it.hasNext();) {
        final DbClassImpl dbClass = (DbClassImpl) it.next();
        dbClass.setDbClosed();
    }
    dbClassByClass.clear();
    dbClassByName.clear();
    try {
        pageMapper.close();
    } catch (Exception e) {
        // log and continue: the db is unusable after this point either way
        Log.error(e);
    }
    objects = null;
    active = false;
}
// looks up the runtime class registered for the given java class, or null if none
public DbClass getDbClass(final Class javaClazz) {
    final Object dbClass = dbClassByClass.get(javaClazz);
    return (DbClass) dbClass;
}
/**
 * root is a simple mark on an object; it is allowed to have no root in the DB.
 * @return the database root object, or null when no live root is set
 */
public DbObject getRoot() {
    checkDbState();
    final int rootId;
    if (activeTrans != null) {
        rootId = activeTrans.rootIndex; // the transaction may have re-pointed the root
    } else {
        rootId = rootIndex;
    }
    if (rootId == -1 || !isIdInRange(rootId)) {
        return null;
    }
    final DbObject root = getObjectByIndexId(rootId);
    if (root == null) {
        return null;
    }
    if (root.isDeleted() || root.isDetached()) {
        return null;
    }
    return root;
}
// guard used by every public entry point: rejects calls on a closed database
private void checkDbState() {
    if (active) {
        return;
    }
    throw new IllegalStateException("Db was closed!");
}
/**
 * root is a simple mark on an object; the db may have no root at all, and the
 * caller can null this mark at any time.
 */
public void setRoot(final DbObject obj) {
    checkDbState();
    final boolean plainWrite = activeTrans == null && ALLOW_PLAIN_WRITE;
    if (!plainWrite) {
        // throws when there is no transaction and plain writes are disabled
        ensureTransaction();
    }
    final int newRootIndex;
    if (obj == null) {
        newRootIndex = -1; // clearing the root mark
    } else {
        ensureOwner(obj);
        obj.ensureReal();
        newRootIndex = obj.indexId;
    }
    if (plainWrite) {
        rootIndex = newRootIndex; // plain write: no rollback ability
    } else {
        activeTrans.rootIndex = newRootIndex;
    }
}
// asserts that a transaction is currently open
private void ensureTransaction() {
    if (activeTrans != null) {
        return;
    }
    throw new RuntimeException("Out of transaction");
}
// returns the built-in meta class (the class that describes class images)
DbClassImpl getDbClassMetaSchema() {
return meta;
}
/**
 * reads all classes and objects from the database file (startup only).
 * Page 0 holds the db meta info: versionId (8 bytes), max used index id
 * (4 bytes) and the root object's indexId (4 bytes).
 */
private void readDB() throws Exception {
final byte[] firstPage = pageMapper.startup_readPage(0);
pageMapper.startup_markPageAsUsed(0);
currentVersionId = DbPacker.unpack8(firstPage, 0);
rootIndex = -1;
if (currentVersionId == 0) {
// versionId 0 => brand new file: size the object index by the page count and mark every slot free
objects = new DbObject[(int) (pageMapper.getFileSize() / pageMapper.pageSize)];
freeIndexIds.ensureCapacity(objects.length);
for (int i = objects.length; --i >= 0;) {
freeIndexIds.add(i);
}
Log.debug("PAINDB:New Database Created" + getDbFileName());
} else {
int indexIdsSize = DbPacker.unpack4(firstPage, 8);
rootIndex = DbPacker.unpack4(firstPage, 12);
maxUsedIndexId = indexIdsSize;
if (indexIdsSize == 0) {
indexIdsSize = 1024; // minimal index capacity
} else {
indexIdsSize = (int) ((100 + indexIdsSize) * 1.3); // ~30% headroom for new objects
}
objects = new DbObject[indexIdsSize];
Log.debug("PAINDB:Reading db");
// 1) read all classes first: object pages can only be decoded once their class schemas exist
final int numberOfPages = pageMapper.getNumberOfPages();
for (int pageNo = 1; pageNo < numberOfPages; pageNo++) {
if (pageMapper.startup_isPageUsed(pageNo)) { //was already used
continue;
}
final byte[] data = pageMapper.startup_readPage(pageNo);
if (DbObjectMapper.isClassSchemaStartPage(data)) {
final DbRuntimeClass dbClass = objectMapper.readClassSchema(pageNo);
addClassInMaps(dbClass);
pageMapper.startup_markPageAsUsed(dbClass.getPageNums());
objects[dbClass.image.indexId] = dbClass.image;
}
}
// 2) read all objects from the pages not consumed by class schemas
for (int pageNo = 0; pageNo < numberOfPages; pageNo++) {
if (pageMapper.startup_isPageUsed(pageNo)) { //was already used
continue;
}
final byte[] data = pageMapper.startup_readPage(pageNo);
if (DbObjectMapper.isObjectStartPage(data)) {
final DbObject obj = objectMapper.readObject(pageNo); // maps indexId in objects[]
pageMapper.startup_markPageAsUsed(obj.pageNums);
if (obj.versionId > currentVersionId) {
currentVersionId = obj.versionId; // new objects will get oids with a newer version id
}
}
}
// 3) initializing all reference fields that need to be initialized (collections)
_startup_initReferences();
// 4) marking free indexIds: every slot not claimed by a loaded object/class
freeIndexIds.ensureCapacity(objects.length);
for (int i = objects.length; --i >= 0;) {
if (objects[i] == null) {
freeIndexIds.add(i);
}
}
}
pageMapper.startup_complete();
currentVersionId++; // ensure new oids are newer than anything read from disk
_flush();
}
// startup step 3: lets every loaded class resolve its reference fields (collections)
private void _startup_initReferences() {
    final Iterator classes = dbClassByClass.values().iterator();
    while (classes.hasNext()) {
        ((DbClassImpl) classes.next()).onDbLoaded();
    }
}
/**
 * Internal use only: registers the class in both lookup maps
 * (by java.lang.Class and by fully qualified class name).
 * @param dbClass class to register
 * @throws RuntimeException if the java class named in the schema cannot be loaded
 */
private void addClassInMaps(final DbRuntimeClass dbClass) throws RuntimeException {
    try {
        dbClassByClass.put(Class.forName(dbClass.getClassName()), dbClass);
    } catch (ClassNotFoundException e) {
        // narrowed from catch(Exception): ClassNotFoundException is the only
        // checked failure of Class.forName; other RuntimeExceptions propagate as-is
        throw new RuntimeException("cannot load persistent class: " + dbClass.getClassName(), e);
    }
    dbClassByName.put(dbClass.getClassName(), dbClass);
}
// registers a freshly constructed DbObject in the database: resolves (or
// creates on the fly) its DbClass, allocates an oid and a data slot, and adds
// the object to its class extent.
void registerObject(final DbObject obj) throws RuntimeException {
checkDbState();
if (!ALLOW_PLAIN_WRITE) {
ensureTransaction();
}
final Class objClass = obj.getClass();
DbClassImpl dbClass = objClass == DbRuntimeClass.class ? meta : (DbClassImpl) dbClassByClass.get(objClass);
final DbOid oid;
if (dbClass == null) {
final DbClassSchema schema = obj.provideSchema();
// check if object schema is DbRuntimeClass Image -> no persistence presentation
if (schema == DbClassImage.schema) { // new class image
dbClass = meta;
} else { // new object of a class not seen before -> register the class on the fly
DbRuntimeClass dbRunClass = (DbRuntimeClass) dbClassByClass.get(obj.getClass());
if (dbRunClass == null) {
dbRunClass = registerNewClass(obj.getClass(), schema, null);
}
dbClass = dbRunClass;
}
}
oid = allocateOid();
obj.dbClass = dbClass;
obj.indexId = oid.indexId;
obj.versionId = oid.versionId;
obj.pageNums = zeroPageNumsStub; // no file pages allocated until the first flush
obj.dataIndex = dbClass.data.allocateDataIndex();
objects[oid.indexId] = obj;
if (activeTrans == null) {
obj.globalState = DbConstants.STATE_OBJ_DIRTY;
dirty.add(obj.indexId); // plain write
} else {
obj.globalState = DbConstants.STATE_OBJ_NEW; // will become dirty after the transaction is committed
addInTransaction(obj); // in transaction
}
dbClass.addToExtent(obj);
}
/**
 * Creates a new DbRuntimeClass whose persistent representation is a DbClassImage.
 * @param clazz java class to map
 * @param schema field names/types of the persistent class
 * @param oid - used for import: when non-null, this predefined oid is assigned to the class image
 * @return the registered runtime class
 * @throws RuntimeException if the class image cannot be reflected
 */
synchronized DbRuntimeClass registerNewClass(final Class clazz, final DbClassSchema schema, DbOid oid) throws RuntimeException {
final DbClassImage image;
if (oid != null) {
// import path: recreate the image with the given oid
try {
image = (DbClassImage) reflectDbObject(meta, oid, DbConstants.ZERO_INT_ARRAY);
} catch (Exception e) {
throw new RuntimeException(e);
}
} else {
image = new DbClassImage(this);
}
image.setClassName(clazz.getName());
image.setFieldTypes(schema.fieldTypes);
image.setFieldNames(schema.fieldNames);
final DbRuntimeClass dbClass = new DbRuntimeClass(image);
addClassInMaps(dbClass);
if (!ALLOW_PLAIN_WRITE || activeTrans != null) {
// class creation must join the transaction so it can be rolled back
addInTransaction(dbClass, true);
dbClass.transContext.state = DbConstants.STATE_CLASS_NEW;
}
return dbClass;
}
/**
 * allocates a new object id: reuses a free indexId (paired with an increased
 * version) when possible, extending the indexId space otherwise.
 */
private synchronized DbOid allocateOid() {
    currentVersionId++;
    if (freeIndexIds.isEmpty()) {
        // no reusable slot left: grow objects[] and register the new free ids
        extendObjectsIndex(objects.length + 1, true);
    }
    final int indexId = freeIndexIds.removeLast();
    maxUsedIndexId = Math.max(maxUsedIndexId, indexId);
    return new DbOid(indexId, currentVersionId);
}
// grows objects[] to at least newCapacity (with a ~10% growth factor) and
// optionally registers the newly created slots as free index ids
void extendObjectsIndex(int newCapacity, boolean increaseFreeIndexes) {
    final int oldLength = objects.length;
    final int newLength = Math.max(oldLength + oldLength / 10, newCapacity);
    final DbObject[] grown = new DbObject[newLength];
    System.arraycopy(objects, 0, grown, 0, oldLength);
    if (increaseFreeIndexes) {
        for (int id = grown.length; --id >= oldLength;) {
            freeIndexIds.add(id);
        }
    }
    objects = grown;
}
// true when indexId addresses a valid slot of objects[]
private boolean isIdInRange(final int indexId) {
    return 0 <= indexId && indexId < objects.length;
}
/** this method is called only if the object has no activeTransContext, so it was created before activeTrans
 * (new objects get a trans context during creation); the object should become dirty for the current trans
 */
void markDirty(final DbObject obj) {
if (ALLOW_PLAIN_WRITE && activeTrans == null) {
// plain write: no backup, the object goes straight onto the dirty list
if (obj.globalState != DbConstants.STATE_OBJ_CLEAN) {
// already dirty or new -> nothing to do
return;
}
obj.globalState = DbConstants.STATE_OBJ_DIRTY;
dirty.add(obj.indexId);
} else {
ensureTransaction();
// backs up the object's state so the change can be rolled back
addInTransaction(obj);
}
}
/** object is already in current trans context */
void markDeleted(final DbObject obj) {
if (activeTrans == null && ALLOW_PLAIN_WRITE) {
// plain write: delete immediately; no rollback is possible
clearInverseReferences(obj);
final DbClassImpl dbClass = obj.dbClass;
dbClass.onMarkDeleted(obj);
detachObject(obj);
} else {
// transactional delete: only mark; resources are freed at commit time
ensureTransaction();
clearInverseReferences(obj);
obj.transContext.state = DbConstants.STATE_OBJ_DELETED;
obj.dbClass.onMarkDeleted(obj);
}
}
/**
 * cleans inverse references from collections on object.delete();
 * this triggers the inverse collection owner to become dirty and to back up
 * the collection state before the inverse is cleared.
 */
private static void clearInverseReferences(final DbObject obj) {
    DbInverseRef ref = obj.inverseRef;
    while (ref != null) {
        ref.onTargetDelete();
        ref = ref.nextInverseRef;
    }
    obj.inverseRef = null;
}
// resolves a runtime class by the indexId of its class image;
// the image is expected to exist (no null check here)
DbClassImpl getDbClassSchema(final int classId) {
return ((DbClassImage) getObjectByIndexId(classId)).getDbRuntimeClass();
}
/**
 * raw access to the object index: no version or state checks are performed,
 * so this could return an instance in deleted state!
 */
DbObject getObjectByIndexId(final int indexId) {
return objects[indexId];
}
/**
 * startup/import helper. Creates a new object of the specified class and assigns
 * the given pageNums and oid to it. Adds the object to the database object index
 * and allocates a data slot for it.
 * @param dbClass class of the object being reflected
 * @param oid predefined object id (index + version)
 * @param pageNums file pages already holding the object's data
 * @return ready-to-use object with uninitialized field values.
 * @throws Exception if the reflective read constructor fails
 */
DbObject reflectDbObject(final DbClassImpl dbClass, final DbOid oid, final int[] pageNums) throws Exception {
final DbObject obj;
obj = (DbObject) dbClass.getReadConstructor().newInstance(constructParams);
obj.dbClass = dbClass;
obj.dataIndex = dbClass.data.allocateDataIndex();
obj.globalState = DbConstants.STATE_OBJ_CLEAN; // data comes straight from disk
obj.pageNums = pageNums;
obj.indexId = oid.indexId;
obj.versionId = oid.versionId;
objects[oid.indexId] = obj;
dbClass.addToExtent(obj);
return obj;
}
// class images are identified by their own indexId; ordinary objects by the id of their class
int getClassId(final DbObject obj) {
    if (obj.dbClass == meta) {
        return obj.indexId;
    }
    return ((DbRuntimeClass) (obj.dbClass)).getClassId();
}
// rejects objects that belong to a different database instance
void ensureOwner(final DbObject value) {
    if (this == value.getDB()) {
        return;
    }
    throw new RuntimeException("Invalid Db!");
}
// removes a class and all of its objects from the database.
// In plain-write mode resources are freed immediately; inside a transaction
// the class keeps a trans context so the removal can be rolled back.
void removeClass(final DbRuntimeClass dbClass) {
if (!ALLOW_PLAIN_WRITE) {//else for the (activeTrans == null && ALLOW_PLAIN_WRITE)
//ensure we are in trans context
ensureTransaction();
//ensure class is not already deleted
final DbClassTransContext classTransContext = dbClass.transContext;
if (classTransContext != null && (classTransContext.state == DbConstants.STATE_CLASS_DELETED || classTransContext.state == DbConstants.STATE_CLASS_NEW_AND_DELETED)) {
throw new RuntimeException("class is already was deleted!:" + dbClass.getClassName());
}
//ensure class has activeTrans context
if (classTransContext == null || classTransContext.trans != activeTrans) {
addInTransaction(dbClass, false); // new class will have trans context
}
}
//destroying class extent: delete every object of the class and remove all mappings
for (Iterator it = dbClass.extentIterator(false); it.hasNext();) {
it.next();
it.remove();
}
dbClass.image.delete(); // the persistent class image is deleted as well
// deallocating resources
final Class objectClass = dbClass.getObjectClass();
dbClassByClass.remove(objectClass);
dbClassByName.remove(objectClass.getName());
if (activeTrans != null) { //not plain write
// NOTE(review): when ALLOW_PLAIN_WRITE is true and a transaction is active,
// the guard block above is skipped; this assumes transContext is non-null
// here (set via the it.remove()/delete() path) -- confirm
final DbClassTransContext classTransContext = dbClass.transContext;
classTransContext.state = classTransContext.state == DbConstants.STATE_CLASS_NEW ? DbConstants.STATE_CLASS_NEW_AND_DELETED : DbConstants.STATE_CLASS_DELETED;
}
}
/**
 * @return database file size in bytes
 * @throws IOException if any IO error occurred during this method call
 */
public long getDBFileSize() throws IOException {
    final long fileSize = pageMapper.getFileSize();
    return fileSize;
}
/**
 * @return true for a just-created database or a database with no objects in it
 */
public boolean isDatabaseEmpty() {
    return 0 == getNumberOfObjectsInDb();
}
/**
 * @return number of all objects in the database, classes included.
 */
public int getNumberOfObjectsInDb() {
    checkDbState();
    final int freeSlots = freeIndexIds.getSize();
    return objects.length - freeSlots;
}
// safety net: closes the db if the user forgot to. Note that finalize() is
// not guaranteed to run, so this must not be relied upon for data safety.
protected void finalize() {
if (active) {
Log.warn("closing db with finalize!");
close();
}
}
/**
 * starts a database transaction.
 * Recurrent calls of this method without commit create subtransactions.
 * It is recommended to use the {@link DbTransaction} wrapper class instead of
 * calling this method manually.
 */
public void beginTransaction() {
    checkDbState();
    // the new (sub)transaction inherits the current root mark
    final int inheritedRoot = activeTrans == null ? rootIndex : activeTrans.rootIndex;
    activeTrans = new DbTransactionContext(transNo++, activeTrans, inheritedRoot);
}
/**
 * commits the database transaction.
 * It is recommended to use the {@link DbTransaction} wrapper class instead of
 * calling this method manually.
 * This method automatically flushes all changes to disk when there is no
 * upper-level transaction and MANUAL_FLUSH_MODE is not set.
 * @throws IOException if any IO problem occurs during flush
 * @throws IllegalStateException if no transaction is active
 */
public void commitTransaction() throws IOException {
if (activeTrans == null) {
throw new IllegalStateException("No active transaction found!");
}
final DbTransactionContext upperTransaction = activeTrans.upperLevelTrans;
if (upperTransaction != null) { //combining the if and else blocks into a single block is possible but would complicate the code and drop performance (the check would run for every processed object)
// nested transaction: merge its changes into the upper-level transaction
commitTN(upperTransaction);
activeTrans = upperTransaction;
checkTrans();
} else {
// top-level transaction: publish its changes to the global state
commitT1();
activeTrans = upperTransaction;
if (!MANUAL_FLUSH_MODE) {
_flush();
}
}
}
// debug-only consistency check of the activeTrans class chain;
// the body is intentionally disabled, kept for future debugging
private void checkTrans() {
// HashSet set = new HashSet();
// for (DbClassImpl cl = activeTrans.firstClassInTrans; cl != null; cl = cl.transContext != null ? cl.transContext.nextClassInTrans : null) {
// if (set.contains(cl)) {
// Log.error("set already contains this class!!!:" + cl.getClassName());
// }
// if (cl.transContext == null) {
// Log.error(" trans context is null!!!:" + cl.getClassName());
// } else if (cl.transContext.trans != activeTrans) {
// Log.error("invalid trans context!!!:" + cl.getClassName());
// }
// set.add(cl);
// }
}
/** commits a transaction of depth 1 (no upper-level transaction exists) */
private void commitT1() {
//upper transaction is null, we should:
//delete all new&deleted objects and classes
//move state from transContext to globalState for objects
//remove all trans contexts
DbClassImpl nextClass;
for (DbClassImpl dbClass = activeTrans.firstClassInTrans; dbClass != null; dbClass = nextClass) {
final DbClassTransContext classTransContext = dbClass.transContext;
nextClass = classTransContext.nextClassInTrans;
if (classTransContext.backupData != null) {
classTransContext.backupData.clear(); // backups are no longer needed once committed
}
DbObject nextObject;
for (DbObject obj = classTransContext.firstObjInTrans; obj != null; obj = nextObject) {
final DbObjectTransContext objTransContext = obj.transContext;
assert(objTransContext.prevTransContext == null);
nextObject = objTransContext.nextObjInTrans;
if (objTransContext.state == DbConstants.STATE_OBJ_DELETED) {
//deleted or new_deleted: free the object's resources for good
detachObject(obj);
} else {
if (obj.globalState != DbConstants.STATE_OBJ_DIRTY) {
//globalState == DIRTY if there was no flush between transactions
dirty.add(obj.indexId);
obj.globalState = DbConstants.STATE_OBJ_DIRTY;
}
obj.transContext = null;
}
}
dbClass.transContext = null;
}
rootIndex = activeTrans.rootIndex; // publish the (possibly re-pointed) root mark
}
/**
 * commits a transaction of depth N (a nested transaction): merges its
 * changes into the given upper-level transaction.
 * @param upperTransaction the enclosing transaction absorbing the changes
 */
private void commitTN(final DbTransactionContext upperTransaction) {
//here we should:
//process all trans classes
//remove all new&&deleted classes and their objects
//move all backupData obj data into the upper context
//move the state of objects and classes into the upper context
DbClassImpl nextClassInActiveTrans;
for (DbClassImpl dbClass = activeTrans.firstClassInTrans; dbClass != null; dbClass = nextClassInActiveTrans) {
final DbClassTransContext classTransContext = dbClass.transContext;
nextClassInActiveTrans = classTransContext.nextClassInTrans;
// for new_deleted we should just clear resources
final DbClassTransContext prevTransClassContext = classTransContext.prevTransContext;
// if the class's prev context does not belong to the upper transaction we can reuse the current one without any change
if (prevTransClassContext == null || prevTransClassContext.trans != upperTransaction) {
//prevTransClassContext does not match the prev transaction:
//reuse the current context, but now it is responsible for the upper level transaction
classTransContext.trans = upperTransaction;
classTransContext.nextClassInTrans = upperTransaction.firstClassInTrans;
upperTransaction.firstClassInTrans = dbClass;
//remove new_deleted objects while rebuilding the chain of the class's trans objects
DbObject nextObject;
DbObject obj = classTransContext.firstObjInTrans;
classTransContext.firstObjInTrans = null;//we will rebuild it
for (; obj != null; obj = nextObject) {
final DbObjectTransContext objTransContext = obj.transContext;
nextObject = objTransContext.nextObjInTrans;
if (objTransContext.state == DbConstants.STATE_OBJ_DELETED && objTransContext.backupDataIndex == -1) { //NEW_AND_DELETED
// new and deleted in the current trans: nothing to hand to the upper level
detachObject(obj);
} else {
objTransContext.nextObjInTrans = classTransContext.firstObjInTrans;
classTransContext.firstObjInTrans = obj;
objTransContext.trans = upperTransaction;
}
}
} else { // a prev class context exists in the upper transaction, we should add the diff to it
DbObject nextObject;
for (DbObject obj = classTransContext.firstObjInTrans; obj != null; obj = nextObject) {
final DbObjectTransContext objTransContext = obj.transContext;
nextObject = objTransContext.nextObjInTrans;
if (objTransContext.state == DbConstants.STATE_OBJ_DELETED && objTransContext.backupDataIndex == -1) {
//NEW_AND_DELETED
detachObject(obj);
} else {
final DbObjectTransContext prevObjTransContext = objTransContext.prevTransContext;
if (prevObjTransContext != null && prevObjTransContext.trans == upperTransaction) {
// backup data already exists in the upper level, only the state moves up
prevObjTransContext.state = objTransContext.state;
obj.transContext = prevObjTransContext; //losing the ref to the last trans
} else {
//add the object to the chain of the prev transaction, reusing the current transcontext object
objTransContext.trans = upperTransaction;
objTransContext.nextObjInTrans = prevTransClassContext.firstObjInTrans;
prevTransClassContext.firstObjInTrans = obj;
if (objTransContext.backupDataIndex != -1 && prevTransClassContext.backupData != null) { //newly created stays newly created; if the class was created in the prev transaction there is no backup to move
objTransContext.backupDataIndex = dbClass.moveBackupData(obj, objTransContext.backupDataIndex, classTransContext.backupData, prevTransClassContext.backupData);
}
}
}
}
// the transContext for activeTrans will not be used any more, here we lose the link to it
if (classTransContext.backupData != null) {
classTransContext.backupData.clear();
}
dbClass.transContext = prevTransClassContext;
prevTransClassContext.state = prevTransClassContext.state == DbConstants.STATE_CLASS_NEW ? DbConstants.STATE_CLASS_NEW : classTransContext.state;// the class is now a part of the upper transaction
}
}
upperTransaction.rootIndex = activeTrans.rootIndex;
}
/** deallocates all object resources;
 * the object cannot be restored after this method call
 * @param obj object to detach
 */
private void detachObject(final DbObject obj) {
obj.dbClass.data.deallocateDataIndex(obj.dataIndex);
pageMapper.deallocatePages(obj.pageNums);
obj.globalState = DbConstants.STATE_OBJ_DETACHED;
objects[obj.indexId] = null;
if (rootIndex == obj.indexId) {
rootIndex = -1; // the detached object was the root: clear the root mark
}
freeIndexIds.add(obj.indexId); // the indexId slot becomes reusable
obj.indexId = -1;
obj.versionId = -1;
obj.dbClass = null;
obj.transContext = null;
}
// rolls back the innermost transaction: objects/classes created in it are
// destroyed, modified objects are restored from their backup data
public void rollbackTransaction() {
checkDbState();
checkTrans();
// here we should:
// process all new, new_deleted -> deallocate resources
// restore data from backupData
// set state to the state before trans
final DbTransactionContext upperTransaction = activeTrans.upperLevelTrans;
DbClassImpl nextClass;
// force delete all new classes
for (DbClassImpl dbClass = activeTrans.firstClassInTrans; dbClass != null; dbClass = nextClass) {
if (dbClass.transContext.state == DbConstants.STATE_CLASS_NEW) {
removeClass((DbRuntimeClass) dbClass);
}
// NOTE(review): transContext is read again after removeClass; this relies
// on removeClass keeping the context in place inside a transaction -- confirm
nextClass = dbClass.transContext.nextClassInTrans;
}
// ok there are no new classes now, only deleted ones
for (DbClassImpl dbClass = activeTrans.firstClassInTrans; dbClass != null; dbClass = nextClass) {
if (Log.isDebugEnabled()) {
Log.debug("PAINDB:Rollback:" + dbClass.getClassName());
}
final DbClassTransContext classTransContext = dbClass.transContext;
nextClass = classTransContext.nextClassInTrans;
if (classTransContext.state == DbConstants.STATE_CLASS_DELETED) {
addClassInMaps((DbRuntimeClass) dbClass); // resurrect the deleted class's mappings
}
DbObject nextObject;
for (DbObject obj = classTransContext.firstObjInTrans; obj != null; obj = nextObject) {
final DbObjectTransContext objTransContext = obj.transContext;
nextObject = objTransContext.nextObjInTrans;
if (objTransContext.backupDataIndex == -1) {//(NEW or NEW_DELETED)
detachObject(obj); // created in this transaction: discard completely
} else {
dbClass.restoreObject(obj); // restore from backup, return to extent
obj.transContext = obj.transContext.prevTransContext;
}
}
dbClass.transContext = classTransContext.prevTransContext;
}
activeTrans = upperTransaction;
Log.debug("PAINDB:rollback done!");
}
/** 1) called during new obj registration (NEW)
 * 2) called during a set method on an object which is not yet in the current transaction (NOT NEW)
 * 3) not called on obj.delete() -> the obj state becomes NEW_DELETED and there is nothing to do here
 */
private void addInTransaction(final DbObject obj) {
final boolean newObject = obj.globalState == DbConstants.STATE_OBJ_NEW;
assert(obj.transContext == null || obj.transContext.trans != activeTrans);
final DbObjectTransContext objTransContext = new DbObjectTransContext(activeTrans);
objTransContext.prevTransContext = obj.transContext; // keeps the chain to outer transactions
objTransContext.state = newObject ? DbConstants.STATE_OBJ_NEW : DbConstants.STATE_OBJ_DIRTY;
final DbClassImpl dbClass = obj.dbClass;
DbClassTransContext classTransContext = dbClass.transContext;
if (classTransContext == null || classTransContext.trans != activeTrans) { // first object of this class in trans
addInTransaction(dbClass, false);
classTransContext = dbClass.transContext;
}
// here we need to back up all obj data into classTransContext.data;
// no backup is created if the object was just created in this transaction
objTransContext.backupDataIndex = newObject && obj.transContext == null ? -1 : dbClass.backupObject(obj);
obj.transContext = objTransContext;
// link the object into the class's per-transaction object list
objTransContext.nextObjInTrans = classTransContext.firstObjInTrans;
classTransContext.firstObjInTrans = obj;
}
// creates a trans context for the class and links the class into the active
// transaction's class chain; newClass == true skips the backup buffer because
// a brand new class has no pre-existing objects to back up
private void addInTransaction(final DbClassImpl dbClass, final boolean newClass) {
final DbClassTransContext classContext = new DbClassTransContext(activeTrans);
classContext.backupData = newClass ? null : dbClass.createClassData();
classContext.nextClassInTrans = activeTrans.firstClassInTrans;
classContext.prevTransContext = dbClass.transContext;
dbClass.transContext = classContext;
activeTrans.firstClassInTrans = dbClass;
}
/**
 * same as execute(trans, null);
 */
public Object execute(DbTransaction trans) throws Exception {
    final Object[] noParams = null;
    return execute(trans, noParams);
}
/**
 * Executes a transaction: begin, run the body, then commit on success or
 * rollback on failure.
 * @param trans transaction body to execute
 * @param params passed to the DbTransaction.execute() method
 * @return result of the DbTransaction.execute method
 * @throws Exception if it was thrown in the DbTransaction.execute method or if
 * a write error occurs on flush
 */
public Object execute(DbTransaction trans, Object params[]) throws Exception {
boolean ok = false;
beginTransaction();
try {
final Object result = trans.execute(params);
ok = true;
return result;
} finally {
if (ok) {
try {
commitTransaction();
} catch (Exception e) {
Log.error("Exception during commit! ", e);
throw e;
}
} else {
try {
rollbackTransaction();
} catch (Exception e) {
Log.error("Exception during rollback!", e);
// the original exception from execute() will be thrown by the finally block
}
}
}
}
/**
 * @return database file name
 */
public String getDbFileName() {
    // checkDbState() is intentionally not called: the name stays available
    return this.pageMapper.getFileName();
}
/**
 * @return true if the database was closed
 */
public boolean isClosed() {
    final boolean open = active;
    return !open;
}
/** forces the database to close. All active transactions (including nested ones) are rolled back */
public void forceClose() {
checkDbState();
try {
while (activeTrans != null) {
rollbackTransaction(); // pops one transaction level per call
}
} finally {
// close even if a rollback failed; the db state is then unknown
activeTrans = null;
close();
}
}
/**
 * Creates a database backup file.<br>
 * <code>withFlush</code> should be <code>false</code> if this method is called inside of<br>
 * a transaction (the database image created by the last flush is used then)
 * @param backupFileName file name for backup
 * @param withFlush - if true a database flush precedes the backup
 * @throws IOException if any IO error occurs during the copy
 */
public void backupTo(String backupFileName, boolean withFlush) throws IOException {
    File backup = _prepareForBackup(withFlush, backupFileName);
    // primitive backup: here we just make a copy of the file
    final RandomAccessFile file = pageMapper.getFile();
    final FileOutputStream backupStream = new FileOutputStream(backup);
    try {
        final FileChannel toChannel = backupStream.getChannel();
        final FileChannel fromChannel = file.getChannel();
        fromChannel.position(0);
        // FileChannel.transferFrom is not guaranteed to transfer all requested
        // bytes in a single call, so loop until the whole file is copied
        final long size = fromChannel.size();
        long copied = 0;
        while (copied < size) {
            final long n = toChannel.transferFrom(fromChannel, copied, size - copied);
            if (n <= 0) {
                break; // defensive: source exhausted, avoid an endless loop
            }
            copied += n;
        }
    } finally {
        backupStream.close();
    }
}
// common validation for backup/export: optionally flushes, then verifies that
// the destination file does not exist and its parent directory is usable
private File _prepareForBackup(boolean flush, String fileName) throws IOException {
checkDbState();
if (activeTrans != null && flush) {
throw new RuntimeException("active transaction found, cannot flush data!");
}
if (flush) {
_flush();
}
File backup = new File(fileName);
if (backup.exists()) {
throw new RuntimeException("File Already exists:" + backup.getAbsolutePath());
}
final File parentFile = backup.getParentFile();
if (parentFile != null) { // parent == null if we use the default folder
if (!parentFile.exists()) {
if (!parentFile.mkdirs()) {
throw new RuntimeException("Can't create backup file parent dirs: " + backup.getAbsolutePath());
}
} else if (!parentFile.isDirectory()) {
throw new RuntimeException("illegal destination dir:" + parentFile.getAbsolutePath());
}
}
return backup;
}
/**
 * Exports all database data to an XML file.<br>
 * binary field values are encoded with BASE64<br>
 * object identities are preserved
 * @param fileName = XML file name to export to
 * @throws IOException if any IO error occurs while writing
 */
public void exportToXml(String fileName) throws IOException {
    File file = _prepareForBackup(true, fileName);
    BufferedWriter writer = new BufferedWriter(new FileWriter(file), 100 * 1000);
    try { // was: writer leaked when any write below failed
        writer.write("<?xml version=\"1.0\"?>\n\n");
        String rootOid = null;
        // was 'rootIndex > 0': index 0 is a valid object slot (only -1 means
        // "no root", cf. getRoot/setRoot); also guard against a stale index
        if (rootIndex >= 0 && objects[rootIndex] != null) {
            rootOid = DbOid.toString(rootIndex, objects[rootIndex].versionId);
        }
        writer.write("<paindb version=\"" + DB_VERSION + "\" backupDate=\"" + DateFormat.getDateTimeInstance().format(new Date()) + "\" rootOid=\"" + rootOid + "\" maxIndex=\"" + _getMaxUsedIndex() + "\" nItems=\"" + getNumberOfObjectsInDb() + "\">\n");
        // class schemas first so an importer can rebuild classes before objects
        writer.write("<metadata>\n");
        for (Iterator it = dbClassByName.values().iterator(); it.hasNext();) {
            DbClassImpl dbClass = (DbClassImpl) it.next();
            writer.write("\t<class name=\"" + dbClass.getClassName() + "\" id=\"" + dbClass.getOid() + "\">\n");
            int nFields = dbClass.getNumberOfFields();
            for (int i = 0; i < nFields; i++) {
                writer.write("\t\t<field name=\"" + dbClass.getFieldName(i) + "\" type=\"" + dbClass.getFieldType(i) + "\"/>\n");
            }
            writer.write("\t</class>\n");
        }
        writer.write("</metadata>\n");
        writer.write("<objects>\n");
        for (Iterator it = dbClassByName.values().iterator(); it.hasNext();) {
            DbClassImpl dbClass = (DbClassImpl) it.next();
            String fieldNames[] = dbClass.getFieldNames();
            byte fieldTypes[] = dbClass.getFieldTypes();
            String className = dbClass.getClassName();
            // walk the class extent and dump every object with its oid
            for (DbObject obj = dbClass.firstInExtent; obj != null; obj = obj.next) {
                writer.write("\t<object class=\"" + className + "\" id=\"" + DbOid.toString(obj.indexId, obj.versionId) + "\">\n");
                final int len = fieldNames.length;
                for (int i = 0; i < len; i++) {
                    String value = objectMapper.getXMLValue(fieldTypes, i, dbClass, obj);
                    writer.write("\t\t<field name=\"" + fieldNames[i] + "\" value=\"" + value + "\" />\n");
                }
                writer.write("\t</object>\n");
            }
        }
        writer.write("</objects>\n");
        writer.write("</paindb>");
    } finally {
        writer.close();
    }
}
/**
 * Creates a new database instance and fills it with data from the given XML file
 * (the format produced by {@link #exportToXml}).<br>
 * All objects keep their identity (oids are restored from the XML).
 * On any failure the half-built database is closed and its file deleted.
 * @param xmlFileName XML file with the database image to import
 * @param resultDbFileName name of the resulting database file (must not exist yet)
 * @return new database instance with all imported objects marked dirty (flush pending)
 * @throws Exception if the XML file is missing/invalid, parsing fails,
 *         or the imported object count does not match the declared one
 */
public static PainDB importFromXml(String xmlFileName, String resultDbFileName) throws Exception {
File f = new File(xmlFileName);
if (!f.exists()) {
throw new FileNotFoundException(f.getAbsolutePath());
}
if (f.isDirectory()) {
throw new RuntimeException("specified file is directory:" + f.getAbsolutePath());
}
// 'ok' guards the cleanup in the finally block: only a fully successful
// import leaves the new database file on disk
boolean ok = false;
PainDB db = new PainDB(resultDbFileName);
try {
SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
final FileInputStream is = new FileInputStream(f);
try {
// long time = System.currentTimeMillis();
// SAX handler materializes objects into 'db' as elements are parsed
final DbObjectMapper.XMLImportDataHandler xmlImportDataHandler = db.objectMapper.createXmlImportDataHandler();
parser.parse(new BufferedInputStream(is, 100 * 1000), xmlImportDataHandler);
// System.out.println("time:" + (System.currentTimeMillis() - time));
// resolve object-to-object references now that every object exists
db._startup_initReferences();
// exportToXml writes the literal string "null" when the db had no root object
if (!"null".equals(xmlImportDataHandler.rootOid)) {
db.rootIndex = new DbOid(xmlImportDataHandler.rootOid).indexId;
}
db.maxUsedIndexId = xmlImportDataHandler.maxUsedIndexId;
// rebuild the free-index list: empty slots become free ids, occupied
// slots are marked dirty so the next flush writes every object to disk
db.freeIndexIds.clear();
db.freeIndexIds.ensureCapacity(db.objects.length - xmlImportDataHandler.nItems + 1);
DbObject[] objs = db.objects;
DbIntBuffer dirty = db.dirty;
for (int i = objs.length; --i >= 0;) {
final DbObject obj = objs[i];
if (obj == null) {
db.freeIndexIds.add(i);
} else {
obj.globalState = DbConstants.STATE_OBJ_DIRTY;
dirty.add(obj.indexId);
}
}
// sanity check: object count declared in the XML header must match reality
if (xmlImportDataHandler.nItems != db.getNumberOfObjectsInDb()) {
throw new RuntimeException("import checksum error!");
}
// System.out.println("Root:" + xmlImportDataHandler.rootOid);
} finally {
is.close();
}
ok = true;
} finally {
if (!ok) {
// failed import: discard the partially populated database file
db.close();
new File(resultDbFileName).delete();
}
}
return db;
}
/** Looks up the runtime class registered under the given name; null when unknown. */
DbRuntimeClass getDbClassByClassName(String className) {
    final Object dbClass = dbClassByName.get(className);
    return (DbRuntimeClass) dbClass;
}
/**
 * Returns the highest index slot currently occupied by an object,
 * or 0 when no slot is occupied at all.
 */
private int _getMaxUsedIndex() {
    final DbObject[] objs = objects;
    int i = objs.length - 1;
    // scan backwards past the trailing empty slots
    while (i >= 0 && objs[i] == null) {
        i--;
    }
    return i >= 0 ? i : 0;
}
}