001/* 002 * The contents of this file are subject to the terms of the Common Development and 003 * Distribution License (the License). You may not use this file except in compliance with the 004 * License. 005 * 006 * You can obtain a copy of the License at legal/CDDLv1.0.txt. See the License for the 007 * specific language governing permission and limitations under the License. 008 * 009 * When distributing Covered Software, include this CDDL Header Notice in each file and include 010 * the License file at legal/CDDLv1.0.txt. If applicable, add the following below the CDDL 011 * Header, with the fields enclosed by brackets [] replaced by your own identifying 012 * information: "Portions Copyright [year] [name of copyright owner]". 013 * 014 * Copyright 2014-2016 ForgeRock AS. 015 */ 016package org.opends.server.backends.pdb; 017 018import static com.persistit.Transaction.CommitPolicy.*; 019import static java.util.Arrays.*; 020 021import static org.opends.messages.BackendMessages.*; 022import static org.opends.messages.UtilityMessages.*; 023import static org.opends.server.backends.pluggable.spi.StorageUtils.*; 024import static org.opends.server.util.StaticUtils.*; 025 026import java.io.Closeable; 027import java.io.File; 028import java.io.FileFilter; 029import java.io.IOException; 030import java.nio.file.Files; 031import java.nio.file.Path; 032import java.nio.file.Paths; 033import java.rmi.RemoteException; 034import java.util.ArrayList; 035import java.util.HashMap; 036import java.util.HashSet; 037import java.util.List; 038import java.util.ListIterator; 039import java.util.Map; 040import java.util.NoSuchElementException; 041import java.util.Objects; 042import java.util.Set; 043 044import org.forgerock.i18n.LocalizableMessage; 045import org.forgerock.i18n.slf4j.LocalizedLogger; 046import org.forgerock.opendj.config.server.ConfigChangeResult; 047import org.forgerock.opendj.config.server.ConfigException; 048import org.forgerock.opendj.ldap.ByteSequence; 049import 
org.forgerock.opendj.ldap.ByteString; 050import org.forgerock.util.Reject; 051import org.opends.server.admin.server.ConfigurationChangeListener; 052import org.opends.server.admin.std.server.PDBBackendCfg; 053import org.opends.server.api.Backupable; 054import org.opends.server.api.DiskSpaceMonitorHandler; 055import org.opends.server.backends.pluggable.spi.AccessMode; 056import org.opends.server.backends.pluggable.spi.Cursor; 057import org.opends.server.backends.pluggable.spi.Importer; 058import org.opends.server.backends.pluggable.spi.ReadOnlyStorageException; 059import org.opends.server.backends.pluggable.spi.ReadOperation; 060import org.opends.server.backends.pluggable.spi.SequentialCursor; 061import org.opends.server.backends.pluggable.spi.Storage; 062import org.opends.server.backends.pluggable.spi.StorageInUseException; 063import org.opends.server.backends.pluggable.spi.StorageRuntimeException; 064import org.opends.server.backends.pluggable.spi.StorageStatus; 065import org.opends.server.backends.pluggable.spi.StorageUtils; 066import org.opends.server.backends.pluggable.spi.TreeName; 067import org.opends.server.backends.pluggable.spi.UpdateFunction; 068import org.opends.server.backends.pluggable.spi.WriteOperation; 069import org.opends.server.backends.pluggable.spi.WriteableTransaction; 070import org.opends.server.core.DirectoryServer; 071import org.opends.server.core.MemoryQuota; 072import org.opends.server.core.ServerContext; 073import org.opends.server.extensions.DiskSpaceMonitor; 074import org.opends.server.types.BackupConfig; 075import org.opends.server.types.BackupDirectory; 076import org.opends.server.types.DirectoryException; 077import org.opends.server.types.RestoreConfig; 078import org.opends.server.util.BackupManager; 079 080import com.persistit.Configuration; 081import com.persistit.Configuration.BufferPoolConfiguration; 082import com.persistit.Exchange; 083import com.persistit.Key; 084import com.persistit.Persistit; 085import 
com.persistit.Transaction; 086import com.persistit.Value; 087import com.persistit.Volume; 088import com.persistit.VolumeSpecification; 089import com.persistit.exception.InUseException; 090import com.persistit.exception.PersistitException; 091import com.persistit.exception.RollbackException; 092import com.persistit.exception.TreeNotFoundException; 093 094/** PersistIt database implementation of the {@link Storage} engine. */ 095public final class PDBStorage implements Storage, Backupable, ConfigurationChangeListener<PDBBackendCfg>, 096 DiskSpaceMonitorHandler 097{ 098 private static final int IMPORT_DB_CACHE_SIZE = 32 * MB; 099 100 private static final double MAX_SLEEP_ON_RETRY_MS = 50.0; 101 private static final String VOLUME_NAME = "dj"; 102 private static final String JOURNAL_NAME = VOLUME_NAME + "_journal"; 103 /** The buffer / page size used by the PersistIt storage. */ 104 private static final int BUFFER_SIZE = 16 * 1024; 105 106 /** PersistIt implementation of the {@link Cursor} interface. 
   */
  private final class CursorImpl implements Cursor<ByteString, ByteString>
  {
    // Key/value of the current position, decoded lazily and cached; both are
    // invalidated whenever the cursor moves (see clearCurrentKeyAndValue()).
    private ByteString currentKey;
    private ByteString currentValue;
    // The PersistIt exchange this cursor navigates; released by close().
    private final Exchange exchange;

    private CursorImpl(final Exchange exchange)
    {
      this.exchange = exchange;
    }

    @Override
    public void close()
    {
      // Release immediately because this exchange did not come from the txn cache
      releaseExchange(exchange);
    }

    @Override
    public boolean isDefined()
    {
      // The cursor is positioned on a record iff the exchange's value is defined.
      return exchange.getValue().isDefined();
    }

    @Override
    public ByteString getKey()
    {
      if (currentKey == null)
      {
        throwIfUndefined();
        // reset() rewinds the key's internal decoding pointer before decoding.
        currentKey = ByteString.wrap(exchange.getKey().reset().decodeByteArray());
      }
      return currentKey;
    }

    @Override
    public ByteString getValue()
    {
      if (currentValue == null)
      {
        throwIfUndefined();
        currentValue = ByteString.wrap(exchange.getValue().getByteArray());
      }
      return currentValue;
    }

    @Override
    public boolean next()
    {
      clearCurrentKeyAndValue();
      try
      {
        return exchange.next();
      }
      catch (final PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public void delete()
    {
      throwIfUndefined();
      try
      {
        exchange.remove();
      }
      catch (final PersistitException | RollbackException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public boolean positionToKey(final ByteSequence key)
    {
      clearCurrentKeyAndValue();
      bytesToKey(exchange.getKey(), key);
      try
      {
        // Exact-match positioning: defined value <=> the key exists.
        exchange.fetch();
        return exchange.getValue().isDefined();
      }
      catch (final PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public boolean positionToKeyOrNext(final ByteSequence key)
    {
      clearCurrentKeyAndValue();
      bytesToKey(exchange.getKey(), key);
      try
      {
        // Try the exact key first, otherwise advance to the next greater key.
        exchange.fetch();
        return exchange.getValue().isDefined() || exchange.next();
      }
      catch (final PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public boolean positionToIndex(int index)
    {
      // There doesn't seem to be a way to optimize this using Persistit.
      clearCurrentKeyAndValue();
      exchange.getKey().to(Key.BEFORE);
      try
      {
        // Linear scan: advance (index + 1) times from the before-first position.
        // NOTE(review): a negative index returns true without moving the cursor -
        // presumably callers never pass one; confirm.
        for (int i = 0; i <= index; i++)
        {
          if (!exchange.next())
          {
            return false;
          }
        }
        return true;
      }
      catch (final PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public boolean positionToLastKey()
    {
      clearCurrentKeyAndValue();
      exchange.getKey().to(Key.AFTER);
      try
      {
        return exchange.previous();
      }
      catch (final PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    /** Invalidates the cached key/value; must be called on every cursor movement. */
    private void clearCurrentKeyAndValue()
    {
      currentKey = null;
      currentValue = null;
    }

    /** Throws {@link NoSuchElementException} if the cursor is not positioned on a record. */
    private void throwIfUndefined()
    {
      if (!isDefined())
      {
        throw new NoSuchElementException();
      }
    }
  }

  /** PersistIt implementation of the {@link Importer} interface.
   */
  private final class ImporterImpl implements Importer
  {
    // Per-thread exchange cache: Exchange objects are stateful and must not be
    // shared between importer threads.
    private final ThreadLocal<Map<TreeName, Exchange>> exchanges = new ThreadLocal<Map<TreeName, Exchange>>()
    {
      @Override
      protected Map<TreeName, Exchange> initialValue()
      {
        return new HashMap<>();
      }
    };

    @Override
    public void close()
    {
      // Closes the whole storage, not just this importer.
      PDBStorage.this.close();
    }

    @Override
    public void clearTree(final TreeName treeName)
    {
      // Implemented as remove + recreate, each in its own transaction.
      final Transaction txn = db.getTransaction();
      deleteTree(txn, treeName);
      createTree(txn, treeName);
    }

    /** Creates the tree in its own committed transaction. */
    private void createTree(final Transaction txn, final TreeName treeName)
    {
      try
      {
        txn.begin();
        getNewExchange(treeName, true);
        txn.commit();
      }
      catch (PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
      finally
      {
        txn.end();
      }
    }

    /** Removes the tree in its own committed transaction. */
    private void deleteTree(Transaction txn, final TreeName treeName)
    {
      Exchange ex = null;
      try
      {
        txn.begin();
        ex = getNewExchange(treeName, true);
        ex.removeTree();
        txn.commit();
      }
      catch (PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
      finally
      {
        // NOTE(review): the exchange acquired here is never released back to the
        // pool - presumably acceptable during import; confirm.
        txn.end();
      }
    }

    @Override
    public void put(final TreeName treeName, final ByteSequence key, final ByteSequence value)
    {
      try
      {
        final Exchange ex = getExchangeFromCache(treeName);
        bytesToKey(ex.getKey(), key);
        bytesToValue(ex.getValue(), value);
        ex.store();
      }
      catch (final Exception e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public ByteString read(final TreeName treeName, final ByteSequence key)
    {
      try
      {
        final Exchange ex = getExchangeFromCache(treeName);
        bytesToKey(ex.getKey(), key);
        ex.fetch();
        // Returns null when the key does not exist (undefined value).
        return valueToBytes(ex.getValue());
      }
      catch (final PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    /** Returns the calling thread's cached exchange for the tree, creating it on first use. */
    private Exchange getExchangeFromCache(final TreeName treeName) throws PersistitException
    {
      Map<TreeName, Exchange> threadExchanges = exchanges.get();
      Exchange exchange = threadExchanges.get(treeName);
      if (exchange == null)
      {
        exchange = getNewExchange(treeName, false);
        threadExchanges.put(treeName, exchange);
      }
      return exchange;
    }

    @Override
    public SequentialCursor<ByteString, ByteString> openCursor(TreeName treeName)
    {
      try
      {
        // Dedicated exchange: the cursor releases it itself on close().
        return new CursorImpl(getNewExchange(treeName, false));
      }
      catch (PersistitException e)
      {
        throw new StorageRuntimeException(e);
      }
    }
  }

  /** Common interface for internal WriteableTransaction implementations. */
  private interface StorageImpl extends WriteableTransaction, Closeable {
  }

  /** PersistIt implementation of the {@link WriteableTransaction} interface. */
  private final class WriteableStorageImpl implements StorageImpl
  {
    private static final String DUMMY_RECORD = "_DUMMY_RECORD_";
    // Exchange cache for this transaction; every entry is released in close().
    private final Map<TreeName, Exchange> exchanges = new HashMap<>();

    @Override
    public void put(final TreeName treeName, final ByteSequence key, final ByteSequence value)
    {
      try
      {
        final Exchange ex = getExchangeFromCache(treeName);
        bytesToKey(ex.getKey(), key);
        bytesToValue(ex.getValue(), value);
        ex.store();
      }
      catch (final PersistitException | RollbackException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public boolean delete(final TreeName treeName, final ByteSequence key)
    {
      try
      {
        final Exchange ex = getExchangeFromCache(treeName);
        bytesToKey(ex.getKey(), key);
        return ex.remove();
      }
      catch (final PersistitException | RollbackException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public void deleteTree(final TreeName treeName)
    {
      Exchange ex = null;
      try
      {
        ex = getExchangeFromCache(treeName);
        ex.removeTree();
      }
      catch (final PersistitException | RollbackException e)
      {
        throw new StorageRuntimeException(e);
      }
      finally
      {
        // The exchange's tree is gone: evict it from this txn's cache before releasing.
        exchanges.values().remove(ex);
        releaseExchange(ex);
      }
    }

    @Override
    public long getRecordCount(TreeName treeName)
    {
      // FIXME: is there a better/quicker way to do this?
      try(final Cursor<?, ?> cursor = openCursor(treeName))
      {
        long count = 0;
        while (cursor.next())
        {
          count++;
        }
        return count;
      }
    }

    @Override
    public Cursor<ByteString, ByteString> openCursor(final TreeName treeName)
    {
      try
      {
        /*
         * Acquire a new exchange for the cursor rather than using a cached
         * exchange in order to avoid reentrant accesses to the same tree
         * interfering with the cursor position.
         */
        return new CursorImpl(getNewExchange(treeName, false));
      }
      catch (final PersistitException | RollbackException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public void openTree(final TreeName treeName, boolean createOnDemand)
    {
      if (createOnDemand)
      {
        openCreateTree(treeName);
      }
      else
      {
        try
        {
          // Caches the exchange as a side effect; fails if the tree does not exist.
          getExchangeFromCache(treeName);
        }
        catch (final PersistitException | RollbackException e)
        {
          throw new StorageRuntimeException(e);
        }
      }
    }

    @Override
    public ByteString read(final TreeName treeName, final ByteSequence key)
    {
      try
      {
        final Exchange ex = getExchangeFromCache(treeName);
        bytesToKey(ex.getKey(), key);
        ex.fetch();
        // Returns null when the key does not exist (undefined value).
        return valueToBytes(ex.getValue());
      }
      catch (final PersistitException | RollbackException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    @Override
    public boolean update(final TreeName treeName, final ByteSequence key, final UpdateFunction f)
    {
      try
      {
        final Exchange ex = getExchangeFromCache(treeName);
        bytesToKey(ex.getKey(), key);
        ex.fetch();
        final ByteSequence oldValue = valueToBytes(ex.getValue());
        final ByteSequence newValue = f.computeNewValue(oldValue);
        // Only touch the tree when the computed value actually differs.
        if (!Objects.equals(newValue, oldValue))
        {
          if (newValue == null)
          {
            // A null new value means "delete the record".
            ex.remove();
          }
          else
          {
            ex.getValue().clear().putByteArray(newValue.toByteArray());
            ex.store();
          }
          return true;
        }
        return false;
      }
      catch (final PersistitException | RollbackException e)
      {
        throw new StorageRuntimeException(e);
      }
    }

    /** Creates the tree on demand and forces journaled operations so the creation is recoverable. */
    private void openCreateTree(final TreeName treeName)
    {
      Exchange ex = null;
      try
      {
        ex = getNewExchange(treeName, true);
        // Work around a problem with forced shutdown right after tree creation.
        // Tree operations are not part of the journal, so force a couple operations to be able to recover.
        ByteString dummyKey = ByteString.valueOfUtf8(DUMMY_RECORD);
        put(treeName, dummyKey, ByteString.empty());
        delete(treeName, dummyKey);
      }
      catch (final PersistitException | RollbackException e)
      {
        throw new StorageRuntimeException(e);
      }
      finally
      {
        releaseExchange(ex);
      }
    }

    /** Returns the cached exchange for the tree, creating and caching it on first use. */
    private Exchange getExchangeFromCache(final TreeName treeName) throws PersistitException
    {
      Exchange exchange = exchanges.get(treeName);
      if (exchange == null)
      {
        exchange = getNewExchange(treeName, false);
        exchanges.put(treeName, exchange);
      }
      return exchange;
    }

    @Override
    public void close()
    {
      // Return all cached exchanges to the Persistit pool.
      for (final Exchange ex : exchanges.values())
      {
        releaseExchange(ex);
      }
      exchanges.clear();
    }
  }

  /** PersistIt read-only implementation of {@link StorageImpl} interface.
*/ 594 private final class ReadOnlyStorageImpl implements StorageImpl { 595 private final WriteableStorageImpl delegate; 596 597 ReadOnlyStorageImpl(WriteableStorageImpl delegate) 598 { 599 this.delegate = delegate; 600 } 601 602 @Override 603 public ByteString read(TreeName treeName, ByteSequence key) 604 { 605 return delegate.read(treeName, key); 606 } 607 608 @Override 609 public Cursor<ByteString, ByteString> openCursor(TreeName treeName) 610 { 611 return delegate.openCursor(treeName); 612 } 613 614 @Override 615 public long getRecordCount(TreeName treeName) 616 { 617 return delegate.getRecordCount(treeName); 618 } 619 620 @Override 621 public void openTree(TreeName treeName, boolean createOnDemand) 622 { 623 if (createOnDemand) 624 { 625 throw new ReadOnlyStorageException(); 626 } 627 Exchange ex = null; 628 try 629 { 630 ex = getNewExchange(treeName, false); 631 } 632 catch (final TreeNotFoundException e) 633 { 634 // ignore missing trees. 635 } 636 catch (final PersistitException | RollbackException e) 637 { 638 throw new StorageRuntimeException(e); 639 } 640 finally 641 { 642 releaseExchange(ex); 643 } 644 } 645 646 @Override 647 public void close() 648 { 649 delegate.close(); 650 } 651 652 @Override 653 public void deleteTree(TreeName name) 654 { 655 throw new ReadOnlyStorageException(); 656 } 657 658 @Override 659 public void put(TreeName treeName, ByteSequence key, ByteSequence value) 660 { 661 throw new ReadOnlyStorageException(); 662 } 663 664 @Override 665 public boolean update(TreeName treeName, ByteSequence key, UpdateFunction f) 666 { 667 throw new ReadOnlyStorageException(); 668 } 669 670 @Override 671 public boolean delete(TreeName treeName, ByteSequence key) 672 { 673 throw new ReadOnlyStorageException(); 674 } 675 } 676 677 Exchange getNewExchange(final TreeName treeName, final boolean create) throws PersistitException 678 { 679 final Exchange ex = db.getExchange(volume, treeName.toString(), create); 680 
ex.setMaximumValueSize(Value.MAXIMUM_SIZE); 681 return ex; 682 } 683 684 void releaseExchange(Exchange ex) 685 { 686 // Don't keep exchanges with enlarged value - let them be GC'd. 687 // This is also done internally by Persistit in TransactionPlayer line 197. 688 if (ex.getValue().getEncodedBytes().length < Value.DEFAULT_MAXIMUM_SIZE) 689 { 690 db.releaseExchange(ex); 691 } 692 } 693 694 private StorageImpl newStorageImpl() { 695 final WriteableStorageImpl writeableStorage = new WriteableStorageImpl(); 696 return accessMode.isWriteable() ? writeableStorage : new ReadOnlyStorageImpl(writeableStorage); 697 } 698 699 private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); 700 private final ThreadLocal<Boolean> isInsideWriteTransaction = new ThreadLocal<Boolean>(); 701 702 private final ServerContext serverContext; 703 private final File backendDirectory; 704 private AccessMode accessMode; 705 private Persistit db; 706 private Volume volume; 707 private PDBBackendCfg config; 708 private DiskSpaceMonitor diskMonitor; 709 private PDBMonitor monitor; 710 private MemoryQuota memQuota; 711 private StorageStatus storageStatus = StorageStatus.working(); 712 713 /** 714 * Creates a new persistit storage with the provided configuration. 715 * 716 * @param cfg 717 * The configuration. 718 * @param serverContext 719 * This server instance context 720 * @throws ConfigException if memory cannot be reserved 721 */ 722 // FIXME: should be package private once importer is decoupled. 
  public PDBStorage(final PDBBackendCfg cfg, ServerContext serverContext) throws ConfigException
  {
    this.serverContext = serverContext;
    backendDirectory = getBackendDirectory(cfg);
    config = cfg;
    cfg.addPDBChangeListener(this);
  }

  /** Builds a configuration tuned for bulk import: fixed small cache and SOFT commit policy. */
  private Configuration buildImportConfiguration()
  {
    final Configuration dbCfg = buildConfiguration(AccessMode.READ_WRITE);
    getBufferPoolCfg(dbCfg).setMaximumMemory(IMPORT_DB_CACHE_SIZE);
    dbCfg.setCommitPolicy(SOFT);
    return dbCfg;
  }

  /**
   * Builds the Persistit configuration from the backend configuration and reserves
   * the configured cache memory from the server-wide memory quota.
   * The quota is given back in {@code close()}.
   */
  private Configuration buildConfiguration(AccessMode accessMode)
  {
    this.accessMode = accessMode;

    final Configuration dbCfg = new Configuration();
    dbCfg.setLogFile(new File(backendDirectory, VOLUME_NAME + ".log").getPath());
    dbCfg.setJournalPath(new File(backendDirectory, JOURNAL_NAME).getPath());
    dbCfg.setCheckpointInterval(config.getDBCheckpointerWakeupInterval());
    // Volume is opened read write because recovery will fail if opened read-only
    dbCfg.setVolumeList(asList(new VolumeSpecification(new File(backendDirectory, VOLUME_NAME).getPath(), null,
        BUFFER_SIZE, 4096, Long.MAX_VALUE / BUFFER_SIZE, 2048, true, false, false)));
    final BufferPoolConfiguration bufferPoolCfg = getBufferPoolCfg(dbCfg);
    bufferPoolCfg.setMaximumCount(Integer.MAX_VALUE);

    diskMonitor = serverContext.getDiskSpaceMonitor();
    memQuota = serverContext.getMemoryQuota();
    // The cache is either a fixed size or a percentage of the available memory.
    if (config.getDBCacheSize() > 0)
    {
      bufferPoolCfg.setMaximumMemory(config.getDBCacheSize());
      memQuota.acquireMemory(config.getDBCacheSize());
    }
    else
    {
      bufferPoolCfg.setMaximumMemory(memQuota.memPercentToBytes(config.getDBCachePercent()));
      memQuota.acquireMemory(memQuota.memPercentToBytes(config.getDBCachePercent()));
    }
    dbCfg.setCommitPolicy(config.isDBTxnNoSync() ? SOFT : GROUP);
    dbCfg.setJmxEnabled(false);
    return dbCfg;
  }

  /** Closes the database and returns the acquired cache memory to the server-wide quota. */
  @Override
  public void close()
  {
    if (db != null)
    {
      DirectoryServer.deregisterMonitorProvider(monitor);
      monitor = null;
      try
      {
        db.close();
        db = null;
      }
      catch (final PersistitException e)
      {
        throw new IllegalStateException(e);
      }
    }
    // Release exactly what buildConfiguration() acquired.
    if (config.getDBCacheSize() > 0)
    {
      memQuota.releaseMemory(config.getDBCacheSize());
    }
    else
    {
      memQuota.releaseMemory(memQuota.memPercentToBytes(config.getDBCachePercent()));
    }
    config.removePDBChangeListener(this);
    diskMonitor.deregisterMonitoredDirectory(getDirectory(), this);
  }

  /** Returns the single buffer pool configuration, keyed by the page size. */
  private static BufferPoolConfiguration getBufferPoolCfg(Configuration dbCfg)
  {
    return dbCfg.getBufferPoolMap().get(BUFFER_SIZE);
  }

  @Override
  public void open(AccessMode accessMode) throws ConfigException, StorageRuntimeException
  {
    Reject.ifNull(accessMode, "accessMode must not be null");
    open0(buildConfiguration(accessMode));
  }

  /** Opens the database with the given Persistit configuration and registers its monitor. */
  private void open0(final Configuration dbCfg) throws ConfigException
  {
    setupStorageFiles(backendDirectory, config.getDBDirectoryPermissions(), config.dn());
    try
    {
      if (db != null)
      {
        throw new IllegalStateException(
            "Database is already open, either the backend is enabled or an import is currently running.");
      }
      db = new Persistit(dbCfg);

      final long bufferCount = getBufferPoolCfg(dbCfg).computeBufferCount(db.getAvailableHeap());
      final long totalSize = bufferCount * BUFFER_SIZE / 1024;
      logger.info(NOTE_PDB_MEMORY_CFG, config.getBackendId(), bufferCount, BUFFER_SIZE, totalSize);

      db.initialize();
      volume = db.loadVolume(VOLUME_NAME);
      monitor = new PDBMonitor(config.getBackendId() + " PDB Database", db);
      DirectoryServer.registerMonitorProvider(monitor);
    }
    catch(final InUseException e) {
      // Another process already holds the database files.
      throw new StorageInUseException(e);
    }
    catch (final PersistitException | RollbackException e)
    {
      throw new StorageRuntimeException(e);
    }
    registerMonitoredDirectory(config);
  }

  @Override
  public <T> T read(final ReadOperation<T> operation) throws Exception
  {
    // This check may be unnecessary for PDB, but it will help us detect bad business logic
    // in the pluggable backend that would cause problems for JE.
    throwIfNestedInWriteTransaction();

    final Transaction txn = db.getTransaction();
    // Optimistic concurrency: retry the operation for as long as Persistit
    // reports a conflict via RollbackException.
    for (;;)
    {
      txn.begin();
      try
      {
        try (final StorageImpl storageImpl = newStorageImpl())
        {
          final T result = operation.run(storageImpl);
          txn.commit();
          return result;
        }
        catch (final StorageRuntimeException e)
        {
          // Unwrap so callers see the original exception rather than the wrapper.
          if (e.getCause() != null)
          {
            throw (Exception) e.getCause();
          }
          throw e;
        }
      }
      catch (final RollbackException e)
      {
        // retry
      }
      catch (final Exception e)
      {
        txn.rollback();
        throw e;
      }
      finally
      {
        txn.end();
      }
    }
  }

  @Override
  public Importer startImport() throws ConfigException, StorageRuntimeException
  {
    open0(buildImportConfiguration());
    return new ImporterImpl();
  }

  @Override
  public void write(final WriteOperation operation) throws Exception
  {
    throwIfNestedInWriteTransaction();

    final Transaction txn = db.getTransaction();
    // Optimistic concurrency: retry the operation for as long as Persistit
    // reports a conflict via RollbackException.
    for (;;)
    {
      txn.begin();
      // Flag this thread so nested write transactions are detected and rejected.
      isInsideWriteTransaction.set(Boolean.TRUE);
      try
      {
        try (final StorageImpl storageImpl = newStorageImpl())
        {
          operation.run(storageImpl);
          txn.commit();
          return;
        }
        catch (final StorageRuntimeException e)
        {
          // Unwrap so callers see the original exception rather than the wrapper.
          if (e.getCause() != null)
          {
            throw (Exception) e.getCause();
          }
          throw e;
        }
      }
      catch (final RollbackException e)
      {
        // retry after random sleep (reduces transactions collision. Drawback: increased latency)
        Thread.sleep((long) (Math.random() * MAX_SLEEP_ON_RETRY_MS));
      }
      catch (final Exception e)
      {
        txn.rollback();
        throw e;
      }
      finally
      {
        txn.end();
        isInsideWriteTransaction.set(Boolean.FALSE);
      }
    }
  }

  /**
   * A nested transaction within a write transaction may cause a self-deadlock where an inner read
   * attempts to read-lock a record that has been write-locked in an outer write.
   * <p>
   * It would also be good to forbid any nested transactions, but it is impractical due to some
   * transactions being deeply nested into the call hierarchy.
   *
   * @see <a href="https://bugster.forgerock.org/jira/browse/OPENDJ-2645">OPENDJ-2645</a>
   */
  private void throwIfNestedInWriteTransaction()
  {
    if (Boolean.TRUE.equals(isInsideWriteTransaction.get()))
    {
      throw new IllegalStateException("OpenDJ does not support transactions nested in a write transaction. "
          + "Code is forbidden from opening one.");
    }
  }

  @Override
  public boolean supportsBackupAndRestore()
  {
    return true;
  }

  @Override
  public File getDirectory()
  {
    return getBackendDirectory(config);
  }

  /** Resolves the on-disk database directory from the backend configuration. */
  private static File getBackendDirectory(PDBBackendCfg cfg)
  {
    return getDBDirectory(cfg.getDBDirectory(), cfg.getBackendId());
  }

  @Override
  public ListIterator<Path> getFilesToBackup() throws DirectoryException
  {
    try
    {
      if (db == null)
      {
        return getFilesToBackupWhenOffline();
      }

      // FIXME: use full programmatic way of retrieving backup file once available in persistIt
      // When requesting files to backup, append only mode must also be set (-a) otherwise it will be ended
      // by PersistIt and performing backup may corrupt the DB.
986 String filesAsString = db.getManagement().execute("backup -a -f"); 987 String[] allFiles = filesAsString.split("[\r\n]+"); 988 final List<Path> files = new ArrayList<>(); 989 for (String file : allFiles) 990 { 991 files.add(Paths.get(file)); 992 } 993 return files.listIterator(); 994 } 995 catch (Exception e) 996 { 997 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 998 ERR_BACKEND_LIST_FILES_TO_BACKUP.get(config.getBackendId(), stackTraceToSingleLineString(e))); 999 } 1000 } 1001 1002 /** Filter to retrieve the database files to backup. */ 1003 private static final FileFilter BACKUP_FILES_FILTER = new FileFilter() 1004 { 1005 @Override 1006 public boolean accept(File file) 1007 { 1008 String name = file.getName(); 1009 return VOLUME_NAME.equals(name) || name.matches(JOURNAL_NAME + "\\.\\d+$"); 1010 } 1011 }; 1012 1013 /** 1014 * Returns the list of files to backup when there is no open database. 1015 * <p> 1016 * It is not possible to rely on the database returning the files, so the files must be retrieved 1017 * from a file filter. 
1018 */ 1019 private ListIterator<Path> getFilesToBackupWhenOffline() throws DirectoryException 1020 { 1021 return BackupManager.getFiles(getDirectory(), BACKUP_FILES_FILTER, config.getBackendId()).listIterator(); 1022 } 1023 1024 @Override 1025 public Path beforeRestore() throws DirectoryException 1026 { 1027 return null; 1028 } 1029 1030 @Override 1031 public boolean isDirectRestore() 1032 { 1033 // restore is done in an intermediate directory 1034 return false; 1035 } 1036 1037 @Override 1038 public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException 1039 { 1040 // intermediate directory content is moved to database directory 1041 File targetDirectory = getDirectory(); 1042 recursiveDelete(targetDirectory); 1043 try 1044 { 1045 Files.move(restoreDirectory, targetDirectory.toPath()); 1046 } 1047 catch(IOException e) 1048 { 1049 LocalizableMessage msg = ERR_CANNOT_RENAME_RESTORE_DIRECTORY.get(restoreDirectory, targetDirectory.getPath()); 1050 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), msg); 1051 } 1052 } 1053 1054 /** 1055 * Switch the database in append only mode. 1056 * <p> 1057 * This is a mandatory operation before performing a backup. 1058 */ 1059 private void switchToAppendOnlyMode() throws DirectoryException 1060 { 1061 try 1062 { 1063 // FIXME: use full programmatic way of switching to this mode once available in persistIt 1064 db.getManagement().execute("backup -a -c"); 1065 } 1066 catch (RemoteException e) 1067 { 1068 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 1069 ERR_BACKEND_SWITCH_TO_APPEND_MODE.get(config.getBackendId(), stackTraceToSingleLineString(e))); 1070 } 1071 } 1072 1073 /** 1074 * Terminate the append only mode of the database. 1075 * <p> 1076 * This should be called only when database was previously switched to append only mode. 
1077 */ 1078 private void endAppendOnlyMode() throws DirectoryException 1079 { 1080 try 1081 { 1082 // FIXME: use full programmatic way of ending append mode once available in persistIt 1083 db.getManagement().execute("backup -e"); 1084 } 1085 catch (RemoteException e) 1086 { 1087 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 1088 ERR_BACKEND_END_APPEND_MODE.get(config.getBackendId(), stackTraceToSingleLineString(e))); 1089 } 1090 } 1091 1092 @Override 1093 public void createBackup(BackupConfig backupConfig) throws DirectoryException 1094 { 1095 if (db != null) 1096 { 1097 switchToAppendOnlyMode(); 1098 } 1099 try 1100 { 1101 new BackupManager(config.getBackendId()).createBackup(this, backupConfig); 1102 } 1103 finally 1104 { 1105 if (db != null) 1106 { 1107 endAppendOnlyMode(); 1108 } 1109 } 1110 } 1111 1112 @Override 1113 public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException 1114 { 1115 new BackupManager(config.getBackendId()).removeBackup(backupDirectory, backupID); 1116 } 1117 1118 @Override 1119 public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException 1120 { 1121 new BackupManager(config.getBackendId()).restoreBackup(this, restoreConfig); 1122 } 1123 1124 @Override 1125 public Set<TreeName> listTrees() 1126 { 1127 try 1128 { 1129 String[] treeNames = volume.getTreeNames(); 1130 final Set<TreeName> results = new HashSet<>(treeNames.length); 1131 for (String treeName : treeNames) 1132 { 1133 if (!treeName.equals("_classIndex")) 1134 { 1135 results.add(TreeName.valueOf(treeName)); 1136 } 1137 } 1138 return results; 1139 } 1140 catch (PersistitException e) 1141 { 1142 throw new StorageRuntimeException(e); 1143 } 1144 } 1145 1146 /** 1147 * TODO: it would be nice to use the low-level key/value APIs. They seem quite 1148 * inefficient at the moment for simple byte arrays. 
1149 */ 1150 private static Key bytesToKey(final Key key, final ByteSequence bytes) 1151 { 1152 final byte[] tmp = bytes.toByteArray(); 1153 return key.clear().appendByteArray(tmp, 0, tmp.length); 1154 } 1155 1156 private static Value bytesToValue(final Value value, final ByteSequence bytes) 1157 { 1158 value.clear().putByteArray(bytes.toByteArray()); 1159 return value; 1160 } 1161 1162 private static ByteString valueToBytes(final Value value) 1163 { 1164 if (value.isDefined()) 1165 { 1166 return ByteString.wrap(value.getByteArray()); 1167 } 1168 return null; 1169 } 1170 1171 @Override 1172 public boolean isConfigurationChangeAcceptable(PDBBackendCfg newCfg, 1173 List<LocalizableMessage> unacceptableReasons) 1174 { 1175 long newSize = computeSize(newCfg); 1176 long oldSize = computeSize(config); 1177 return (newSize <= oldSize || memQuota.isMemoryAvailable(newSize - oldSize)) 1178 && checkConfigurationDirectories(newCfg, unacceptableReasons); 1179 } 1180 1181 private long computeSize(PDBBackendCfg cfg) 1182 { 1183 return cfg.getDBCacheSize() > 0 ? cfg.getDBCacheSize() : memQuota.memPercentToBytes(cfg.getDBCachePercent()); 1184 } 1185 1186 /** 1187 * Checks newly created backend has a valid configuration. 
1188 * @param cfg the new configuration 1189 * @param unacceptableReasons the list of accumulated errors and their messages 1190 * @param context the server context 1191 * @return true if newly created backend has a valid configuration 1192 */ 1193 static boolean isConfigurationAcceptable(PDBBackendCfg cfg, List<LocalizableMessage> unacceptableReasons, 1194 ServerContext context) 1195 { 1196 if (context != null) 1197 { 1198 MemoryQuota memQuota = context.getMemoryQuota(); 1199 if (cfg.getDBCacheSize() > 0 && !memQuota.isMemoryAvailable(cfg.getDBCacheSize())) 1200 { 1201 unacceptableReasons.add(ERR_BACKEND_CONFIG_CACHE_SIZE_GREATER_THAN_JVM_HEAP.get( 1202 cfg.getDBCacheSize(), memQuota.getAvailableMemory())); 1203 return false; 1204 } 1205 else if (!memQuota.isMemoryAvailable(memQuota.memPercentToBytes(cfg.getDBCachePercent()))) 1206 { 1207 unacceptableReasons.add(ERR_BACKEND_CONFIG_CACHE_PERCENT_GREATER_THAN_JVM_HEAP.get( 1208 cfg.getDBCachePercent(), memQuota.memBytesToPercent(memQuota.getAvailableMemory()))); 1209 return false; 1210 } 1211 } 1212 return checkConfigurationDirectories(cfg, unacceptableReasons); 1213 } 1214 1215 private static boolean checkConfigurationDirectories(PDBBackendCfg cfg, 1216 List<LocalizableMessage> unacceptableReasons) 1217 { 1218 final ConfigChangeResult ccr = new ConfigChangeResult(); 1219 File newBackendDirectory = getBackendDirectory(cfg); 1220 1221 checkDBDirExistsOrCanCreate(newBackendDirectory, ccr, true); 1222 checkDBDirPermissions(cfg.getDBDirectoryPermissions(), cfg.dn(), ccr); 1223 if (!ccr.getMessages().isEmpty()) 1224 { 1225 unacceptableReasons.addAll(ccr.getMessages()); 1226 return false; 1227 } 1228 return true; 1229 } 1230 1231 @Override 1232 public ConfigChangeResult applyConfigurationChange(PDBBackendCfg cfg) 1233 { 1234 final ConfigChangeResult ccr = new ConfigChangeResult(); 1235 1236 try 1237 { 1238 File newBackendDirectory = getBackendDirectory(cfg); 1239 1240 // Create the directory if it doesn't exist. 
1241 if(!cfg.getDBDirectory().equals(config.getDBDirectory())) 1242 { 1243 checkDBDirExistsOrCanCreate(newBackendDirectory, ccr, false); 1244 if (!ccr.getMessages().isEmpty()) 1245 { 1246 return ccr; 1247 } 1248 1249 ccr.setAdminActionRequired(true); 1250 ccr.addMessage(NOTE_CONFIG_DB_DIR_REQUIRES_RESTART.get(config.getDBDirectory(), cfg.getDBDirectory())); 1251 } 1252 1253 if (!cfg.getDBDirectoryPermissions().equalsIgnoreCase(config.getDBDirectoryPermissions()) 1254 || !cfg.getDBDirectory().equals(config.getDBDirectory())) 1255 { 1256 checkDBDirPermissions(cfg.getDBDirectoryPermissions(), cfg.dn(), ccr); 1257 if (!ccr.getMessages().isEmpty()) 1258 { 1259 return ccr; 1260 } 1261 1262 setDBDirPermissions(newBackendDirectory, cfg.getDBDirectoryPermissions(), cfg.dn(), ccr); 1263 if (!ccr.getMessages().isEmpty()) 1264 { 1265 return ccr; 1266 } 1267 } 1268 registerMonitoredDirectory(cfg); 1269 config = cfg; 1270 } 1271 catch (Exception e) 1272 { 1273 addErrorMessage(ccr, LocalizableMessage.raw(stackTraceToSingleLineString(e))); 1274 } 1275 return ccr; 1276 } 1277 1278 private void registerMonitoredDirectory(PDBBackendCfg cfg) 1279 { 1280 diskMonitor.registerMonitoredDirectory( 1281 cfg.getBackendId() + " backend", 1282 getDirectory(), 1283 cfg.getDiskLowThreshold(), 1284 cfg.getDiskFullThreshold(), 1285 this); 1286 } 1287 1288 @Override 1289 public void removeStorageFiles() throws StorageRuntimeException 1290 { 1291 StorageUtils.removeStorageFiles(backendDirectory); 1292 } 1293 1294 @Override 1295 public StorageStatus getStorageStatus() 1296 { 1297 return storageStatus; 1298 } 1299 1300 @Override 1301 public void diskFullThresholdReached(File directory, long thresholdInBytes) { 1302 storageStatus = statusWhenDiskSpaceFull(directory, thresholdInBytes, config.getBackendId()); 1303 } 1304 1305 @Override 1306 public void diskLowThresholdReached(File directory, long thresholdInBytes) { 1307 storageStatus = statusWhenDiskSpaceLow(directory, thresholdInBytes, 
config.getBackendId()); 1308 } 1309 1310 @Override 1311 public void diskSpaceRestored(File directory, long lowThresholdInBytes, long fullThresholdInBytes) { 1312 storageStatus = StorageStatus.working(); 1313 } 1314}