/*
 * The contents of this file are subject to the terms of the Common Development and
 * Distribution License (the License). You may not use this file except in compliance with the
 * License.
 *
 * You can obtain a copy of the License at legal/CDDLv1.0.txt. See the License for the
 * specific language governing permission and limitations under the License.
 *
 * When distributing Covered Software, include this CDDL Header Notice in each file and include
 * the License file at legal/CDDLv1.0.txt. If applicable, add the following below the CDDL
 * Header, with the fields enclosed by brackets [] replaced by your own identifying
 * information: "Portions Copyright [year] [name of copyright owner]".
 *
 * Copyright 2008 Sun Microsystems, Inc.
 * Portions Copyright 2013-2016 ForgeRock AS.
 */
package org.opends.server.core;

import static org.opends.messages.CoreMessages.*;
import static org.opends.server.config.ConfigConstants.*;
import static org.opends.server.util.StaticUtils.*;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;

import org.forgerock.i18n.LocalizableMessage;
import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.io.ASN1;
import org.forgerock.opendj.io.ASN1Reader;
import org.forgerock.opendj.io.ASN1Writer;
import org.forgerock.opendj.ldap.ByteString;
import org.opends.server.api.CompressedSchema;
import org.opends.server.types.DirectoryException;

/**
 * This class provides a default implementation of a compressed schema manager
 * that will store the schema definitions in a binary file
 * (config/schematokens.dat).
044 */ 045public final class DefaultCompressedSchema extends CompressedSchema 046{ 047 private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); 048 049 /** Synchronizes calls to save. */ 050 private final Object saveLock = new Object(); 051 052 053 054 /** 055 * Creates a new instance of this compressed schema manager. 056 * 057 * @param serverContext 058 * The server context. 059 */ 060 public DefaultCompressedSchema(ServerContext serverContext) 061 { 062 super(serverContext); 063 load(); 064 } 065 066 067 068 /** {@inheritDoc} */ 069 @Override 070 protected void storeAttribute(final byte[] encodedAttribute, 071 final String attributeName, final Iterable<String> attributeOptions) 072 throws DirectoryException 073 { 074 save(); 075 } 076 077 078 079 /** {@inheritDoc} */ 080 @Override 081 protected void storeObjectClasses(final byte[] encodedObjectClasses, 082 final Collection<String> objectClassNames) throws DirectoryException 083 { 084 save(); 085 } 086 087 088 089 /** 090 * Loads the compressed schema information from disk. 091 */ 092 private void load() 093 { 094 FileInputStream inputStream = null; 095 096 try 097 { 098 // Determine the location of the compressed schema data file. It should 099 // be in the config directory with a name of "schematokens.dat". If that 100 // file doesn't exist, then don't do anything. 101 final String path = DirectoryServer.getInstanceRoot() + File.separator 102 + CONFIG_DIR_NAME + File.separator + COMPRESSED_SCHEMA_FILE_NAME; 103 if (!new File(path).exists()) 104 { 105 return; 106 } 107 inputStream = new FileInputStream(path); 108 final ASN1Reader reader = ASN1.getReader(inputStream); 109 110 // The first element in the file should be a sequence of object class 111 // sets. Each object class set will itself be a sequence of octet 112 // strings, where the first one is the token and the remaining elements 113 // are the names of the associated object classes. 
114 reader.readStartSequence(); 115 while (reader.hasNextElement()) 116 { 117 reader.readStartSequence(); 118 final byte[] encodedObjectClasses = reader.readOctetString() 119 .toByteArray(); 120 final List<String> objectClassNames = new LinkedList<>(); 121 while (reader.hasNextElement()) 122 { 123 objectClassNames.add(reader.readOctetStringAsString()); 124 } 125 reader.readEndSequence(); 126 loadObjectClasses(encodedObjectClasses, objectClassNames); 127 } 128 reader.readEndSequence(); 129 130 // The second element in the file should be an integer element that holds 131 // the value to use to initialize the object class counter. 132 reader.readInteger(); // No longer used. 133 134 // The third element in the file should be a sequence of attribute 135 // description components. Each attribute description component will 136 // itself be a sequence of octet strings, where the first one is the 137 // token, the second is the attribute name, and all remaining elements are 138 // the attribute options. 139 reader.readStartSequence(); 140 while (reader.hasNextElement()) 141 { 142 reader.readStartSequence(); 143 final byte[] encodedAttribute = reader.readOctetString().toByteArray(); 144 final String attributeName = reader.readOctetStringAsString(); 145 final List<String> attributeOptions = new LinkedList<>(); 146 while (reader.hasNextElement()) 147 { 148 attributeOptions.add(reader.readOctetStringAsString()); 149 } 150 reader.readEndSequence(); 151 loadAttribute(encodedAttribute, attributeName, attributeOptions); 152 } 153 reader.readEndSequence(); 154 155 // The fourth element in the file should be an integer element that holds 156 // the value to use to initialize the attribute description counter. 157 reader.readInteger(); // No longer used. 158 } 159 catch (final Exception e) 160 { 161 logger.traceException(e); 162 163 // FIXME -- Should we do something else here? 
164 throw new RuntimeException(e); 165 } 166 finally 167 { 168 close(inputStream); 169 } 170 } 171 172 173 174 /** 175 * Writes the compressed schema information to disk. 176 * 177 * @throws DirectoryException 178 * If a problem occurs while writing the updated information. 179 */ 180 private void save() throws DirectoryException 181 { 182 synchronized (saveLock) 183 { 184 FileOutputStream outputStream = null; 185 try 186 { 187 // Determine the location of the "live" compressed schema data file, and 188 // then append ".tmp" to get the name of the temporary file that we will 189 // use. 190 final String path = DirectoryServer.getInstanceRoot() + File.separator 191 + CONFIG_DIR_NAME + File.separator + COMPRESSED_SCHEMA_FILE_NAME; 192 final String tempPath = path + ".tmp"; 193 194 outputStream = new FileOutputStream(tempPath); 195 final ASN1Writer writer = ASN1.getWriter(outputStream); 196 197 // The first element in the file should be a sequence of object class 198 // sets. Each object class set will itself be a sequence of octet 199 // strings, where the first one is the token and the remaining elements 200 // are the names of the associated object classes. 201 writer.writeStartSequence(); 202 int ocCounter = 1; 203 for (final Entry<byte[], Collection<String>> mapEntry : 204 getAllObjectClasses()) 205 { 206 writer.writeStartSequence(); 207 writer.writeOctetString(ByteString.wrap(mapEntry.getKey())); 208 final Collection<String> objectClassNames = mapEntry.getValue(); 209 for (final String ocName : objectClassNames) 210 { 211 writer.writeOctetString(ocName); 212 } 213 writer.writeEndSequence(); 214 ocCounter++; 215 } 216 writer.writeEndSequence(); 217 218 // The second element in the file should be an integer element that 219 // holds the value to use to initialize the object class counter. 220 writer.writeInteger(ocCounter); // No longer used. 221 222 // The third element in the file should be a sequence of attribute 223 // description components. 
Each attribute description component will 224 // itself be a sequence of octet strings, where the first one is the 225 // token, the second is the attribute name, and all remaining elements 226 // are the attribute options. 227 writer.writeStartSequence(); 228 int adCounter = 1; 229 for (final Entry<byte[], Entry<String, Iterable<String>>> mapEntry : getAllAttributes()) 230 { 231 writer.writeStartSequence(); 232 writer.writeOctetString(ByteString.wrap(mapEntry.getKey())); 233 writer.writeOctetString(mapEntry.getValue().getKey()); 234 for (final String option : mapEntry.getValue().getValue()) 235 { 236 writer.writeOctetString(option); 237 } 238 writer.writeEndSequence(); 239 adCounter++; 240 } 241 writer.writeEndSequence(); 242 243 // The fourth element in the file should be an integer element that 244 // holds the value to use to initialize the attribute description 245 // counter. 246 writer.writeInteger(adCounter); // No longer used. 247 248 // Close the writer and swing the temp file into place. 249 outputStream.close(); 250 final File liveFile = new File(path); 251 final File tempFile = new File(tempPath); 252 253 if (liveFile.exists()) 254 { 255 final File saveFile = new File(liveFile.getAbsolutePath() + ".save"); 256 if (saveFile.exists()) 257 { 258 saveFile.delete(); 259 } 260 liveFile.renameTo(saveFile); 261 } 262 tempFile.renameTo(liveFile); 263 } 264 catch (final Exception e) 265 { 266 logger.traceException(e); 267 268 final LocalizableMessage message = ERR_COMPRESSEDSCHEMA_CANNOT_WRITE_UPDATED_DATA 269 .get(stackTraceToSingleLineString(e)); 270 throw new DirectoryException( 271 DirectoryServer.getServerErrorResultCode(), message, e); 272 } 273 finally 274 { 275 close(outputStream); 276 } 277 } 278 } 279 280}