This article collects typical usage examples of the Java class org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor. If you are unsure what DefaultSchemaLdifExtractor is for or how to use it, the hand-picked class examples below should help.
DefaultSchemaLdifExtractor belongs to the org.apache.directory.api.ldap.schemaextractor.impl package. Six code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
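Before the examples, here is a minimal, self-contained sketch of the call sequence the examples below share: extract the bundled schema LDIF files onto disk, then load them into a SchemaManager. This is only an illustration distilled from the examples, not code from any of the listed projects; the class name SchemaExtractionSketch and the path target/partitions are made up, and the import locations assume the same Apache Directory API version used in these examples, so verify them against your dependency.

import java.io.File;
import java.util.List;

import org.apache.directory.api.ldap.model.schema.SchemaManager;
import org.apache.directory.api.ldap.schemaextractor.SchemaLdifExtractor;
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor;
import org.apache.directory.api.ldap.schemaloader.LdifSchemaLoader;
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager;

public class SchemaExtractionSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical output directory; as in the examples below, the extracted
        // LDIF tree is read back from the "schema" subdirectory of this path
        File partitionsDir = new File("target/partitions");

        // Copy the schema LDIF resources bundled on the classpath onto disk
        SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(partitionsDir);
        extractor.extractOrCopy();

        // Load the extracted schema files and enable all schemas
        LdifSchemaLoader loader = new LdifSchemaLoader(new File(partitionsDir, "schema"));
        SchemaManager schemaManager = new DefaultSchemaManager(loader);
        schemaManager.loadAllEnabled();

        // Loading problems are collected rather than thrown
        List<Throwable> errors = schemaManager.getErrors();
        System.out.println("Schema loaded with " + errors.size() + " error(s)");
    }
}

Example 5 below shows a more defensive variant of the same pattern: it only extracts when the schema directory does not already exist and checks schemaManager.getErrors() before wiring the schema partition into the directory service.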
Example 1: doInit
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor; // import the required package/class

/**
 * Partition initialization - loads schema entries from the files on the classpath.
 *
 * @see org.apache.directory.server.core.partition.impl.avl.AvlPartition#doInit()
 */
@Override
protected void doInit() throws Exception {
    if (initialized)
        return;

    LOG.debug("Initializing schema partition " + getId());
    suffixDn.apply(schemaManager);
    super.doInit();

    // load schema
    final Map<String, Boolean> resMap = ResourceMap.getResources(Pattern.compile("schema[/\\Q\\\\E]ou=schema.*"));
    for (String resourcePath : new TreeSet<String>(resMap.keySet())) {
        if (resourcePath.endsWith(".ldif")) {
            URL resource = DefaultSchemaLdifExtractor.getUniqueResource(resourcePath, "Schema LDIF file");
            LdifReader reader = new LdifReader(resource.openStream());
            LdifEntry ldifEntry = reader.next();
            reader.close();

            Entry entry = new DefaultEntry(schemaManager, ldifEntry.getEntry());
            // add mandatory attributes
            if (entry.get(SchemaConstants.ENTRY_CSN_AT) == null) {
                entry.add(SchemaConstants.ENTRY_CSN_AT, defaultCSNFactory.newInstance().toString());
            }
            if (entry.get(SchemaConstants.ENTRY_UUID_AT) == null) {
                entry.add(SchemaConstants.ENTRY_UUID_AT, UUID.randomUUID().toString());
            }
            AddOperationContext addContext = new AddOperationContext(null, entry);
            super.add(addContext);
        }
    }
}
Developer: wildfly, Project: wildfly-core, Lines: 37, Source: InMemorySchemaPartition.java
Example 2: doInit
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor; // import the required package/class

/**
 * Partition initialization - loads schema entries from the files on the classpath.
 *
 * @see org.apache.directory.server.core.partition.impl.avl.AvlPartition#doInit()
 */
@Override
protected void doInit() throws Exception {
    if (initialized)
        return;

    LOG.debugf("Initializing schema partition %s", getId());
    suffixDn.apply(schemaManager);
    super.doInit();

    // load schema
    final Map<String, Boolean> resMap = ResourceMap.getResources(Pattern.compile("schema[/\\Q\\\\E]ou=schema.*"));
    for (String resourcePath : new TreeSet<String>(resMap.keySet())) {
        if (resourcePath.endsWith(".ldif")) {
            URL resource = DefaultSchemaLdifExtractor.getUniqueResource(resourcePath, "Schema LDIF file");
            LdifReader reader = new LdifReader(resource.openStream());
            LdifEntry ldifEntry = reader.next();
            reader.close();

            Entry entry = new DefaultEntry(schemaManager, ldifEntry.getEntry());
            // add mandatory attributes
            if (entry.get(SchemaConstants.ENTRY_CSN_AT) == null) {
                entry.add(SchemaConstants.ENTRY_CSN_AT, defaultCSNFactory.newInstance().toString());
            }
            if (entry.get(SchemaConstants.ENTRY_UUID_AT) == null) {
                entry.add(SchemaConstants.ENTRY_UUID_AT, UUID.randomUUID().toString());
            }
            AddOperationContext addContext = new AddOperationContext(null, entry);
            super.add(addContext);
        }
    }
}
Developer: wildfly, Project: wildfly-core, Lines: 37, Source: InMemorySchemaPartition.java
Example 3: initDirectoryService
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor; // import the required package/class

private void initDirectoryService() throws Exception {
    ds = new DefaultDirectoryService();
    ds.setInstanceLayout(new InstanceLayout(workDir));

    CacheService cacheService = new CacheService();
    ds.setCacheService(cacheService);

    // first load the schema
    InstanceLayout instanceLayout = ds.getInstanceLayout();
    File schemaPartitionDirectory = new File(
            instanceLayout.getPartitionsDirectory(), "schema");
    SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(
            instanceLayout.getPartitionsDirectory());
    extractor.extractOrCopy();

    SchemaLoader loader = new LdifSchemaLoader(schemaPartitionDirectory);
    SchemaManager schemaManager = new DefaultSchemaManager(loader);
    schemaManager.loadAllEnabled();
    ds.setSchemaManager(schemaManager);

    // Init the LdifPartition with schema
    LdifPartition schemaLdifPartition = new LdifPartition(schemaManager);
    schemaLdifPartition.setPartitionPath(schemaPartitionDirectory.toURI());

    // The schema partition
    SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
    schemaPartition.setWrappedPartition(schemaLdifPartition);
    ds.setSchemaPartition(schemaPartition);

    JdbmPartition systemPartition = new JdbmPartition(ds.getSchemaManager());
    systemPartition.setId("system");
    systemPartition.setPartitionPath(new File(
            ds.getInstanceLayout().getPartitionsDirectory(),
            systemPartition.getId()).toURI());
    systemPartition.setSuffixDn(new Dn(ServerDNConstants.SYSTEM_DN));
    systemPartition.setSchemaManager(ds.getSchemaManager());
    ds.setSystemPartition(systemPartition);

    ds.getChangeLog().setEnabled(false);
    ds.setDenormalizeOpAttrsEnabled(true);
    ds.addLast(new KeyDerivationInterceptor());

    // create one partition
    String orgName = conf.getProperty(ORG_NAME).toLowerCase(Locale.ENGLISH);
    String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase(Locale.ENGLISH);
    JdbmPartition partition = new JdbmPartition(ds.getSchemaManager());
    partition.setId(orgName);
    partition.setPartitionPath(new File(
            ds.getInstanceLayout().getPartitionsDirectory(), orgName).toURI());
    partition.setSuffixDn(new Dn("dc=" + orgName + ",dc=" + orgDomain));
    ds.addPartition(partition);

    // indexes
    Set<Index<?, ?, String>> indexedAttributes = new HashSet<Index<?, ?, String>>();
    indexedAttributes.add(new JdbmIndex<String, Entry>("objectClass", false));
    indexedAttributes.add(new JdbmIndex<String, Entry>("dc", false));
    indexedAttributes.add(new JdbmIndex<String, Entry>("ou", false));
    partition.setIndexedAttributes(indexedAttributes);

    // And start the ds
    ds.setInstanceId(conf.getProperty(INSTANCE));
    ds.startup();

    // context entry, after ds.startup()
    Dn dn = new Dn("dc=" + orgName + ",dc=" + orgDomain);
    Entry entry = ds.newEntry(dn);
    entry.add("objectClass", "top", "domain");
    entry.add("dc", orgName);
    ds.getAdminSession().add(entry);
}
Developer: naver, Project: hadoop, Lines: 69, Source: MiniKdc.java
Example 4: initDirectoryService
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor; // import the required package/class

private void initDirectoryService() throws Exception {
    ds = new DefaultDirectoryService();
    ds.setInstanceLayout(new InstanceLayout(workDir));

    CacheService cacheService = new CacheService();
    ds.setCacheService(cacheService);

    // first load the schema
    InstanceLayout instanceLayout = ds.getInstanceLayout();
    File schemaPartitionDirectory = new File(instanceLayout.getPartitionsDirectory(), "schema");
    SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(instanceLayout.getPartitionsDirectory());
    extractor.extractOrCopy();

    SchemaLoader loader = new LdifSchemaLoader(schemaPartitionDirectory);
    SchemaManager schemaManager = new DefaultSchemaManager(loader);
    schemaManager.loadAllEnabled();
    ds.setSchemaManager(schemaManager);

    // Init the LdifPartition with schema
    LdifPartition schemaLdifPartition = new LdifPartition(schemaManager);
    schemaLdifPartition.setPartitionPath(schemaPartitionDirectory.toURI());

    // The schema partition
    SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
    schemaPartition.setWrappedPartition(schemaLdifPartition);
    ds.setSchemaPartition(schemaPartition);

    JdbmPartition systemPartition = new JdbmPartition(ds.getSchemaManager());
    systemPartition.setId("system");
    systemPartition.setPartitionPath(
            new File(ds.getInstanceLayout().getPartitionsDirectory(), systemPartition.getId()).toURI());
    systemPartition.setSuffixDn(new Dn(ServerDNConstants.SYSTEM_DN));
    systemPartition.setSchemaManager(ds.getSchemaManager());
    ds.setSystemPartition(systemPartition);

    ds.getChangeLog().setEnabled(false);
    ds.setDenormalizeOpAttrsEnabled(true);
    ds.addLast(new KeyDerivationInterceptor());

    // create one partition
    String orgName = conf.getProperty(ORG_NAME).toLowerCase(Locale.ENGLISH);
    String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase(Locale.ENGLISH);
    JdbmPartition partition = new JdbmPartition(ds.getSchemaManager());
    partition.setId(orgName);
    partition.setPartitionPath(new File(ds.getInstanceLayout().getPartitionsDirectory(), orgName).toURI());
    partition.setSuffixDn(new Dn("dc=" + orgName + ",dc=" + orgDomain));
    ds.addPartition(partition);

    // indexes
    Set<Index<?, ?, String>> indexedAttributes = new HashSet<Index<?, ?, String>>();
    indexedAttributes.add(new JdbmIndex<String, Entry>("objectClass", false));
    indexedAttributes.add(new JdbmIndex<String, Entry>("dc", false));
    indexedAttributes.add(new JdbmIndex<String, Entry>("ou", false));
    partition.setIndexedAttributes(indexedAttributes);

    // And start the ds
    ds.setInstanceId(conf.getProperty(INSTANCE));
    ds.startup();

    // context entry, after ds.startup()
    Dn dn = new Dn("dc=" + orgName + ",dc=" + orgDomain);
    Entry entry = ds.newEntry(dn);
    entry.add("objectClass", "top", "domain");
    entry.add("dc", orgName);
    ds.getAdminSession().add(entry);
}
Developer: l294265421, Project: ZooKeeper, Lines: 65, Source: MiniKdc.java
Example 5: initSchemaPartition
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor; // import the required package/class

/**
 * Initializes the schema manager and adds the schema partition to the directory service.
 *
 * @throws Exception if the schema LDIF files are not found on the classpath
 */
private void initSchemaPartition() throws Exception
{
    InstanceLayout instanceLayout = directoryService.getInstanceLayout();
    File schemaPartitionDirectory = new File( instanceLayout.getPartitionsDirectory(), "schema" );

    // Extract the schema on disk (a brand new one) and load the registries
    if ( schemaPartitionDirectory.exists() )
    {
        System.out.println( "schema partition already exists, skipping schema extraction" );
    }
    else
    {
        SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor( instanceLayout.getPartitionsDirectory() );
        extractor.extractOrCopy();
    }

    SchemaLoader loader = new LdifSchemaLoader( schemaPartitionDirectory );
    SchemaManager schemaManager = new DefaultSchemaManager( loader );

    // We have to load the schema now, otherwise we won't be able
    // to initialize the Partitions, as we won't be able to parse
    // and normalize their suffix Dn
    schemaManager.loadAllEnabled();

    List<Throwable> errors = schemaManager.getErrors();
    if ( errors.size() != 0 )
    {
        throw new Exception( I18n.err( I18n.ERR_317, Exceptions.printErrors( errors ) ) );
    }

    directoryService.setSchemaManager( schemaManager );

    // Init the LdifPartition with schema
    LdifPartition schemaLdifPartition = new LdifPartition( schemaManager );
    schemaLdifPartition.setPartitionPath( schemaPartitionDirectory.toURI() );

    // The schema partition
    SchemaPartition schemaPartition = new SchemaPartition( schemaManager );
    schemaPartition.setWrappedPartition( schemaLdifPartition );
    directoryService.setSchemaPartition( schemaPartition );
}
Developer: TremoloSecurity, Project: MyVirtualDirectory, Lines: 49, Source: Server.java
Example 6: initDirectoryService
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor; // import the required package/class

private void initDirectoryService() throws Exception {
    ds = new DefaultDirectoryService();
    ds.setInstanceLayout(new InstanceLayout(workDir));

    CacheService cacheService = new CacheService();
    ds.setCacheService(cacheService);

    // first load the schema
    InstanceLayout instanceLayout = ds.getInstanceLayout();
    File schemaPartitionDirectory = new File(
            instanceLayout.getPartitionsDirectory(), "schema");
    SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(
            instanceLayout.getPartitionsDirectory());
    extractor.extractOrCopy();

    SchemaLoader loader = new LdifSchemaLoader(schemaPartitionDirectory);
    SchemaManager schemaManager = new DefaultSchemaManager(loader);
    schemaManager.loadAllEnabled();
    ds.setSchemaManager(schemaManager);

    // Init the LdifPartition with schema
    LdifPartition schemaLdifPartition = new LdifPartition(schemaManager);
    schemaLdifPartition.setPartitionPath(schemaPartitionDirectory.toURI());

    // The schema partition
    SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
    schemaPartition.setWrappedPartition(schemaLdifPartition);
    ds.setSchemaPartition(schemaPartition);

    JdbmPartition systemPartition = new JdbmPartition(ds.getSchemaManager());
    systemPartition.setId("system");
    systemPartition.setPartitionPath(new File(
            ds.getInstanceLayout().getPartitionsDirectory(),
            systemPartition.getId()).toURI());
    systemPartition.setSuffixDn(new Dn(ServerDNConstants.SYSTEM_DN));
    systemPartition.setSchemaManager(ds.getSchemaManager());
    ds.setSystemPartition(systemPartition);

    ds.getChangeLog().setEnabled(false);
    ds.setDenormalizeOpAttrsEnabled(true);
    ds.addLast(new KeyDerivationInterceptor());

    // create one partition
    String orgName = conf.getProperty(ORG_NAME).toLowerCase();
    String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase();
    JdbmPartition partition = new JdbmPartition(ds.getSchemaManager());
    partition.setId(orgName);
    partition.setPartitionPath(new File(
            ds.getInstanceLayout().getPartitionsDirectory(), orgName).toURI());
    partition.setSuffixDn(new Dn("dc=" + orgName + ",dc=" + orgDomain));
    ds.addPartition(partition);

    // indexes
    Set<Index<?, ?, String>> indexedAttributes = new HashSet<Index<?, ?, String>>();
    indexedAttributes.add(new JdbmIndex<String, Entry>("objectClass", false));
    indexedAttributes.add(new JdbmIndex<String, Entry>("dc", false));
    indexedAttributes.add(new JdbmIndex<String, Entry>("ou", false));
    partition.setIndexedAttributes(indexedAttributes);

    // And start the ds
    ds.setInstanceId(conf.getProperty(INSTANCE));
    ds.startup();

    // context entry, after ds.startup()
    Dn dn = new Dn("dc=" + orgName + ",dc=" + orgDomain);
    Entry entry = ds.newEntry(dn);
    entry.add("objectClass", "top", "domain");
    entry.add("dc", orgName);
    ds.getAdminSession().add(entry);
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 69, Source: MiniKdc.java
Note: The org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor examples in this article are collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before redistributing or using the code; do not republish without permission.