This article collects typical usage examples of the Java class org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager. If you have been wondering what DefaultSchemaManager is for and how it is used in practice, the hand-picked examples below should help.
The DefaultSchemaManager class belongs to the org.apache.directory.api.ldap.schemamanager.impl package. Five code examples of the class are shown below, ordered by popularity by default.
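Before the examples, here is a minimal sketch of the pattern they all share: build a SchemaLoader, pass it to DefaultSchemaManager, call loadAllEnabled(), and inspect getErrors() before wiring the manager into a directory service. The helper method name and the loader import are assumptions for illustration (loader packages differ between Apache Directory API releases); the DefaultSchemaManager calls themselves are taken from the examples below.
import org.apache.directory.api.ldap.model.schema.SchemaManager;
import org.apache.directory.api.ldap.model.schema.registries.SchemaLoader;
import org.apache.directory.api.ldap.schema.loader.JarLdifSchemaLoader; // assumed 1.0.x package; may differ in other releases
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager;

private SchemaManager loadSchemaManager() throws Exception { // hypothetical helper, not from the examples
  // Load every enabled schema shipped as LDIF on the classpath.
  SchemaLoader loader = new JarLdifSchemaLoader();
  SchemaManager schemaManager = new DefaultSchemaManager(loader);
  schemaManager.loadAllEnabled();

  // loadAllEnabled() records problems instead of throwing, so check the error list.
  if (!schemaManager.getErrors().isEmpty()) {
    throw new IllegalStateException("Schema load failed: " + schemaManager.getErrors());
  }
  return schemaManager;
}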
Example 1: initDirectoryService
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager; // import the required package/class
private void initDirectoryService() throws Exception {
  ds = new DefaultDirectoryService();
  ds.setInstanceLayout(new InstanceLayout(workDir));

  CacheService cacheService = new CacheService();
  ds.setCacheService(cacheService);

  // first load the schema
  InstanceLayout instanceLayout = ds.getInstanceLayout();
  File schemaPartitionDirectory = new File(
      instanceLayout.getPartitionsDirectory(), "schema");
  SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(
      instanceLayout.getPartitionsDirectory());
  extractor.extractOrCopy();

  SchemaLoader loader = new LdifSchemaLoader(schemaPartitionDirectory);
  SchemaManager schemaManager = new DefaultSchemaManager(loader);
  schemaManager.loadAllEnabled();
  ds.setSchemaManager(schemaManager);
  // Init the LdifPartition with schema
  LdifPartition schemaLdifPartition = new LdifPartition(schemaManager);
  schemaLdifPartition.setPartitionPath(schemaPartitionDirectory.toURI());

  // The schema partition
  SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
  schemaPartition.setWrappedPartition(schemaLdifPartition);
  ds.setSchemaPartition(schemaPartition);

  JdbmPartition systemPartition = new JdbmPartition(ds.getSchemaManager());
  systemPartition.setId("system");
  systemPartition.setPartitionPath(new File(
      ds.getInstanceLayout().getPartitionsDirectory(),
      systemPartition.getId()).toURI());
  systemPartition.setSuffixDn(new Dn(ServerDNConstants.SYSTEM_DN));
  systemPartition.setSchemaManager(ds.getSchemaManager());
  ds.setSystemPartition(systemPartition);

  ds.getChangeLog().setEnabled(false);
  ds.setDenormalizeOpAttrsEnabled(true);
  ds.addLast(new KeyDerivationInterceptor());

  // create one partition
  String orgName = conf.getProperty(ORG_NAME).toLowerCase(Locale.ENGLISH);
  String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase(Locale.ENGLISH);

  JdbmPartition partition = new JdbmPartition(ds.getSchemaManager());
  partition.setId(orgName);
  partition.setPartitionPath(new File(
      ds.getInstanceLayout().getPartitionsDirectory(), orgName).toURI());
  partition.setSuffixDn(new Dn("dc=" + orgName + ",dc=" + orgDomain));
  ds.addPartition(partition);

  // indexes
  Set<Index<?, ?, String>> indexedAttributes = new HashSet<Index<?, ?, String>>();
  indexedAttributes.add(new JdbmIndex<String, Entry>("objectClass", false));
  indexedAttributes.add(new JdbmIndex<String, Entry>("dc", false));
  indexedAttributes.add(new JdbmIndex<String, Entry>("ou", false));
  partition.setIndexedAttributes(indexedAttributes);

  // And start the ds
  ds.setInstanceId(conf.getProperty(INSTANCE));
  ds.startup();

  // context entry, after ds.startup()
  Dn dn = new Dn("dc=" + orgName + ",dc=" + orgDomain);
  Entry entry = ds.newEntry(dn);
  entry.add("objectClass", "top", "domain");
  entry.add("dc", orgName);
  ds.getAdminSession().add(entry);
}
Developer: naver, Project: hadoop, Lines: 69, Source file: MiniKdc.java
Example 2: initDirectoryService
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager; // import the required package/class
private void initDirectoryService() throws Exception {
  ds = new DefaultDirectoryService();
  ds.setInstanceLayout(new InstanceLayout(workDir));

  CacheService cacheService = new CacheService();
  ds.setCacheService(cacheService);

  // first load the schema
  InstanceLayout instanceLayout = ds.getInstanceLayout();
  File schemaPartitionDirectory = new File(instanceLayout.getPartitionsDirectory(), "schema");
  SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(instanceLayout.getPartitionsDirectory());
  extractor.extractOrCopy();

  SchemaLoader loader = new LdifSchemaLoader(schemaPartitionDirectory);
  SchemaManager schemaManager = new DefaultSchemaManager(loader);
  schemaManager.loadAllEnabled();
  ds.setSchemaManager(schemaManager);
  // Init the LdifPartition with schema
  LdifPartition schemaLdifPartition = new LdifPartition(schemaManager);
  schemaLdifPartition.setPartitionPath(schemaPartitionDirectory.toURI());

  // The schema partition
  SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
  schemaPartition.setWrappedPartition(schemaLdifPartition);
  ds.setSchemaPartition(schemaPartition);

  JdbmPartition systemPartition = new JdbmPartition(ds.getSchemaManager());
  systemPartition.setId("system");
  systemPartition.setPartitionPath(
      new File(ds.getInstanceLayout().getPartitionsDirectory(), systemPartition.getId()).toURI());
  systemPartition.setSuffixDn(new Dn(ServerDNConstants.SYSTEM_DN));
  systemPartition.setSchemaManager(ds.getSchemaManager());
  ds.setSystemPartition(systemPartition);

  ds.getChangeLog().setEnabled(false);
  ds.setDenormalizeOpAttrsEnabled(true);
  ds.addLast(new KeyDerivationInterceptor());

  // create one partition
  String orgName = conf.getProperty(ORG_NAME).toLowerCase(Locale.ENGLISH);
  String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase(Locale.ENGLISH);

  JdbmPartition partition = new JdbmPartition(ds.getSchemaManager());
  partition.setId(orgName);
  partition.setPartitionPath(new File(ds.getInstanceLayout().getPartitionsDirectory(), orgName).toURI());
  partition.setSuffixDn(new Dn("dc=" + orgName + ",dc=" + orgDomain));
  ds.addPartition(partition);

  // indexes
  Set<Index<?, ?, String>> indexedAttributes = new HashSet<Index<?, ?, String>>();
  indexedAttributes.add(new JdbmIndex<String, Entry>("objectClass", false));
  indexedAttributes.add(new JdbmIndex<String, Entry>("dc", false));
  indexedAttributes.add(new JdbmIndex<String, Entry>("ou", false));
  partition.setIndexedAttributes(indexedAttributes);

  // And start the ds
  ds.setInstanceId(conf.getProperty(INSTANCE));
  ds.startup();

  // context entry, after ds.startup()
  Dn dn = new Dn("dc=" + orgName + ",dc=" + orgDomain);
  Entry entry = ds.newEntry(dn);
  entry.add("objectClass", "top", "domain");
  entry.add("dc", orgName);
  ds.getAdminSession().add(entry);
}
Developer: l294265421, Project: ZooKeeper, Lines: 65, Source file: MiniKdc.java
Example 3: initSchemaPartition
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager; // import the required package/class
/**
 * Initialize the schema manager and add the schema partition to the directory service.
 *
 * @throws Exception if the schema LDIF files are not found on the classpath
 */
private void initSchemaPartition() throws Exception
{
    InstanceLayout instanceLayout = directoryService.getInstanceLayout();

    File schemaPartitionDirectory = new File( instanceLayout.getPartitionsDirectory(), "schema" );

    // Extract the schema on disk (a brand new one) and load the registries
    if ( schemaPartitionDirectory.exists() )
    {
        System.out.println( "schema partition already exists, skipping schema extraction" );
    }
    else
    {
        SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor( instanceLayout.getPartitionsDirectory() );
        extractor.extractOrCopy();
    }

    SchemaLoader loader = new LdifSchemaLoader( schemaPartitionDirectory );
    SchemaManager schemaManager = new DefaultSchemaManager( loader );

    // We have to load the schema now, otherwise we won't be able
    // to initialize the Partitions, as we won't be able to parse
    // and normalize their suffix Dn
    schemaManager.loadAllEnabled();

    List<Throwable> errors = schemaManager.getErrors();

    if ( errors.size() != 0 )
    {
        throw new Exception( I18n.err( I18n.ERR_317, Exceptions.printErrors( errors ) ) );
    }

    directoryService.setSchemaManager( schemaManager );

    // Init the LdifPartition with schema
    LdifPartition schemaLdifPartition = new LdifPartition( schemaManager );
    schemaLdifPartition.setPartitionPath( schemaPartitionDirectory.toURI() );

    // The schema partition
    SchemaPartition schemaPartition = new SchemaPartition( schemaManager );
    schemaPartition.setWrappedPartition( schemaLdifPartition );
    directoryService.setSchemaPartition( schemaPartition );
}
Developer: TremoloSecurity, Project: MyVirtualDirectory, Lines: 49, Source file: Server.java
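As the comments in Example 3 point out, the schema must be loaded before any partition can be initialized, because partition suffix DNs are parsed and normalized against it. The short sketch below is not from MyVirtualDirectory; it reuses the schemaManager variable from Example 3, the suffix string is purely illustrative, and the Dn constructor and isSchemaAware() accessor follow the 1.0.x API.
import org.apache.directory.api.ldap.model.name.Dn;

// Once loadAllEnabled() has succeeded, DNs can be built schema-aware,
// i.e. already normalized against the attribute types the schema defines.
Dn suffix = new Dn( schemaManager, "dc=example,dc=com" );
System.out.println( suffix.isSchemaAware() );   // expected: true for a schema-aware Dn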
Example 4: init
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager; // import the required package/class
/**
 * {@inheritDoc}
 */
@Override
public void init(String name) throws Exception {
    if ((directoryService != null) && directoryService.isStarted()) {
        return;
    }
    directoryService.setInstanceId(name);

    // instance layout
    InstanceLayout instanceLayout = new InstanceLayout(System.getProperty("java.io.tmpdir") + "/server-work-" + name);
    if (instanceLayout.getInstanceDirectory().exists()) {
        try {
            FileUtils.deleteDirectory(instanceLayout.getInstanceDirectory());
        } catch (IOException e) {
            LOG.warn("couldn't delete the instance directory before initializing the DirectoryService", e);
        }
    }
    directoryService.setInstanceLayout(instanceLayout);

    // EhCache in disabled-like-mode
    Configuration ehCacheConfig = new Configuration();
    CacheConfiguration defaultCache = new CacheConfiguration("ApacheDSTestCache", 1).eternal(false).timeToIdleSeconds(30)
            .timeToLiveSeconds(30).overflowToDisk(false);
    ehCacheConfig.addDefaultCache(defaultCache);
    cacheManager = new CacheManager(ehCacheConfig);
    CacheService cacheService = new CacheService(cacheManager);
    directoryService.setCacheService(cacheService);

    // Init the schema
    // SchemaLoader loader = new SingleLdifSchemaLoader();
    SchemaLoader loader = new JarLdifSchemaLoader();
    SchemaManager schemaManager = new DefaultSchemaManager(loader);
    schemaManager.loadAllEnabled();
    ComparatorRegistry comparatorRegistry = schemaManager.getComparatorRegistry();
    for (LdapComparator<?> comparator : comparatorRegistry) {
        if (comparator instanceof NormalizingComparator) {
            ((NormalizingComparator) comparator).setOnServer();
        }
    }
    directoryService.setSchemaManager(schemaManager);
    InMemorySchemaPartition inMemorySchemaPartition = new InMemorySchemaPartition(schemaManager);
    SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
    schemaPartition.setWrappedPartition(inMemorySchemaPartition);
    directoryService.setSchemaPartition(schemaPartition);
    List<Throwable> errors = schemaManager.getErrors();
    if (errors.size() != 0) {
        throw new Exception(I18n.err(I18n.ERR_317, Exceptions.printErrors(errors)));
    }

    // Init system partition
    Partition systemPartition = partitionFactory.createPartition(directoryService.getSchemaManager(), "system",
            ServerDNConstants.SYSTEM_DN, 500, new File(directoryService.getInstanceLayout().getPartitionsDirectory(),
                    "system"));
    systemPartition.setSchemaManager(directoryService.getSchemaManager());
    partitionFactory.addIndex(systemPartition, SchemaConstants.OBJECT_CLASS_AT, 100);
    directoryService.setSystemPartition(systemPartition);

    directoryService.startup();
}
Developer: wildfly, Project: wildfly-core, Lines: 64, Source file: InMemoryDirectoryServiceFactory.java
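A possible continuation of Example 4 (not part of wildfly-core; the partition id, suffix, and variable name below are made up for illustration): the same PartitionFactory can create an application partition and register it with the DirectoryService, using only the calls already seen in the examples above.
// Hypothetical test partition; whether this runs before or after startup()
// depends on the test's needs.
Partition testPartition = partitionFactory.createPartition(
        directoryService.getSchemaManager(), "test", "dc=test,dc=example", 500,
        new File(directoryService.getInstanceLayout().getPartitionsDirectory(), "test"));
partitionFactory.addIndex(testPartition, SchemaConstants.OBJECT_CLASS_AT, 100);
directoryService.addPartition(testPartition);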
Example 5: initDirectoryService
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager; // import the required package/class
private void initDirectoryService() throws Exception {
  ds = new DefaultDirectoryService();
  ds.setInstanceLayout(new InstanceLayout(workDir));

  CacheService cacheService = new CacheService();
  ds.setCacheService(cacheService);

  // first load the schema
  InstanceLayout instanceLayout = ds.getInstanceLayout();
  File schemaPartitionDirectory = new File(
      instanceLayout.getPartitionsDirectory(), "schema");
  SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(
      instanceLayout.getPartitionsDirectory());
  extractor.extractOrCopy();

  SchemaLoader loader = new LdifSchemaLoader(schemaPartitionDirectory);
  SchemaManager schemaManager = new DefaultSchemaManager(loader);
  schemaManager.loadAllEnabled();
  ds.setSchemaManager(schemaManager);
  // Init the LdifPartition with schema
  LdifPartition schemaLdifPartition = new LdifPartition(schemaManager);
  schemaLdifPartition.setPartitionPath(schemaPartitionDirectory.toURI());

  // The schema partition
  SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
  schemaPartition.setWrappedPartition(schemaLdifPartition);
  ds.setSchemaPartition(schemaPartition);

  JdbmPartition systemPartition = new JdbmPartition(ds.getSchemaManager());
  systemPartition.setId("system");
  systemPartition.setPartitionPath(new File(
      ds.getInstanceLayout().getPartitionsDirectory(),
      systemPartition.getId()).toURI());
  systemPartition.setSuffixDn(new Dn(ServerDNConstants.SYSTEM_DN));
  systemPartition.setSchemaManager(ds.getSchemaManager());
  ds.setSystemPartition(systemPartition);

  ds.getChangeLog().setEnabled(false);
  ds.setDenormalizeOpAttrsEnabled(true);
  ds.addLast(new KeyDerivationInterceptor());

  // create one partition
  String orgName = conf.getProperty(ORG_NAME).toLowerCase();
  String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase();

  JdbmPartition partition = new JdbmPartition(ds.getSchemaManager());
  partition.setId(orgName);
  partition.setPartitionPath(new File(
      ds.getInstanceLayout().getPartitionsDirectory(), orgName).toURI());
  partition.setSuffixDn(new Dn("dc=" + orgName + ",dc=" + orgDomain));
  ds.addPartition(partition);

  // indexes
  Set<Index<?, ?, String>> indexedAttributes = new HashSet<Index<?, ?, String>>();
  indexedAttributes.add(new JdbmIndex<String, Entry>("objectClass", false));
  indexedAttributes.add(new JdbmIndex<String, Entry>("dc", false));
  indexedAttributes.add(new JdbmIndex<String, Entry>("ou", false));
  partition.setIndexedAttributes(indexedAttributes);

  // And start the ds
  ds.setInstanceId(conf.getProperty(INSTANCE));
  ds.startup();

  // context entry, after ds.startup()
  Dn dn = new Dn("dc=" + orgName + ",dc=" + orgDomain);
  Entry entry = ds.newEntry(dn);
  entry.add("objectClass", "top", "domain");
  entry.add("dc", orgName);
  ds.getAdminSession().add(entry);
}
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 69, Source file: MiniKdc.java
Note: the org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms, and the snippets were selected from open-source projects contributed by their developers. Copyright remains with the original authors; please refer to each project's License before redistributing or reusing the code, and do not republish without permission.