/*
 * Copyright 2016-2024 Ping Identity Corporation
 * All Rights Reserved.
 */
/*
 * Copyright 2016-2024 Ping Identity Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Copyright (C) 2016-2024 Ping Identity Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (GPLv2 only)
 * or the terms of the GNU Lesser General Public License (LGPLv2.1 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses>.
 */
package com.unboundid.ldap.sdk.unboundidds.tools;



import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.GZIPOutputStream;

import com.unboundid.ldap.sdk.Filter;
import com.unboundid.ldap.sdk.LDAPException;
import com.unboundid.ldap.sdk.ResultCode;
import com.unboundid.ldap.sdk.Version;
import com.unboundid.ldap.sdk.schema.Schema;
import com.unboundid.ldif.LDIFException;
import com.unboundid.ldif.LDIFReader;
import com.unboundid.util.ByteStringBuffer;
import com.unboundid.util.CommandLineTool;
import com.unboundid.util.Debug;
import com.unboundid.util.NotNull;
import com.unboundid.util.Nullable;
import com.unboundid.util.ObjectPair;
import com.unboundid.util.PassphraseEncryptedOutputStream;
import com.unboundid.util.StaticUtils;
import com.unboundid.util.ThreadSafety;
import com.unboundid.util.ThreadSafetyLevel;
import com.unboundid.util.args.ArgumentException;
import com.unboundid.util.args.ArgumentParser;
import com.unboundid.util.args.BooleanArgument;
import com.unboundid.util.args.DNArgument;
import com.unboundid.util.args.FileArgument;
import com.unboundid.util.args.FilterArgument;
import com.unboundid.util.args.IntegerArgument;
import com.unboundid.util.args.StringArgument;
import com.unboundid.util.args.SubCommand;

import static com.unboundid.ldap.sdk.unboundidds.tools.ToolMessages.*;



/**
 * This class provides a command-line tool that can be used to split an LDIF
 * file below a specified base DN.
 * This can be used to help initialize an
 * entry-balancing deployment for use with the Directory Proxy Server.
 * <BR>
 * <BLOCKQUOTE>
 *   <B>NOTE:</B>  This class, and other classes within the
 *   {@code com.unboundid.ldap.sdk.unboundidds} package structure, are only
 *   supported for use against Ping Identity, UnboundID, and
 *   Nokia/Alcatel-Lucent 8661 server products.  These classes provide support
 *   for proprietary functionality or for external specifications that are not
 *   considered stable or mature enough to be guaranteed to work in an
 *   interoperable way with other types of LDAP servers.
 * </BLOCKQUOTE>
 * <BR>
 * It supports a number of algorithms for determining how to split the data,
 * including:
 * <UL>
 *   <LI>
 *     split-using-hash-on-rdn -- The tool will compute a digest of the DN
 *     component that is immediately below the split base DN, and will use a
 *     modulus to select a backend set for a given entry.  Since the split is
 *     based purely on a computation involving the DN, there is no need for
 *     caching to ensure that children are placed in the same sets as their
 *     parent, which allows this algorithm to run effectively with a small
 *     memory footprint.
 *   </LI>
 *   <LI>
 *     split-using-hash-on-attribute -- The tool will compute a digest of the
 *     value(s) of a specified attribute, and will use a modulus to select a
 *     backend set for a given entry.  This hash will only be computed for
 *     entries immediately below the split base DN, and a cache will be used
 *     to ensure that entries more than one level below the split base DN are
 *     placed in the same backend set as their parent.
 *   </LI>
 *   <LI>
 *     split-using-fewest-entries -- When examining an entry immediately below
 *     the split base DN, the tool will place that entry in the set that has
 *     the fewest entries.  For flat DITs in which entries only exist one
 *     level below the split base DN, this will effectively ensure a
 *     round-robin distribution.  But for cases in which there are branches of
 *     varying sizes below the split base DN, this can help ensure that
 *     entries are more evenly distributed across backend sets.  A cache will
 *     be used to ensure that entries more than one level below the split base
 *     DN are placed in the same backend set as their parent.
 *   </LI>
 *   <LI>
 *     split-using-filter -- When examining an entry immediately below the
 *     split base DN, a series of filters will be evaluated against that
 *     entry, with each filter associated with a specific backend set.  If an
 *     entry doesn't match any of the provided filters, an RDN hash can be
 *     used to select the set.  A cache will be used to ensure that entries
 *     more than one level below the split base DN are placed in the same
 *     backend set as their parent.
 *   </LI>
 * </UL>
 */
@ThreadSafety(level=ThreadSafetyLevel.NOT_THREADSAFE)
public final class SplitLDIF
       extends CommandLineTool
{
  /**
   * The maximum length of any message to write to standard output or standard
   * error.
   */
  private static final int MAX_OUTPUT_LINE_LENGTH =
       StaticUtils.TERMINAL_WIDTH_COLUMNS - 1;
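

  // Implementation note:  the actual set-selection logic lives in the
  // SplitLDIFTranslator subclasses instantiated in doToolProcessing() (for
  // example, SplitLDIFRDNHashTranslator); this class handles argument
  // parsing, I/O, and dispatching entries to the selected output files.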


  // The global arguments used by this tool.
  @Nullable private BooleanArgument addEntriesOutsideSplitBaseDNToAllSets =
       null;
  @Nullable private BooleanArgument
       addEntriesOutsideSplitBaseDNToDedicatedSet = null;
  @Nullable private BooleanArgument compressTarget = null;
  @Nullable private BooleanArgument encryptTarget = null;
  @Nullable private BooleanArgument sourceCompressed = null;
  @Nullable private DNArgument splitBaseDN = null;
  @Nullable private FileArgument encryptionPassphraseFile = null;
  @Nullable private FileArgument schemaPath = null;
  @Nullable private FileArgument sourceLDIF = null;
  @Nullable private FileArgument targetLDIFBasePath = null;
  @Nullable private IntegerArgument numThreads = null;

  // The arguments used to split using a hash of the RDN.
  @Nullable private IntegerArgument splitUsingHashOnRDNNumSets = null;
  @Nullable private SubCommand splitUsingHashOnRDN = null;

  // The arguments used to split using a hash on a specified attribute.
  @Nullable private BooleanArgument splitUsingHashOnAttributeAssumeFlatDIT =
       null;
  @Nullable private BooleanArgument splitUsingHashOnAttributeUseAllValues =
       null;
  @Nullable private IntegerArgument splitUsingHashOnAttributeNumSets = null;
  @Nullable private StringArgument splitUsingHashOnAttributeAttributeName =
       null;
  @Nullable private SubCommand splitUsingHashOnAttribute = null;

  // The arguments used to choose the set with the fewest entries.
  @Nullable private BooleanArgument splitUsingFewestEntriesAssumeFlatDIT =
       null;
  @Nullable private IntegerArgument splitUsingFewestEntriesNumSets = null;
  @Nullable private SubCommand splitUsingFewestEntries = null;

  // The arguments used to choose the set using a provided set of filters.
  @Nullable private BooleanArgument splitUsingFilterAssumeFlatDIT = null;
  @Nullable private FilterArgument splitUsingFilterFilter = null;
  @Nullable private SubCommand splitUsingFilter = null;



  /**
   * Runs the tool with the provided set of command-line arguments.
   *
   * @param  args  The command-line arguments provided to this tool.
   */
  public static void main(@NotNull final String... args)
  {
    final ResultCode resultCode = main(System.out, System.err, args);
    if (resultCode != ResultCode.SUCCESS)
    {
      System.exit(resultCode.intValue());
    }
  }



  /**
   * Runs the tool with the provided set of command-line arguments.
   *
   * @param  out   The output stream used for standard output.  It may be
   *               {@code null} if standard output should be suppressed.
   * @param  err   The output stream used for standard error.  It may be
   *               {@code null} if standard error should be suppressed.
   * @param  args  The command-line arguments provided to this tool.
   *
   * @return  A result code with information about the processing performed.
   *          Any result code other than {@link ResultCode#SUCCESS} indicates
   *          that an error occurred.
   */
  @NotNull()
  public static ResultCode main(@Nullable final OutputStream out,
                                @Nullable final OutputStream err,
                                @NotNull final String... args)
  {
    final SplitLDIF tool = new SplitLDIF(out, err);
    return tool.runTool(args);
  }



  /**
   * Creates a new instance of this tool with the provided information.
   *
   * @param  out  The output stream used for standard output.  It may be
   *              {@code null} if standard output should be suppressed.
   * @param  err  The output stream used for standard error.
   *              It may be {@code null} if standard error should be
   *              suppressed.
   */
  public SplitLDIF(@Nullable final OutputStream out,
                   @Nullable final OutputStream err)
  {
    super(out, err);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public String getToolName()
  {
    return "split-ldif";
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public String getToolDescription()
  {
    return INFO_SPLIT_LDIF_TOOL_DESCRIPTION.get();
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public String getToolVersion()
  {
    return Version.NUMERIC_VERSION_STRING;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean supportsInteractiveMode()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean defaultsToInteractiveMode()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean supportsPropertiesFile()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  protected boolean supportsDebugLogging()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void addToolArguments(@NotNull final ArgumentParser parser)
         throws ArgumentException
  {
    // Add the global arguments.
    sourceLDIF = new FileArgument('l', "sourceLDIF", true, 0, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SOURCE_LDIF.get(), true, false, true,
         false);
    sourceLDIF.addLongIdentifier("inputLDIF", true);
    sourceLDIF.addLongIdentifier("source-ldif", true);
    sourceLDIF.addLongIdentifier("input-ldif", true);
    parser.addArgument(sourceLDIF);

    sourceCompressed = new BooleanArgument('C', "sourceCompressed",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SOURCE_COMPRESSED.get());
    sourceCompressed.addLongIdentifier("inputCompressed", true);
    sourceCompressed.addLongIdentifier("source-compressed", true);
    sourceCompressed.addLongIdentifier("input-compressed", true);
    parser.addArgument(sourceCompressed);

    targetLDIFBasePath = new FileArgument('o', "targetLDIFBasePath", false, 1,
         null, INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_TARGET_LDIF_BASE.get(), false,
         true, true, false);
    targetLDIFBasePath.addLongIdentifier("outputLDIFBasePath", true);
    targetLDIFBasePath.addLongIdentifier("target-ldif-base-path", true);
    targetLDIFBasePath.addLongIdentifier("output-ldif-base-path", true);
    parser.addArgument(targetLDIFBasePath);

    compressTarget = new BooleanArgument('c', "compressTarget",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_COMPRESS_TARGET.get());
    compressTarget.addLongIdentifier("compressOutput", true);
    compressTarget.addLongIdentifier("compress", true);
    compressTarget.addLongIdentifier("compress-target", true);
    compressTarget.addLongIdentifier("compress-output", true);
    parser.addArgument(compressTarget);

    encryptTarget = new BooleanArgument(null, "encryptTarget",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_ENCRYPT_TARGET.get());
    encryptTarget.addLongIdentifier("encryptOutput", true);
    encryptTarget.addLongIdentifier("encrypt", true);
    encryptTarget.addLongIdentifier("encrypt-target", true);
    encryptTarget.addLongIdentifier("encrypt-output", true);
    parser.addArgument(encryptTarget);

    encryptionPassphraseFile = new FileArgument(null,
         "encryptionPassphraseFile", false, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_ENCRYPT_PW_FILE.get(), true, true,
         true, false);
    encryptionPassphraseFile.addLongIdentifier("encryptionPasswordFile", true);
    encryptionPassphraseFile.addLongIdentifier("encryption-passphrase-file",
         true);
    encryptionPassphraseFile.addLongIdentifier("encryption-password-file",
         true);
    parser.addArgument(encryptionPassphraseFile);

    splitBaseDN = new DNArgument('b', "splitBaseDN", true, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SPLIT_BASE_DN.get());
    splitBaseDN.addLongIdentifier("baseDN", true);
    splitBaseDN.addLongIdentifier("split-base-dn", true);
    splitBaseDN.addLongIdentifier("base-dn", true);
    parser.addArgument(splitBaseDN);

    addEntriesOutsideSplitBaseDNToAllSets = new BooleanArgument(null,
         "addEntriesOutsideSplitBaseDNToAllSets", 1,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_OUTSIDE_TO_ALL_SETS.get());
    addEntriesOutsideSplitBaseDNToAllSets.addLongIdentifier(
         "add-entries-outside-split-base-dn-to-all-sets", true);
    parser.addArgument(addEntriesOutsideSplitBaseDNToAllSets);

    addEntriesOutsideSplitBaseDNToDedicatedSet = new BooleanArgument(null,
         "addEntriesOutsideSplitBaseDNToDedicatedSet", 1,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_OUTSIDE_TO_DEDICATED_SET.get());
    addEntriesOutsideSplitBaseDNToDedicatedSet.addLongIdentifier(
         "add-entries-outside-split-base-dn-to-dedicated-set", true);
    parser.addArgument(addEntriesOutsideSplitBaseDNToDedicatedSet);

    schemaPath = new FileArgument(null, "schemaPath", false, 0, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SCHEMA_PATH.get(), true, false, false,
         false);
    schemaPath.addLongIdentifier("schemaFile", true);
    schemaPath.addLongIdentifier("schemaDirectory", true);
    schemaPath.addLongIdentifier("schema-path", true);
    schemaPath.addLongIdentifier("schema-file", true);
    schemaPath.addLongIdentifier("schema-directory", true);
    parser.addArgument(schemaPath);

    numThreads = new IntegerArgument('t', "numThreads", false, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_NUM_THREADS.get(), 1,
         Integer.MAX_VALUE, 1);
    numThreads.addLongIdentifier("num-threads", true);
    parser.addArgument(numThreads);


    // Add the subcommand used to split entries using a hash on the RDN.
    final ArgumentParser splitUsingHashOnRDNParser = new ArgumentParser(
         "split-using-hash-on-rdn", INFO_SPLIT_LDIF_SC_HASH_ON_RDN_DESC.get());

    splitUsingHashOnRDNNumSets = new IntegerArgument(null, "numSets", true, 1,
         null, INFO_SPLIT_LDIF_SC_HASH_ON_RDN_ARG_DESC_NUM_SETS.get(), 2,
         Integer.MAX_VALUE);
    splitUsingHashOnRDNNumSets.addLongIdentifier("num-sets", true);
    splitUsingHashOnRDNParser.addArgument(splitUsingHashOnRDNNumSets);

    final LinkedHashMap<String[],String> splitUsingHashOnRDNExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingHashOnRDNExamples.put(
         new String[]
         {
           "split-using-hash-on-rdn",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--numSets", "4",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_HASH_ON_RDN_EXAMPLE.get());

    splitUsingHashOnRDN = new SubCommand("split-using-hash-on-rdn",
         INFO_SPLIT_LDIF_SC_HASH_ON_RDN_DESC.get(), splitUsingHashOnRDNParser,
         splitUsingHashOnRDNExamples);
    splitUsingHashOnRDN.addName("hash-on-rdn", true);

    parser.addSubCommand(splitUsingHashOnRDN);


    // Add the subcommand used to split entries using a hash on a specified
    // attribute.
    final ArgumentParser splitUsingHashOnAttributeParser = new ArgumentParser(
         "split-using-hash-on-attribute",
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_DESC.get());

    splitUsingHashOnAttributeAttributeName = new StringArgument(null,
         "attributeName", true, 1, "{attr}",
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ATTR_NAME.get());
    splitUsingHashOnAttributeAttributeName.addLongIdentifier("attribute-name",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeAttributeName);

    splitUsingHashOnAttributeNumSets = new IntegerArgument(null, "numSets",
         true, 1, null, INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_NUM_SETS.get(),
         2, Integer.MAX_VALUE);
    splitUsingHashOnAttributeNumSets.addLongIdentifier("num-sets", true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeNumSets);

    splitUsingHashOnAttributeUseAllValues = new BooleanArgument(null,
         "useAllValues", 1,
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ALL_VALUES.get());
    splitUsingHashOnAttributeUseAllValues.addLongIdentifier("use-all-values",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeUseAllValues);

    splitUsingHashOnAttributeAssumeFlatDIT = new BooleanArgument(null,
         "assumeFlatDIT", 1,
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ASSUME_FLAT_DIT.get());
    splitUsingHashOnAttributeAssumeFlatDIT.addLongIdentifier("assume-flat-dit",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeAssumeFlatDIT);

    final LinkedHashMap<String[],String> splitUsingHashOnAttributeExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingHashOnAttributeExamples.put(
         new String[]
         {
           "split-using-hash-on-attribute",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--attributeName", "uid",
           "--numSets", "4",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_EXAMPLE.get());

    splitUsingHashOnAttribute = new
SubCommand("split-using-hash-on-attribute", 516 INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_DESC.get(), 517 splitUsingHashOnAttributeParser, splitUsingHashOnAttributeExamples); 518 splitUsingHashOnAttribute.addName("hash-on-attribute", true); 519 520 parser.addSubCommand(splitUsingHashOnAttribute); 521 522 523 // Add the subcommand used to split entries by selecting the set with the 524 // fewest entries. 525 final ArgumentParser splitUsingFewestEntriesParser = new ArgumentParser( 526 "split-using-fewest-entries", 527 INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_DESC.get()); 528 529 splitUsingFewestEntriesNumSets = new IntegerArgument(null, "numSets", 530 true, 1, null, 531 INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_ARG_DESC_NUM_SETS.get(), 532 2, Integer.MAX_VALUE); 533 splitUsingFewestEntriesNumSets.addLongIdentifier("num-sets", true); 534 splitUsingFewestEntriesParser.addArgument(splitUsingFewestEntriesNumSets); 535 536 splitUsingFewestEntriesAssumeFlatDIT = new BooleanArgument(null, 537 "assumeFlatDIT", 1, 538 INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_ARG_DESC_ASSUME_FLAT_DIT.get()); 539 splitUsingFewestEntriesAssumeFlatDIT.addLongIdentifier("assume-flat-dit", 540 true); 541 splitUsingFewestEntriesParser.addArgument( 542 splitUsingFewestEntriesAssumeFlatDIT); 543 544 final LinkedHashMap<String[],String> splitUsingFewestEntriesExamples = 545 new LinkedHashMap<>(StaticUtils.computeMapCapacity(1)); 546 splitUsingFewestEntriesExamples.put( 547 new String[] 548 { 549 "split-using-fewest-entries", 550 "--sourceLDIF", "whole.ldif", 551 "--targetLDIFBasePath", "split.ldif", 552 "--splitBaseDN", "ou=People,dc=example,dc=com", 553 "--numSets", "4", 554 "--schemaPath", "config/schema", 555 "--addEntriesOutsideSplitBaseDNToAllSets" 556 }, 557 INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_EXAMPLE.get()); 558 559 splitUsingFewestEntries = new SubCommand("split-using-fewest-entries", 560 INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_DESC.get(), 561 splitUsingFewestEntriesParser, splitUsingFewestEntriesExamples); 562 splitUsingFewestEntries.addName("fewest-entries", true); 563 564 parser.addSubCommand(splitUsingFewestEntries); 565 566 567 // Add the subcommand used to split entries by selecting the set based on a 568 // filter. 
    final ArgumentParser splitUsingFilterParser = new ArgumentParser(
         "split-using-filter", INFO_SPLIT_LDIF_SC_FILTER_DESC.get());

    splitUsingFilterFilter = new FilterArgument(null, "filter", true, 0, null,
         INFO_SPLIT_LDIF_SC_FILTER_ARG_DESC_FILTER.get());
    splitUsingFilterParser.addArgument(splitUsingFilterFilter);

    splitUsingFilterAssumeFlatDIT = new BooleanArgument(null, "assumeFlatDIT",
         1, INFO_SPLIT_LDIF_SC_FILTER_ARG_DESC_ASSUME_FLAT_DIT.get());
    splitUsingFilterAssumeFlatDIT.addLongIdentifier("assume-flat-dit", true);
    splitUsingFilterParser.addArgument(splitUsingFilterAssumeFlatDIT);

    final LinkedHashMap<String[],String> splitUsingFilterExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingFilterExamples.put(
         new String[]
         {
           "split-using-filter",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--filter", "(timeZone=Eastern)",
           "--filter", "(timeZone=Central)",
           "--filter", "(timeZone=Mountain)",
           "--filter", "(timeZone=Pacific)",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_FILTER_EXAMPLE.get());

    splitUsingFilter = new SubCommand("split-using-filter",
         INFO_SPLIT_LDIF_SC_FILTER_DESC.get(),
         splitUsingFilterParser, splitUsingFilterExamples);
    splitUsingFilter.addName("filter", true);

    parser.addSubCommand(splitUsingFilter);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void doExtendedArgumentValidation()
         throws ArgumentException
  {
    // If multiple sourceLDIF values were provided, then a target LDIF base
    // path must have been given.
    final List<File> sourceLDIFValues = sourceLDIF.getValues();
    if (sourceLDIFValues.size() > 1)
    {
      if (! targetLDIFBasePath.isPresent())
      {
        throw new ArgumentException(ERR_SPLIT_LDIF_NO_TARGET_BASE_PATH.get(
             sourceLDIF.getIdentifierString(),
             targetLDIFBasePath.getIdentifierString()));
      }
    }


    // If the split-using-filter subcommand was selected, then at least two
    // filters must have been provided, and none of the filters may be
    // logically equivalent to any of the others.
    if (splitUsingFilter.isPresent())
    {
      final List<Filter> filterList = splitUsingFilterFilter.getValues();
      final Set<Filter> filterSet = new LinkedHashSet<>(
           StaticUtils.computeMapCapacity(filterList.size()));
      for (final Filter f : filterList)
      {
        if (filterSet.contains(f))
        {
          throw new ArgumentException(ERR_SPLIT_LDIF_NON_UNIQUE_FILTER.get(
               splitUsingFilterFilter.getIdentifierString(), f.toString()));
        }
        else
        {
          filterSet.add(f);
        }
      }

      if (filterSet.size() < 2)
      {
        throw new ArgumentException(ERR_SPLIT_LDIF_NOT_ENOUGH_FILTERS.get(
             splitUsingFilter.getPrimaryName(),
             splitUsingFilterFilter.getIdentifierString()));
      }
    }
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public ResultCode doToolProcessing()
  {
    // Get the schema to use during processing.
    final Schema schema;
    try
    {
      schema = getSchema();
    }
    catch (final LDAPException le)
    {
      wrapErr(0, MAX_OUTPUT_LINE_LENGTH, le.getMessage());
      return le.getResultCode();
    }


    // If an encryption passphrase file was provided, then read the passphrase
    // from it.
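    // The same passphrase is reused both for reading encrypted source LDIF
    // and for encrypting the target files, so it only needs to be obtained
    // once.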
    String encryptionPassphrase = null;
    if (encryptionPassphraseFile.isPresent())
    {
      try
      {
        encryptionPassphrase = ToolUtils.readEncryptionPassphraseFromFile(
             encryptionPassphraseFile.getValue());
      }
      catch (final LDAPException e)
      {
        Debug.debugException(e);
        wrapErr(0, MAX_OUTPUT_LINE_LENGTH, e.getMessage());
        return e.getResultCode();
      }
    }


    // Figure out which subcommand was selected, and create the appropriate
    // translator to use to perform the processing.
    final SplitLDIFTranslator translator;
    if (splitUsingHashOnRDN.isPresent())
    {
      translator = new SplitLDIFRDNHashTranslator(splitBaseDN.getValue(),
           splitUsingHashOnRDNNumSets.getValue(),
           addEntriesOutsideSplitBaseDNToAllSets.isPresent(),
           addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent());
    }
    else if (splitUsingHashOnAttribute.isPresent())
    {
      translator = new SplitLDIFAttributeHashTranslator(splitBaseDN.getValue(),
           splitUsingHashOnAttributeNumSets.getValue(),
           splitUsingHashOnAttributeAttributeName.getValue(),
           splitUsingHashOnAttributeUseAllValues.isPresent(),
           splitUsingHashOnAttributeAssumeFlatDIT.isPresent(),
           addEntriesOutsideSplitBaseDNToAllSets.isPresent(),
           addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent());
    }
    else if (splitUsingFewestEntries.isPresent())
    {
      translator = new SplitLDIFFewestEntriesTranslator(splitBaseDN.getValue(),
           splitUsingFewestEntriesNumSets.getValue(),
           splitUsingFewestEntriesAssumeFlatDIT.isPresent(),
           addEntriesOutsideSplitBaseDNToAllSets.isPresent(),
           addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent());
    }
    else if (splitUsingFilter.isPresent())
    {
      final List<Filter> filterList = splitUsingFilterFilter.getValues();
      final LinkedHashSet<Filter> filterSet = new LinkedHashSet<>(
           StaticUtils.computeMapCapacity(filterList.size()));
      for (final Filter f : filterList)
      {
        filterSet.add(f);
      }

      translator = new SplitLDIFFilterTranslator(splitBaseDN.getValue(),
           schema, filterSet, splitUsingFilterAssumeFlatDIT.isPresent(),
           addEntriesOutsideSplitBaseDNToAllSets.isPresent(),
           addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent());
    }
    else
    {
      // This should never happen.
      wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
           ERR_SPLIT_LDIF_CANNOT_DETERMINE_SPLIT_ALGORITHM.get(
                splitUsingHashOnRDN.getPrimaryName() + ", " +
                splitUsingHashOnAttribute.getPrimaryName() + ", " +
                splitUsingFewestEntries.getPrimaryName() + ", " +
                splitUsingFilter.getPrimaryName()));
      return ResultCode.PARAM_ERROR;
    }


    // Create the LDIF reader.
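    // The reader invokes the translator from its parse threads, so with
    // --numThreads greater than one, set assignment happens in parallel.
    // Conceptually, the hash-based translators then reduce a digest of the
    // RDN or attribute value to a set index with a modulus, along the lines
    // of this illustrative sketch (not the actual implementation):
    //
    //   final int setIndex = (digestValue & 0x7FFFFFFF) % numSets;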
    final LDIFReader ldifReader;
    try
    {
      final InputStream inputStream;
      if (sourceLDIF.isPresent())
      {
        final ObjectPair<InputStream,String> p =
             ToolUtils.getInputStreamForLDIFFiles(sourceLDIF.getValues(),
                  encryptionPassphrase, getOut(), getErr());
        inputStream = p.getFirst();
        if ((encryptionPassphrase == null) && (p.getSecond() != null))
        {
          encryptionPassphrase = p.getSecond();
        }
      }
      else
      {
        inputStream = System.in;
      }

      ldifReader = new LDIFReader(inputStream, numThreads.getValue(),
           translator);
      if (schema != null)
      {
        ldifReader.setSchema(schema);
      }
    }
    catch (final Exception e)
    {
      Debug.debugException(e);
      wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
           ERR_SPLIT_LDIF_ERROR_CREATING_LDIF_READER.get(
                StaticUtils.getExceptionMessage(e)));
      return ResultCode.LOCAL_ERROR;
    }


    // Iterate through and process all of the entries.
    ResultCode resultCode = ResultCode.SUCCESS;
    final LinkedHashMap<String,OutputStream> outputStreams =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(10));
    try
    {
      final AtomicLong entriesRead = new AtomicLong(0L);
      final AtomicLong entriesExcluded = new AtomicLong(0L);
      final TreeMap<String,AtomicLong> fileCounts = new TreeMap<>();

readLoop:
      while (true)
      {
        final SplitLDIFEntry entry;
        try
        {
          entry = (SplitLDIFEntry) ldifReader.readEntry();
        }
        catch (final LDIFException le)
        {
          Debug.debugException(le);
          resultCode = ResultCode.LOCAL_ERROR;

          final File f = getOutputFile(SplitLDIFEntry.SET_NAME_ERRORS);
          OutputStream s = outputStreams.get(SplitLDIFEntry.SET_NAME_ERRORS);
          if (s == null)
          {
            try
            {
              s = new FileOutputStream(f);

              if (encryptTarget.isPresent())
              {
                if (encryptionPassphrase == null)
                {
                  try
                  {
                    encryptionPassphrase =
                         ToolUtils.promptForEncryptionPassphrase(false, true,
                              getOut(), getErr());
                  }
                  catch (final LDAPException ex)
                  {
                    Debug.debugException(ex);
                    wrapErr(0, MAX_OUTPUT_LINE_LENGTH, ex.getMessage());
                    return ex.getResultCode();
                  }
                }

                s = new PassphraseEncryptedOutputStream(encryptionPassphrase,
                     s);
              }

              if (compressTarget.isPresent())
              {
                s = new GZIPOutputStream(s);
              }

              outputStreams.put(SplitLDIFEntry.SET_NAME_ERRORS, s);
              fileCounts.put(SplitLDIFEntry.SET_NAME_ERRORS,
                   new AtomicLong(0L));
            }
            catch (final Exception e)
            {
              Debug.debugException(e);
              resultCode = ResultCode.LOCAL_ERROR;
              wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                   ERR_SPLIT_LDIF_CANNOT_OPEN_OUTPUT_FILE.get(
                        f.getAbsolutePath(),
                        StaticUtils.getExceptionMessage(e)));
              break readLoop;
            }
          }

          final ByteStringBuffer buffer = new ByteStringBuffer();
          buffer.append("# ");
          buffer.append(le.getMessage());
          buffer.append(StaticUtils.EOL_BYTES);

          final List<String> dataLines = le.getDataLines();
          if (dataLines != null)
          {
            for (final String dataLine : dataLines)
            {
              buffer.append(dataLine);
              buffer.append(StaticUtils.EOL_BYTES);
            }
          }

          buffer.append(StaticUtils.EOL_BYTES);

          try
          {
            s.write(buffer.toByteArray());
          }
          catch (final Exception e)
          {
            Debug.debugException(e);
            resultCode = ResultCode.LOCAL_ERROR;
            wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                 ERR_SPLIT_LDIF_ERROR_WRITING_ERROR_TO_FILE.get(
                      le.getMessage(), f.getAbsolutePath(),
                      StaticUtils.getExceptionMessage(e)));
            break readLoop;
          }

          if (le.mayContinueReading())
          {
            wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                 ERR_SPLIT_LDIF_INVALID_LDIF_RECORD_RECOVERABLE.get(
                      StaticUtils.getExceptionMessage(le)));
            continue;
          }
          else
          {
            wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                 ERR_SPLIT_LDIF_INVALID_LDIF_RECORD_UNRECOVERABLE.get(
                      StaticUtils.getExceptionMessage(le)));
            break;
          }
        }
        catch (final IOException ioe)
        {
          Debug.debugException(ioe);
          resultCode = ResultCode.LOCAL_ERROR;
          wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
               ERR_SPLIT_LDIF_IO_READ_ERROR.get(
                    StaticUtils.getExceptionMessage(ioe)));
          break;
        }
        catch (final Exception e)
        {
          Debug.debugException(e);
          resultCode = ResultCode.LOCAL_ERROR;
          wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
               ERR_SPLIT_LDIF_UNEXPECTED_READ_ERROR.get(
                    StaticUtils.getExceptionMessage(e)));
          break;
        }

        if (entry == null)
        {
          break;
        }

        final long readCount = entriesRead.incrementAndGet();
        if ((readCount % 1000L) == 0)
        {
          // Even though we aren't done with this entry yet, we'll go ahead
          // and log a progress message now because it's easier to do that
          // here than to ensure that it's handled properly through all
          // possible error conditions that need to be handled below.
          wrapOut(0, MAX_OUTPUT_LINE_LENGTH,
               INFO_SPLIT_LDIF_PROGRESS.get(readCount));
        }


        // Get the set(s) to which the entry should be written.  This may be
        // null as a result of a race condition when using multiple threads
        // (processing for a child can complete before processing for its
        // parent), or because a child appeared in the source LDIF without or
        // before its parent.  In that case, try to get the sets by passing
        // the entry through the translator again.
        Set<String> sets = entry.getSets();
        byte[] ldifBytes = entry.getLDIFBytes();
        if (sets == null)
        {
          try
          {
            sets = translator.translate(entry, 0L).getSets();
          }
          catch (final Exception e)
          {
            Debug.debugException(e);
          }

          if (sets == null)
          {
            final SplitLDIFEntry errorEntry = translator.createEntry(entry,
                 ERR_SPLIT_LDIF_ENTRY_WITHOUT_PARENT.get(
                      entry.getDN(), splitBaseDN.getStringValue()),
                 Collections.singleton(SplitLDIFEntry.SET_NAME_ERRORS));
            ldifBytes = errorEntry.getLDIFBytes();
            sets = errorEntry.getSets();
          }
        }


        // If the entry shouldn't be written into any sets, then we don't
        // need to do anything else.
        if (sets.isEmpty())
        {
          entriesExcluded.incrementAndGet();
          continue;
        }


        // Write the entry into each of the target sets, creating the output
        // files if necessary.
        for (final String set : sets)
        {
          if (set.equals(SplitLDIFEntry.SET_NAME_ERRORS))
          {
            // This indicates that an error was encountered during
            // processing, so we'll update the result code to reflect that.
            resultCode = ResultCode.LOCAL_ERROR;
          }

          final File f = getOutputFile(set);
          OutputStream s = outputStreams.get(set);
          if (s == null)
          {
            try
            {
              s = new FileOutputStream(f);

              if (encryptTarget.isPresent())
              {
                if (encryptionPassphrase == null)
                {
                  try
                  {
                    encryptionPassphrase =
                         ToolUtils.promptForEncryptionPassphrase(false, true,
                              getOut(), getErr());
                  }
                  catch (final LDAPException ex)
                  {
                    Debug.debugException(ex);
                    wrapErr(0, MAX_OUTPUT_LINE_LENGTH, ex.getMessage());
                    return ex.getResultCode();
                  }
                }

                s = new PassphraseEncryptedOutputStream(encryptionPassphrase,
                     s);
              }

              if (compressTarget.isPresent())
              {
                s = new GZIPOutputStream(s);
              }

              outputStreams.put(set, s);
              fileCounts.put(set, new AtomicLong(0L));
            }
            catch (final Exception e)
            {
              Debug.debugException(e);
              resultCode = ResultCode.LOCAL_ERROR;
              wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                   ERR_SPLIT_LDIF_CANNOT_OPEN_OUTPUT_FILE.get(
                        f.getAbsolutePath(),
                        StaticUtils.getExceptionMessage(e)));
              break readLoop;
            }
          }

          try
          {
            s.write(ldifBytes);
          }
          catch (final Exception e)
          {
            Debug.debugException(e);
            resultCode = ResultCode.LOCAL_ERROR;
            wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                 ERR_SPLIT_LDIF_ERROR_WRITING_TO_FILE.get(
                      entry.getDN(), f.getAbsolutePath(),
                      StaticUtils.getExceptionMessage(e)));
            break readLoop;
          }

          fileCounts.get(set).incrementAndGet();
        }
      }


      // Processing is complete.  Summarize the processing that was performed.
      final long finalReadCount = entriesRead.get();
      if (finalReadCount > 1000L)
      {
        out();
      }

      wrapOut(0, MAX_OUTPUT_LINE_LENGTH,
           INFO_SPLIT_LDIF_PROCESSING_COMPLETE.get(finalReadCount));

      final long excludedCount = entriesExcluded.get();
      if (excludedCount > 0L)
      {
        wrapOut(0, MAX_OUTPUT_LINE_LENGTH,
             INFO_SPLIT_LDIF_EXCLUDED_COUNT.get(excludedCount));
      }

      for (final Map.Entry<String,AtomicLong> e : fileCounts.entrySet())
      {
        final File f = getOutputFile(e.getKey());
        wrapOut(0, MAX_OUTPUT_LINE_LENGTH,
             INFO_SPLIT_LDIF_COUNT_TO_FILE.get(e.getValue().get(),
                  f.getName()));
      }
    }
    finally
    {
      try
      {
        ldifReader.close();
      }
      catch (final Exception e)
      {
        Debug.debugException(e);
      }

      for (final Map.Entry<String,OutputStream> e : outputStreams.entrySet())
      {
        try
        {
          e.getValue().close();
        }
        catch (final Exception ex)
        {
          Debug.debugException(ex);
          resultCode = ResultCode.LOCAL_ERROR;
          wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
               ERR_SPLIT_LDIF_ERROR_CLOSING_FILE.get(
                    getOutputFile(e.getKey()),
                    StaticUtils.getExceptionMessage(ex)));
        }
      }
    }

    return resultCode;
  }



  /**
   * Retrieves the schema that should be used for processing.
   *
   * @return  The schema that was created, or {@code null} if the tool should
   *          fall back to the default standard schema.
   *
   * @throws  LDAPException  If a problem is encountered while retrieving the
   *                         schema.
   */
  @Nullable()
  private Schema getSchema()
          throws LDAPException
  {
    // If any schema paths were specified, then load the schema only from
    // those paths.
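    // (A path that is a file is used directly; a path that is a directory
    // contributes the ".ldif" files it contains, in alphabetical order.)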
    if (schemaPath.isPresent())
    {
      final ArrayList<File> schemaFiles = new ArrayList<>(10);
      for (final File path : schemaPath.getValues())
      {
        if (path.isFile())
        {
          schemaFiles.add(path);
        }
        else
        {
          final TreeMap<String,File> fileMap = new TreeMap<>();
          for (final File schemaDirFile : path.listFiles())
          {
            final String name = schemaDirFile.getName();
            if (schemaDirFile.isFile() && name.toLowerCase().endsWith(".ldif"))
            {
              fileMap.put(name, schemaDirFile);
            }
          }
          schemaFiles.addAll(fileMap.values());
        }
      }

      if (schemaFiles.isEmpty())
      {
        throw new LDAPException(ResultCode.PARAM_ERROR,
             ERR_SPLIT_LDIF_NO_SCHEMA_FILES.get(
                  schemaPath.getIdentifierString()));
      }
      else
      {
        try
        {
          return Schema.getSchema(schemaFiles);
        }
        catch (final Exception e)
        {
          Debug.debugException(e);
          throw new LDAPException(ResultCode.LOCAL_ERROR,
               ERR_SPLIT_LDIF_ERROR_LOADING_SCHEMA.get(
                    StaticUtils.getExceptionMessage(e)));
        }
      }
    }
    else
    {
      // If the INSTANCE_ROOT environment variable is set and it refers to a
      // directory with a config/schema subdirectory containing one or more
      // schema files, then read the schema from that directory.
      try
      {
        final String instanceRootStr =
             StaticUtils.getEnvironmentVariable("INSTANCE_ROOT");
        if (instanceRootStr != null)
        {
          final File instanceRoot = new File(instanceRootStr);
          final File configDir = new File(instanceRoot, "config");
          final File schemaDir = new File(configDir, "schema");
          if (schemaDir.exists())
          {
            final TreeMap<String,File> fileMap = new TreeMap<>();
            for (final File schemaDirFile : schemaDir.listFiles())
            {
              final String name = schemaDirFile.getName();
              if (schemaDirFile.isFile() &&
                  name.toLowerCase().endsWith(".ldif"))
              {
                fileMap.put(name, schemaDirFile);
              }
            }

            if (! fileMap.isEmpty())
            {
              return Schema.getSchema(new ArrayList<>(fileMap.values()));
            }
          }
        }
      }
      catch (final Exception e)
      {
        Debug.debugException(e);
      }
    }


    // If we've gotten here, then just return null and the tool will try to
    // use the default standard schema.
    return null;
  }



  /**
   * Retrieves a file object that refers to an output file with the provided
   * extension.
   *
   * @param  extension  The extension to use for the file.
   *
   * @return  A file object that refers to an output file with the provided
   *          extension.
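   *          For example (illustrative), with a target LDIF base path of
   *          "split.ldif" and an extension of ".set1", the returned file
   *          would refer to "split.ldif.set1".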
   */
  @NotNull()
  private File getOutputFile(@NotNull final String extension)
  {
    final File baseFile;
    if (targetLDIFBasePath.isPresent())
    {
      baseFile = targetLDIFBasePath.getValue();
    }
    else
    {
      baseFile = sourceLDIF.getValue();
    }

    return new File(baseFile.getAbsolutePath() + extension);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public LinkedHashMap<String[],String> getExampleUsages()
  {
    final LinkedHashMap<String[],String> exampleMap =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(4));

    for (final Map.Entry<String[],String> e :
         splitUsingHashOnRDN.getExampleUsages().entrySet())
    {
      exampleMap.put(e.getKey(), e.getValue());
    }

    for (final Map.Entry<String[],String> e :
         splitUsingHashOnAttribute.getExampleUsages().entrySet())
    {
      exampleMap.put(e.getKey(), e.getValue());
    }

    for (final Map.Entry<String[],String> e :
         splitUsingFewestEntries.getExampleUsages().entrySet())
    {
      exampleMap.put(e.getKey(), e.getValue());
    }

    for (final Map.Entry<String[],String> e :
         splitUsingFilter.getExampleUsages().entrySet())
    {
      exampleMap.put(e.getKey(), e.getValue());
    }

    return exampleMap;
  }
}