001/* 002 * Copyright 2016-2024 Ping Identity Corporation 003 * All Rights Reserved. 004 */ 005/* 006 * Copyright 2016-2024 Ping Identity Corporation 007 * 008 * Licensed under the Apache License, Version 2.0 (the "License"); 009 * you may not use this file except in compliance with the License. 010 * You may obtain a copy of the License at 011 * 012 * http://www.apache.org/licenses/LICENSE-2.0 013 * 014 * Unless required by applicable law or agreed to in writing, software 015 * distributed under the License is distributed on an "AS IS" BASIS, 016 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 017 * See the License for the specific language governing permissions and 018 * limitations under the License. 019 */ 020/* 021 * Copyright (C) 2016-2024 Ping Identity Corporation 022 * 023 * This program is free software; you can redistribute it and/or modify 024 * it under the terms of the GNU General Public License (GPLv2 only) 025 * or the terms of the GNU Lesser General Public License (LGPLv2.1 only) 026 * as published by the Free Software Foundation. 027 * 028 * This program is distributed in the hope that it will be useful, 029 * but WITHOUT ANY WARRANTY; without even the implied warranty of 030 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 031 * GNU General Public License for more details. 032 * 033 * You should have received a copy of the GNU General Public License 034 * along with this program; if not, see <http://www.gnu.org/licenses>. 
035 */ 036package com.unboundid.ldap.sdk.unboundidds.tools; 037 038 039 040import java.io.File; 041import java.io.FileOutputStream; 042import java.io.InputStream; 043import java.io.IOException; 044import java.io.OutputStream; 045import java.util.ArrayList; 046import java.util.Collections; 047import java.util.LinkedHashMap; 048import java.util.LinkedHashSet; 049import java.util.List; 050import java.util.Map; 051import java.util.Set; 052import java.util.TreeMap; 053import java.util.concurrent.atomic.AtomicLong; 054import java.util.zip.GZIPOutputStream; 055 056import com.unboundid.ldap.sdk.Filter; 057import com.unboundid.ldap.sdk.LDAPException; 058import com.unboundid.ldap.sdk.ResultCode; 059import com.unboundid.ldap.sdk.Version; 060import com.unboundid.ldap.sdk.schema.Schema; 061import com.unboundid.ldif.LDIFException; 062import com.unboundid.ldif.LDIFReader; 063import com.unboundid.util.ByteStringBuffer; 064import com.unboundid.util.CommandLineTool; 065import com.unboundid.util.Debug; 066import com.unboundid.util.NotNull; 067import com.unboundid.util.Nullable; 068import com.unboundid.util.ObjectPair; 069import com.unboundid.util.PassphraseEncryptedOutputStream; 070import com.unboundid.util.StaticUtils; 071import com.unboundid.util.ThreadSafety; 072import com.unboundid.util.ThreadSafetyLevel; 073import com.unboundid.util.args.ArgumentException; 074import com.unboundid.util.args.ArgumentParser; 075import com.unboundid.util.args.BooleanArgument; 076import com.unboundid.util.args.DNArgument; 077import com.unboundid.util.args.FileArgument; 078import com.unboundid.util.args.FilterArgument; 079import com.unboundid.util.args.IntegerArgument; 080import com.unboundid.util.args.SubCommand; 081import com.unboundid.util.args.StringArgument; 082 083import static com.unboundid.ldap.sdk.unboundidds.tools.ToolMessages.*; 084 085 086 087/** 088 * This class provides a command-line tool that can be used to split an LDIF 089 * file below a specified base DN. 
This can be used to help initialize an
 * entry-balancing deployment for use with the Directory Proxy Server.
 * <BR>
 * <BLOCKQUOTE>
 *   <B>NOTE:</B>  This class, and other classes within the
 *   {@code com.unboundid.ldap.sdk.unboundidds} package structure, are only
 *   supported for use against Ping Identity, UnboundID, and
 *   Nokia/Alcatel-Lucent 8661 server products.  These classes provide support
 *   for proprietary functionality or for external specifications that are not
 *   considered stable or mature enough to be guaranteed to work in an
 *   interoperable way with other types of LDAP servers.
 * </BLOCKQUOTE>
 * <BR>
 * It supports a number of algorithms for determining how to split the data,
 * including:
 * <UL>
 *   <LI>
 *     split-using-hash-on-rdn -- The tool will compute a digest of the DN
 *     component that is immediately below the split base DN, and will use a
 *     modulus to select a backend set for a given entry.  Since the split is
 *     based purely on computation involving the DN, there is no need for
 *     caching to ensure that children are placed in the same sets as their
 *     parent, which allows it to run effectively with a small memory
 *     footprint.
 *   </LI>
 *   <LI>
 *     split-using-hash-on-attribute -- The tool will compute a digest of the
 *     value(s) of a specified attribute, and will use a modulus to select a
 *     backend set for a given entry.  This hash will only be computed for
 *     entries immediately below the split base DN, and a cache will be used to
 *     ensure that entries more than one level below the split base DN are
 *     placed in the same backend set as their parent.
 *   </LI>
 *   <LI>
 *     split-using-fewest-entries -- When examining an entry immediately below
 *     the split base DN, the tool will place that entry in the set that has
 *     the fewest entries.
For flat DITs in which entries only exist one level
 *     below the split base DN, this will effectively ensure a round-robin
 *     distribution.  But for cases in which there are branches of varying
 *     sizes below the split base DN, this can help ensure that entries are
 *     more evenly distributed across backend sets.  A cache will be used to
 *     ensure that entries more than one level below the split base DN are
 *     placed in the same backend set as their parent.
 *   </LI>
 *   <LI>
 *     split-using-filter -- When examining an entry immediately below the
 *     split base DN, a series of filters will be evaluated against that entry,
 *     with each filter associated with a specific backend set.  If an entry
 *     doesn't match any of the provided filters, an RDN hash can be used to
 *     select the set.  A cache will be used to ensure that entries more than
 *     one level below the split base DN are placed in the same backend set as
 *     their parent.
 *   </LI>
 * </UL>
 */
@ThreadSafety(level=ThreadSafetyLevel.NOT_THREADSAFE)
public final class SplitLDIF
       extends CommandLineTool
{
  /**
   * The maximum length of any message to write to standard output or standard
   * error.
   */
  private static final int MAX_OUTPUT_LINE_LENGTH =
       StaticUtils.TERMINAL_WIDTH_COLUMNS - 1;



  // The global arguments used by this tool.  All of these are assigned in
  // addToolArguments and are therefore null until the argument parser has been
  // populated, hence the @Nullable annotations.
  @Nullable private BooleanArgument addEntriesOutsideSplitBaseDNToAllSets =
       null;
  @Nullable private BooleanArgument addEntriesOutsideSplitBaseDNToDedicatedSet =
       null;
  @Nullable private BooleanArgument compressTarget = null;
  @Nullable private BooleanArgument encryptTarget = null;
  @Nullable private BooleanArgument sourceCompressed = null;
  @Nullable private DNArgument splitBaseDN = null;
  @Nullable private FileArgument encryptionPassphraseFile = null;
  @Nullable private FileArgument schemaPath = null;
  @Nullable private FileArgument sourceLDIF = null;
  @Nullable private FileArgument targetLDIFBasePath = null;
  @Nullable private IntegerArgument numThreads = null;

  // The arguments used to split using a hash of the RDN.
  @Nullable private IntegerArgument splitUsingHashOnRDNNumSets = null;
  @Nullable private SubCommand splitUsingHashOnRDN = null;

  // The arguments used to split using a hash on a specified attribute.
  @Nullable private BooleanArgument splitUsingHashOnAttributeAssumeFlatDIT =
       null;
  @Nullable private BooleanArgument splitUsingHashOnAttributeUseAllValues =
       null;
  @Nullable private IntegerArgument splitUsingHashOnAttributeNumSets = null;
  @Nullable private StringArgument splitUsingHashOnAttributeAttributeName =
       null;
  @Nullable private SubCommand splitUsingHashOnAttribute = null;

  // The arguments used to choose the set with the fewest entries.
  @Nullable private BooleanArgument splitUsingFewestEntriesAssumeFlatDIT = null;
  @Nullable private IntegerArgument splitUsingFewestEntriesNumSets = null;
  @Nullable private SubCommand splitUsingFewestEntries = null;

  // The arguments used to choose the set using a provided set of filters.
  @Nullable private BooleanArgument splitUsingFilterAssumeFlatDIT = null;
  @Nullable private FilterArgument splitUsingFilterFilter = null;
  @Nullable private SubCommand splitUsingFilter = null;



  /**
   * Runs the tool with the provided set of command-line arguments.
   *
   * @param  args  The command-line arguments provided to this tool.
   */
  public static void main(@NotNull final String... args)
  {
    final ResultCode resultCode = main(System.out, System.err, args);
    if (resultCode != ResultCode.SUCCESS)
    {
      // Only exit the JVM on failure; a successful run simply returns so that
      // this entry point is also safe to invoke from other code.
      System.exit(resultCode.intValue());
    }
  }



  /**
   * Runs the tool with the provided set of command-line arguments.
   *
   * @param  out   The output stream used for standard output.  It may be
   *               {@code null} if standard output should be suppressed.
   * @param  err   The output stream used for standard error.  It may be
   *               {@code null} if standard error should be suppressed.
   * @param  args  The command-line arguments provided to this tool.
   *
   * @return  A result code with information about the processing performed.
   *          Any result code other than {@link ResultCode#SUCCESS} indicates
   *          that an error occurred.
   */
  @NotNull()
  public static ResultCode main(@Nullable final OutputStream out,
                                @Nullable final OutputStream err,
                                @NotNull final String... args)
  {
    final SplitLDIF tool = new SplitLDIF(out, err);
    return tool.runTool(args);
  }



  /**
   * Creates a new instance of this tool with the provided information.
   *
   * @param  out  The output stream used for standard output.  It may be
   *              {@code null} if standard output should be suppressed.
   * @param  err  The output stream used for standard error.  It may be
   *              {@code null} if standard error should be suppressed.
   */
  public SplitLDIF(@Nullable final OutputStream out,
                   @Nullable final OutputStream err)
  {
    super(out, err);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public String getToolName()
  {
    return "split-ldif";
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public String getToolDescription()
  {
    return INFO_SPLIT_LDIF_TOOL_DESCRIPTION.get();
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public String getToolVersion()
  {
    return Version.NUMERIC_VERSION_STRING;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean supportsInteractiveMode()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean defaultsToInteractiveMode()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean supportsPropertiesFile()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void addToolArguments(@NotNull final ArgumentParser parser)
         throws ArgumentException
  {
    // Add the global arguments.  Registration order here determines the order
    // in which arguments are presented in the tool's usage output.
    //
    // NOTE(review):  the second boolean passed to addLongIdentifier appears to
    // control whether the alternate name is hidden from usage output --
    // confirm against the Argument.addLongIdentifier javadoc.
    sourceLDIF = new FileArgument('l', "sourceLDIF", true, 0, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SOURCE_LDIF.get(), true, false, true,
         false);
    sourceLDIF.addLongIdentifier("inputLDIF", true);
    sourceLDIF.addLongIdentifier("source-ldif", true);
    sourceLDIF.addLongIdentifier("input-ldif", true);
    parser.addArgument(sourceLDIF);

    sourceCompressed = new BooleanArgument('C', "sourceCompressed",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SOURCE_COMPRESSED.get());
    sourceCompressed.addLongIdentifier("inputCompressed", true);
    sourceCompressed.addLongIdentifier("source-compressed", true);
    sourceCompressed.addLongIdentifier("input-compressed", true);
    parser.addArgument(sourceCompressed);

    targetLDIFBasePath = new FileArgument('o', "targetLDIFBasePath", false, 1,
         null, INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_TARGET_LDIF_BASE.get(), false,
         true, true, false);
    targetLDIFBasePath.addLongIdentifier("outputLDIFBasePath", true);
    targetLDIFBasePath.addLongIdentifier("target-ldif-base-path", true);
    targetLDIFBasePath.addLongIdentifier("output-ldif-base-path", true);
    parser.addArgument(targetLDIFBasePath);

    compressTarget = new BooleanArgument('c', "compressTarget",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_COMPRESS_TARGET.get());
    compressTarget.addLongIdentifier("compressOutput", true);
    compressTarget.addLongIdentifier("compress", true);
    compressTarget.addLongIdentifier("compress-target", true);
    compressTarget.addLongIdentifier("compress-output", true);
    parser.addArgument(compressTarget);

    encryptTarget = new BooleanArgument(null, "encryptTarget",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_ENCRYPT_TARGET.get());
    encryptTarget.addLongIdentifier("encryptOutput", true);
    encryptTarget.addLongIdentifier("encrypt", true);
    encryptTarget.addLongIdentifier("encrypt-target", true);
    encryptTarget.addLongIdentifier("encrypt-output", true);
    parser.addArgument(encryptTarget);

    encryptionPassphraseFile = new FileArgument(null,
         "encryptionPassphraseFile", false, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_ENCRYPT_PW_FILE.get(), true, true,
         true, false);
    encryptionPassphraseFile.addLongIdentifier("encryptionPasswordFile", true);
    encryptionPassphraseFile.addLongIdentifier("encryption-passphrase-file",
         true);
    encryptionPassphraseFile.addLongIdentifier("encryption-password-file",
         true);
    parser.addArgument(encryptionPassphraseFile);

    splitBaseDN = new DNArgument('b', "splitBaseDN", true, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SPLIT_BASE_DN.get());
    splitBaseDN.addLongIdentifier("baseDN", true);
    splitBaseDN.addLongIdentifier("split-base-dn", true);
    splitBaseDN.addLongIdentifier("base-dn", true);
    parser.addArgument(splitBaseDN);

    addEntriesOutsideSplitBaseDNToAllSets = new BooleanArgument(null,
         "addEntriesOutsideSplitBaseDNToAllSets", 1,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_OUTSIDE_TO_ALL_SETS.get());
    addEntriesOutsideSplitBaseDNToAllSets.addLongIdentifier(
         "add-entries-outside-split-base-dn-to-all-sets", true);
    parser.addArgument(addEntriesOutsideSplitBaseDNToAllSets);

    addEntriesOutsideSplitBaseDNToDedicatedSet = new BooleanArgument(null,
         "addEntriesOutsideSplitBaseDNToDedicatedSet", 1,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_OUTSIDE_TO_DEDICATED_SET.get());
    addEntriesOutsideSplitBaseDNToDedicatedSet.addLongIdentifier(
         "add-entries-outside-split-base-dn-to-dedicated-set", true);
    parser.addArgument(addEntriesOutsideSplitBaseDNToDedicatedSet);

    schemaPath = new FileArgument(null, "schemaPath", false, 0, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SCHEMA_PATH.get(), true, false, false,
         false);
    schemaPath.addLongIdentifier("schemaFile", true);
    schemaPath.addLongIdentifier("schemaDirectory", true);
    schemaPath.addLongIdentifier("schema-path", true);
    schemaPath.addLongIdentifier("schema-file", true);
    schemaPath.addLongIdentifier("schema-directory", true);
    parser.addArgument(schemaPath);

    numThreads = new IntegerArgument('t', "numThreads", false, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_NUM_THREADS.get(), 1,
         Integer.MAX_VALUE, 1);
    numThreads.addLongIdentifier("num-threads", true);
    parser.addArgument(numThreads);


    // Add the subcommand used to split entries using a hash on the RDN.  Each
    // subcommand gets its own ArgumentParser, a map of example usages (keyed
    // by the example command line, valued by its description), and a hidden
    // short-form name.
    final ArgumentParser splitUsingHashOnRDNParser = new ArgumentParser(
         "split-using-hash-on-rdn", INFO_SPLIT_LDIF_SC_HASH_ON_RDN_DESC.get());

    splitUsingHashOnRDNNumSets = new IntegerArgument(null, "numSets", true, 1,
         null, INFO_SPLIT_LDIF_SC_HASH_ON_RDN_ARG_DESC_NUM_SETS.get(), 2,
         Integer.MAX_VALUE);
    splitUsingHashOnRDNNumSets.addLongIdentifier("num-sets", true);
    splitUsingHashOnRDNParser.addArgument(splitUsingHashOnRDNNumSets);

    final LinkedHashMap<String[],String> splitUsingHashOnRDNExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingHashOnRDNExamples.put(
         new String[]
         {
           "split-using-hash-on-rdn",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--numSets", "4",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_HASH_ON_RDN_EXAMPLE.get());

    splitUsingHashOnRDN = new SubCommand("split-using-hash-on-rdn",
         INFO_SPLIT_LDIF_SC_HASH_ON_RDN_DESC.get(), splitUsingHashOnRDNParser,
         splitUsingHashOnRDNExamples);
    splitUsingHashOnRDN.addName("hash-on-rdn", true);

    parser.addSubCommand(splitUsingHashOnRDN);


    // Add the subcommand used to split entries using a hash on a specified
    // attribute.
    final ArgumentParser splitUsingHashOnAttributeParser = new ArgumentParser(
         "split-using-hash-on-attribute",
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_DESC.get());

    splitUsingHashOnAttributeAttributeName = new StringArgument(null,
         "attributeName", true, 1, "{attr}",
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ATTR_NAME.get());
    splitUsingHashOnAttributeAttributeName.addLongIdentifier("attribute-name",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeAttributeName);

    splitUsingHashOnAttributeNumSets = new IntegerArgument(null, "numSets",
         true, 1, null, INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_NUM_SETS.get(),
         2, Integer.MAX_VALUE);
    splitUsingHashOnAttributeNumSets.addLongIdentifier("num-sets", true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeNumSets);

    splitUsingHashOnAttributeUseAllValues = new BooleanArgument(null,
         "useAllValues", 1,
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ALL_VALUES.get());
    splitUsingHashOnAttributeUseAllValues.addLongIdentifier("use-all-values",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeUseAllValues);

    splitUsingHashOnAttributeAssumeFlatDIT = new BooleanArgument(null,
         "assumeFlatDIT", 1,
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ASSUME_FLAT_DIT.get());
    splitUsingHashOnAttributeAssumeFlatDIT.addLongIdentifier("assume-flat-dit",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeAssumeFlatDIT);

    final LinkedHashMap<String[],String> splitUsingHashOnAttributeExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingHashOnAttributeExamples.put(
         new String[]
         {
           "split-using-hash-on-attribute",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--attributeName", "uid",
           "--numSets", "4",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_EXAMPLE.get());

    splitUsingHashOnAttribute = new SubCommand("split-using-hash-on-attribute",
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_DESC.get(),
         splitUsingHashOnAttributeParser, splitUsingHashOnAttributeExamples);
    splitUsingHashOnAttribute.addName("hash-on-attribute", true);

    parser.addSubCommand(splitUsingHashOnAttribute);


    // Add the subcommand used to split entries by selecting the set with the
    // fewest entries.
    final ArgumentParser splitUsingFewestEntriesParser = new ArgumentParser(
         "split-using-fewest-entries",
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_DESC.get());

    splitUsingFewestEntriesNumSets = new IntegerArgument(null, "numSets",
         true, 1, null,
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_ARG_DESC_NUM_SETS.get(),
         2, Integer.MAX_VALUE);
    splitUsingFewestEntriesNumSets.addLongIdentifier("num-sets", true);
    splitUsingFewestEntriesParser.addArgument(splitUsingFewestEntriesNumSets);

    splitUsingFewestEntriesAssumeFlatDIT = new BooleanArgument(null,
         "assumeFlatDIT", 1,
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_ARG_DESC_ASSUME_FLAT_DIT.get());
    splitUsingFewestEntriesAssumeFlatDIT.addLongIdentifier("assume-flat-dit",
         true);
    splitUsingFewestEntriesParser.addArgument(
         splitUsingFewestEntriesAssumeFlatDIT);

    final LinkedHashMap<String[],String> splitUsingFewestEntriesExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingFewestEntriesExamples.put(
         new String[]
         {
           "split-using-fewest-entries",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--numSets", "4",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_EXAMPLE.get());

    splitUsingFewestEntries = new SubCommand("split-using-fewest-entries",
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_DESC.get(),
         splitUsingFewestEntriesParser, splitUsingFewestEntriesExamples);
    splitUsingFewestEntries.addName("fewest-entries", true);

    parser.addSubCommand(splitUsingFewestEntries);


    // Add the subcommand used to split entries by selecting the set based on a
    // filter.  The filter argument may be given multiple times; filter count
    // and uniqueness are validated later in doExtendedArgumentValidation.
    final ArgumentParser splitUsingFilterParser = new ArgumentParser(
         "split-using-filter", INFO_SPLIT_LDIF_SC_FILTER_DESC.get());

    splitUsingFilterFilter = new FilterArgument(null, "filter", true, 0, null,
         INFO_SPLIT_LDIF_SC_FILTER_ARG_DESC_FILTER.get());
    splitUsingFilterParser.addArgument(splitUsingFilterFilter);

    splitUsingFilterAssumeFlatDIT = new BooleanArgument(null, "assumeFlatDIT",
         1, INFO_SPLIT_LDIF_SC_FILTER_ARG_DESC_ASSUME_FLAT_DIT.get());
    splitUsingFilterAssumeFlatDIT.addLongIdentifier("assume-flat-dit", true);
    splitUsingFilterParser.addArgument(splitUsingFilterAssumeFlatDIT);

    final LinkedHashMap<String[],String> splitUsingFilterExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingFilterExamples.put(
         new String[]
         {
           "split-using-filter",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--filter", "(timeZone=Eastern)",
           "--filter", "(timeZone=Central)",
           "--filter", "(timeZone=Mountain)",
           "--filter", "(timeZone=Pacific)",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_FILTER_EXAMPLE.get());

    splitUsingFilter = new SubCommand("split-using-filter",
         INFO_SPLIT_LDIF_SC_FILTER_DESC.get(),
         splitUsingFilterParser, splitUsingFilterExamples);
    splitUsingFilter.addName("filter", true);

    parser.addSubCommand(splitUsingFilter);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void doExtendedArgumentValidation()
         throws ArgumentException
  {
    // If multiple sourceLDIF values were provided, then a target LDIF base path
    // must have been given.
    final List<File> sourceLDIFValues = sourceLDIF.getValues();
    if (sourceLDIFValues.size() > 1)
    {
      if (! targetLDIFBasePath.isPresent())
      {
        throw new ArgumentException(ERR_SPLIT_LDIF_NO_TARGET_BASE_PATH.get(
             sourceLDIF.getIdentifierString(),
             targetLDIFBasePath.getIdentifierString()));
      }
    }


    // If the split-using-filter subcommand was provided, then at least two
    // filters must have been provided, and none of the filters can be logically
    // equivalent to any of the others.
    //
    // NOTE(review):  duplicate detection relies on Filter.equals, which is
    // presumably a logical-equivalence comparison rather than a purely
    // textual one -- confirm against the Filter javadoc.
    if (splitUsingFilter.isPresent())
    {
      final List<Filter> filterList = splitUsingFilterFilter.getValues();
      final Set<Filter> filterSet = new LinkedHashSet<>(
           StaticUtils.computeMapCapacity(filterList.size()));
      for (final Filter f : filterList)
      {
        if (filterSet.contains(f))
        {
          throw new ArgumentException(ERR_SPLIT_LDIF_NON_UNIQUE_FILTER.get(
               splitUsingFilterFilter.getIdentifierString(), f.toString()));
        }
        else
        {
          filterSet.add(f);
        }
      }

      if (filterSet.size() < 2)
      {
        throw new ArgumentException(ERR_SPLIT_LDIF_NOT_ENOUGH_FILTERS.get(
             splitUsingFilter.getPrimaryName(),
             splitUsingFilterFilter.getIdentifierString()));
      }
    }
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  @NotNull()
  public ResultCode doToolProcessing()
  {
    // Get the schema to use during processing.
    final Schema schema;
    try
    {
      schema = getSchema();
    }
    catch (final LDAPException le)
    {
      wrapErr(0, MAX_OUTPUT_LINE_LENGTH, le.getMessage());
      return le.getResultCode();
    }


    // If an encryption passphrase file is provided, then get the passphrase
    // from it.
673 String encryptionPassphrase = null; 674 if (encryptionPassphraseFile.isPresent()) 675 { 676 try 677 { 678 encryptionPassphrase = ToolUtils.readEncryptionPassphraseFromFile( 679 encryptionPassphraseFile.getValue()); 680 } 681 catch (final LDAPException e) 682 { 683 Debug.debugException(e); 684 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, e.getMessage()); 685 return e.getResultCode(); 686 } 687 } 688 689 690 // Figure out which subcommand was selected, and create the appropriate 691 // translator to use to perform the processing. 692 final SplitLDIFTranslator translator; 693 if (splitUsingHashOnRDN.isPresent()) 694 { 695 translator = new SplitLDIFRDNHashTranslator(splitBaseDN.getValue(), 696 splitUsingHashOnRDNNumSets.getValue(), 697 addEntriesOutsideSplitBaseDNToAllSets.isPresent(), 698 addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent()); 699 } 700 else if (splitUsingHashOnAttribute.isPresent()) 701 { 702 translator = new SplitLDIFAttributeHashTranslator(splitBaseDN.getValue(), 703 splitUsingHashOnAttributeNumSets.getValue(), 704 splitUsingHashOnAttributeAttributeName.getValue(), 705 splitUsingHashOnAttributeUseAllValues.isPresent(), 706 splitUsingHashOnAttributeAssumeFlatDIT.isPresent(), 707 addEntriesOutsideSplitBaseDNToAllSets.isPresent(), 708 addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent()); 709 } 710 else if (splitUsingFewestEntries.isPresent()) 711 { 712 translator = new SplitLDIFFewestEntriesTranslator(splitBaseDN.getValue(), 713 splitUsingFewestEntriesNumSets.getValue(), 714 splitUsingFewestEntriesAssumeFlatDIT.isPresent(), 715 addEntriesOutsideSplitBaseDNToAllSets.isPresent(), 716 addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent()); 717 } 718 else if (splitUsingFilter.isPresent()) 719 { 720 final List<Filter> filterList = splitUsingFilterFilter.getValues(); 721 final LinkedHashSet<Filter> filterSet = new LinkedHashSet<>( 722 StaticUtils.computeMapCapacity(filterList.size())); 723 for (final Filter f : filterList) 724 { 725 filterSet.add(f); 726 } 
727 728 translator = new SplitLDIFFilterTranslator(splitBaseDN.getValue(), 729 schema, filterSet, splitUsingFilterAssumeFlatDIT.isPresent(), 730 addEntriesOutsideSplitBaseDNToAllSets.isPresent(), 731 addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent()); 732 } 733 else 734 { 735 // This should never happen. 736 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 737 ERR_SPLIT_LDIF_CANNOT_DETERMINE_SPLIT_ALGORITHM.get( 738 splitUsingHashOnRDN.getPrimaryName() + ", " + 739 splitUsingHashOnAttribute.getPrimaryName() + ", " + 740 splitUsingFewestEntries.getPrimaryName() + ", " + 741 splitUsingFilter.getPrimaryName())); 742 return ResultCode.PARAM_ERROR; 743 } 744 745 746 // Create the LDIF reader. 747 final LDIFReader ldifReader; 748 try 749 { 750 final InputStream inputStream; 751 if (sourceLDIF.isPresent()) 752 { 753 final ObjectPair<InputStream,String> p = 754 ToolUtils.getInputStreamForLDIFFiles(sourceLDIF.getValues(), 755 encryptionPassphrase, getOut(), getErr()); 756 inputStream = p.getFirst(); 757 if ((encryptionPassphrase == null) && (p.getSecond() != null)) 758 { 759 encryptionPassphrase = p.getSecond(); 760 } 761 } 762 else 763 { 764 inputStream = System.in; 765 } 766 767 ldifReader = new LDIFReader(inputStream, numThreads.getValue(), 768 translator); 769 if (schema != null) 770 { 771 ldifReader.setSchema(schema); 772 } 773 } 774 catch (final Exception e) 775 { 776 Debug.debugException(e); 777 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 778 ERR_SPLIT_LDIF_ERROR_CREATING_LDIF_READER.get( 779 StaticUtils.getExceptionMessage(e))); 780 return ResultCode.LOCAL_ERROR; 781 } 782 783 784 // Iterate through and process all of the entries. 
785 ResultCode resultCode = ResultCode.SUCCESS; 786 final LinkedHashMap<String,OutputStream> outputStreams = 787 new LinkedHashMap<>(StaticUtils.computeMapCapacity(10)); 788 try 789 { 790 final AtomicLong entriesRead = new AtomicLong(0L); 791 final AtomicLong entriesExcluded = new AtomicLong(0L); 792 final TreeMap<String,AtomicLong> fileCounts = new TreeMap<>(); 793 794readLoop: 795 while (true) 796 { 797 final SplitLDIFEntry entry; 798 try 799 { 800 entry = (SplitLDIFEntry) ldifReader.readEntry(); 801 } 802 catch (final LDIFException le) 803 { 804 Debug.debugException(le); 805 resultCode = ResultCode.LOCAL_ERROR; 806 807 final File f = getOutputFile(SplitLDIFEntry.SET_NAME_ERRORS); 808 OutputStream s = outputStreams.get(SplitLDIFEntry.SET_NAME_ERRORS); 809 if (s == null) 810 { 811 try 812 { 813 s = new FileOutputStream(f); 814 815 if (encryptTarget.isPresent()) 816 { 817 if (encryptionPassphrase == null) 818 { 819 try 820 { 821 encryptionPassphrase = 822 ToolUtils.promptForEncryptionPassphrase(false, true, 823 getOut(), getErr()); 824 } 825 catch (final LDAPException ex) 826 { 827 Debug.debugException(ex); 828 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, ex.getMessage()); 829 return ex.getResultCode(); 830 } 831 } 832 833 s = new PassphraseEncryptedOutputStream(encryptionPassphrase, 834 s); 835 } 836 837 if (compressTarget.isPresent()) 838 { 839 s = new GZIPOutputStream(s); 840 } 841 842 outputStreams.put(SplitLDIFEntry.SET_NAME_ERRORS, s); 843 fileCounts.put(SplitLDIFEntry.SET_NAME_ERRORS, 844 new AtomicLong(0L)); 845 } 846 catch (final Exception e) 847 { 848 Debug.debugException(e); 849 resultCode = ResultCode.LOCAL_ERROR; 850 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 851 ERR_SPLIT_LDIF_CANNOT_OPEN_OUTPUT_FILE.get( 852 f.getAbsolutePath(), 853 StaticUtils.getExceptionMessage(e))); 854 break readLoop; 855 } 856 } 857 858 final ByteStringBuffer buffer = new ByteStringBuffer(); 859 buffer.append("# "); 860 buffer.append(le.getMessage()); 861 buffer.append(StaticUtils.EOL_BYTES); 
862 863 final List<String> dataLines = le.getDataLines(); 864 if (dataLines != null) 865 { 866 for (final String dataLine : dataLines) 867 { 868 buffer.append(dataLine); 869 buffer.append(StaticUtils.EOL_BYTES); 870 } 871 } 872 873 buffer.append(StaticUtils.EOL_BYTES); 874 875 try 876 { 877 s.write(buffer.toByteArray()); 878 } 879 catch (final Exception e) 880 { 881 Debug.debugException(e); 882 resultCode = ResultCode.LOCAL_ERROR; 883 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 884 ERR_SPLIT_LDIF_ERROR_WRITING_ERROR_TO_FILE.get( 885 le.getMessage(), f.getAbsolutePath(), 886 StaticUtils.getExceptionMessage(e))); 887 break readLoop; 888 } 889 890 if (le.mayContinueReading()) 891 { 892 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 893 ERR_SPLIT_LDIF_INVALID_LDIF_RECORD_RECOVERABLE.get( 894 StaticUtils.getExceptionMessage(le))); 895 continue; 896 } 897 else 898 { 899 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 900 ERR_SPLIT_LDIF_INVALID_LDIF_RECORD_UNRECOVERABLE.get( 901 StaticUtils.getExceptionMessage(le))); 902 break; 903 } 904 } 905 catch (final IOException ioe) 906 { 907 Debug.debugException(ioe); 908 resultCode = ResultCode.LOCAL_ERROR; 909 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 910 ERR_SPLIT_LDIF_IO_READ_ERROR.get( 911 StaticUtils.getExceptionMessage(ioe))); 912 break; 913 } 914 catch (final Exception e) 915 { 916 Debug.debugException(e); 917 resultCode = ResultCode.LOCAL_ERROR; 918 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 919 ERR_SPLIT_LDIF_UNEXPECTED_READ_ERROR.get( 920 StaticUtils.getExceptionMessage(e))); 921 break; 922 } 923 924 if (entry == null) 925 { 926 break; 927 } 928 929 final long readCount = entriesRead.incrementAndGet(); 930 if ((readCount % 1000L) == 0) 931 { 932 // Even though we aren't done with this entry yet, we'll go ahead and 933 // log a progress message now because it's easier to do that now than 934 // to ensure that it's handled properly through all possible error 935 // conditions that need to be handled below. 
936 wrapOut(0, MAX_OUTPUT_LINE_LENGTH, 937 INFO_SPLIT_LDIF_PROGRESS.get(readCount)); 938 } 939 940 941 // Get the set(s) to which the entry should be written. If this is 942 // null (which could be the case as a result of a race condition when 943 // using multiple threads where processing for a child completes before 944 // processing for its parent, or as a result of a case in which a 945 // child is included without or before its parent), then try to see if 946 // we can get the sets by passing the entry through the translator. 947 Set<String> sets = entry.getSets(); 948 byte[] ldifBytes = entry.getLDIFBytes(); 949 if (sets == null) 950 { 951 try 952 { 953 sets = translator.translate(entry, 0L).getSets(); 954 } 955 catch (final Exception e) 956 { 957 Debug.debugException(e); 958 } 959 960 if (sets == null) 961 { 962 final SplitLDIFEntry errorEntry = translator.createEntry(entry, 963 ERR_SPLIT_LDIF_ENTRY_WITHOUT_PARENT.get( 964 entry.getDN(), splitBaseDN.getStringValue()), 965 Collections.singleton(SplitLDIFEntry.SET_NAME_ERRORS)); 966 ldifBytes = errorEntry.getLDIFBytes(); 967 sets = errorEntry.getSets(); 968 } 969 } 970 971 972 // If the entry shouldn't be written into any sets, then we don't need 973 // to do anything else. 974 if (sets.isEmpty()) 975 { 976 entriesExcluded.incrementAndGet(); 977 continue; 978 } 979 980 981 // Write the entry into each of the target sets, creating the output 982 // files if necessary. 983 for (final String set : sets) 984 { 985 if (set.equals(SplitLDIFEntry.SET_NAME_ERRORS)) 986 { 987 // This indicates that an error was encountered during processing, 988 // so we'll update the result code to reflect that. 
989 resultCode = ResultCode.LOCAL_ERROR; 990 } 991 992 final File f = getOutputFile(set); 993 OutputStream s = outputStreams.get(set); 994 if (s == null) 995 { 996 try 997 { 998 s = new FileOutputStream(f); 999 1000 if (encryptTarget.isPresent()) 1001 { 1002 if (encryptionPassphrase == null) 1003 { 1004 try 1005 { 1006 encryptionPassphrase = 1007 ToolUtils.promptForEncryptionPassphrase(false, true, 1008 getOut(), getErr()); 1009 } 1010 catch (final LDAPException ex) 1011 { 1012 Debug.debugException(ex); 1013 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, ex.getMessage()); 1014 return ex.getResultCode(); 1015 } 1016 } 1017 1018 s = new PassphraseEncryptedOutputStream(encryptionPassphrase, 1019 s); 1020 } 1021 1022 if (compressTarget.isPresent()) 1023 { 1024 s = new GZIPOutputStream(s); 1025 } 1026 1027 outputStreams.put(set, s); 1028 fileCounts.put(set, new AtomicLong(0L)); 1029 } 1030 catch (final Exception e) 1031 { 1032 Debug.debugException(e); 1033 resultCode = ResultCode.LOCAL_ERROR; 1034 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 1035 ERR_SPLIT_LDIF_CANNOT_OPEN_OUTPUT_FILE.get( 1036 f.getAbsolutePath(), 1037 StaticUtils.getExceptionMessage(e))); 1038 break readLoop; 1039 } 1040 } 1041 1042 try 1043 { 1044 s.write(ldifBytes); 1045 } 1046 catch (final Exception e) 1047 { 1048 Debug.debugException(e); 1049 resultCode = ResultCode.LOCAL_ERROR; 1050 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 1051 ERR_SPLIT_LDIF_ERROR_WRITING_TO_FILE.get( 1052 entry.getDN(), f.getAbsolutePath(), 1053 StaticUtils.getExceptionMessage(e))); 1054 break readLoop; 1055 } 1056 1057 fileCounts.get(set).incrementAndGet(); 1058 } 1059 } 1060 1061 1062 // Processing is complete. Summarize the processing that was performed. 
1063 final long finalReadCount = entriesRead.get(); 1064 if (finalReadCount > 1000L) 1065 { 1066 out(); 1067 } 1068 1069 wrapOut(0, MAX_OUTPUT_LINE_LENGTH, 1070 INFO_SPLIT_LDIF_PROCESSING_COMPLETE.get(finalReadCount)); 1071 1072 final long excludedCount = entriesExcluded.get(); 1073 if (excludedCount > 0L) 1074 { 1075 wrapOut(0, MAX_OUTPUT_LINE_LENGTH, 1076 INFO_SPLIT_LDIF_EXCLUDED_COUNT.get(excludedCount)); 1077 } 1078 1079 for (final Map.Entry<String,AtomicLong> e : fileCounts.entrySet()) 1080 { 1081 final File f = getOutputFile(e.getKey()); 1082 wrapOut(0, MAX_OUTPUT_LINE_LENGTH, 1083 INFO_SPLIT_LDIF_COUNT_TO_FILE.get(e.getValue().get(), 1084 f.getName())); 1085 } 1086 } 1087 finally 1088 { 1089 try 1090 { 1091 ldifReader.close(); 1092 } 1093 catch (final Exception e) 1094 { 1095 Debug.debugException(e); 1096 } 1097 1098 for (final Map.Entry<String,OutputStream> e : outputStreams.entrySet()) 1099 { 1100 try 1101 { 1102 e.getValue().close(); 1103 } 1104 catch (final Exception ex) 1105 { 1106 Debug.debugException(ex); 1107 resultCode = ResultCode.LOCAL_ERROR; 1108 wrapErr(0, MAX_OUTPUT_LINE_LENGTH, 1109 ERR_SPLIT_LDIF_ERROR_CLOSING_FILE.get( 1110 getOutputFile(e.getKey()), 1111 StaticUtils.getExceptionMessage(ex))); 1112 } 1113 } 1114 } 1115 1116 return resultCode; 1117 } 1118 1119 1120 1121 /** 1122 * Retrieves the schema that should be used for processing. 1123 * 1124 * @return The schema that was created. 1125 * 1126 * @throws LDAPException If a problem is encountered while retrieving the 1127 * schema. 1128 */ 1129 @Nullable() 1130 private Schema getSchema() 1131 throws LDAPException 1132 { 1133 // If any schema paths were specified, then load the schema only from those 1134 // paths. 
1135 if (schemaPath.isPresent()) 1136 { 1137 final ArrayList<File> schemaFiles = new ArrayList<>(10); 1138 for (final File path : schemaPath.getValues()) 1139 { 1140 if (path.isFile()) 1141 { 1142 schemaFiles.add(path); 1143 } 1144 else 1145 { 1146 final TreeMap<String,File> fileMap = new TreeMap<>(); 1147 for (final File schemaDirFile : path.listFiles()) 1148 { 1149 final String name = schemaDirFile.getName(); 1150 if (schemaDirFile.isFile() && name.toLowerCase().endsWith(".ldif")) 1151 { 1152 fileMap.put(name, schemaDirFile); 1153 } 1154 } 1155 schemaFiles.addAll(fileMap.values()); 1156 } 1157 } 1158 1159 if (schemaFiles.isEmpty()) 1160 { 1161 throw new LDAPException(ResultCode.PARAM_ERROR, 1162 ERR_SPLIT_LDIF_NO_SCHEMA_FILES.get( 1163 schemaPath.getIdentifierString())); 1164 } 1165 else 1166 { 1167 try 1168 { 1169 return Schema.getSchema(schemaFiles); 1170 } 1171 catch (final Exception e) 1172 { 1173 Debug.debugException(e); 1174 throw new LDAPException(ResultCode.LOCAL_ERROR, 1175 ERR_SPLIT_LDIF_ERROR_LOADING_SCHEMA.get( 1176 StaticUtils.getExceptionMessage(e))); 1177 } 1178 } 1179 } 1180 else 1181 { 1182 // If the INSTANCE_ROOT environment variable is set and it refers to a 1183 // directory that has a config/schema subdirectory that has one or more 1184 // schema files in it, then read the schema from that directory. 
1185 try 1186 { 1187 final String instanceRootStr = 1188 StaticUtils.getEnvironmentVariable("INSTANCE_ROOT"); 1189 if (instanceRootStr != null) 1190 { 1191 final File instanceRoot = new File(instanceRootStr); 1192 final File configDir = new File(instanceRoot, "config"); 1193 final File schemaDir = new File(configDir, "schema"); 1194 if (schemaDir.exists()) 1195 { 1196 final TreeMap<String,File> fileMap = new TreeMap<>(); 1197 for (final File schemaDirFile : schemaDir.listFiles()) 1198 { 1199 final String name = schemaDirFile.getName(); 1200 if (schemaDirFile.isFile() && 1201 name.toLowerCase().endsWith(".ldif")) 1202 { 1203 fileMap.put(name, schemaDirFile); 1204 } 1205 } 1206 1207 if (! fileMap.isEmpty()) 1208 { 1209 return Schema.getSchema(new ArrayList<>(fileMap.values())); 1210 } 1211 } 1212 } 1213 } 1214 catch (final Exception e) 1215 { 1216 Debug.debugException(e); 1217 } 1218 } 1219 1220 1221 // If we've gotten here, then just return null and the tool will try to use 1222 // the default standard schema. 1223 return null; 1224 } 1225 1226 1227 1228 /** 1229 * Retrieves a file object that refers to an output file with the provided 1230 * extension. 1231 * 1232 * @param extension The extension to use for the file. 1233 * 1234 * @return A file object that refers to an output file with the provided 1235 * extension. 
1236 */ 1237 @NotNull() 1238 private File getOutputFile(@NotNull final String extension) 1239 { 1240 final File baseFile; 1241 if (targetLDIFBasePath.isPresent()) 1242 { 1243 baseFile = targetLDIFBasePath.getValue(); 1244 } 1245 else 1246 { 1247 baseFile = sourceLDIF.getValue(); 1248 } 1249 1250 return new File(baseFile.getAbsolutePath() + extension); 1251 } 1252 1253 1254 1255 /** 1256 * {@inheritDoc} 1257 */ 1258 @Override() 1259 @NotNull() 1260 public LinkedHashMap<String[],String> getExampleUsages() 1261 { 1262 final LinkedHashMap<String[],String> exampleMap = 1263 new LinkedHashMap<>(StaticUtils.computeMapCapacity(4)); 1264 1265 for (final Map.Entry<String[],String> e : 1266 splitUsingHashOnRDN.getExampleUsages().entrySet()) 1267 { 1268 exampleMap.put(e.getKey(), e.getValue()); 1269 } 1270 1271 for (final Map.Entry<String[],String> e : 1272 splitUsingHashOnAttribute.getExampleUsages().entrySet()) 1273 { 1274 exampleMap.put(e.getKey(), e.getValue()); 1275 } 1276 1277 for (final Map.Entry<String[],String> e : 1278 splitUsingFewestEntries.getExampleUsages().entrySet()) 1279 { 1280 exampleMap.put(e.getKey(), e.getValue()); 1281 } 1282 1283 for (final Map.Entry<String[],String> e : 1284 splitUsingFilter.getExampleUsages().entrySet()) 1285 { 1286 exampleMap.put(e.getKey(), e.getValue()); 1287 } 1288 1289 return exampleMap; 1290 } 1291}