19 | 19 | import java.util.Arrays;
20 | 20 | import java.util.Formatter;
21 | 21 | import java.util.List;
   | 22 | +import java.util.Objects;
22 | 23 | import java.util.concurrent.BlockingQueue;
23 | 24 | import java.util.concurrent.ScheduledExecutorService;
24 | 25 | import java.util.concurrent.ScheduledThreadPoolExecutor;
25 | 26 | import java.util.concurrent.ThreadFactory;
26 | 27 | import java.util.concurrent.TimeUnit;
27 | 28 | import java.util.concurrent.atomic.AtomicInteger;
28 | 29 | import java.util.concurrent.atomic.AtomicLong;
   | 30 | +import java.util.concurrent.locks.LockSupport;
29 | 31 |
30 | 32 | import net.logstash.logback.appender.listener.AppenderListener;
31 | 33 | import net.logstash.logback.status.LevelFilteringStatusListener;

37 | 39 | import ch.qos.logback.core.spi.DeferredProcessingAware;
38 | 40 | import ch.qos.logback.core.status.OnConsoleStatusListener;
39 | 41 | import ch.qos.logback.core.status.Status;
   | 42 | +import ch.qos.logback.core.util.Duration;
40 | 43 | import com.lmax.disruptor.BlockingWaitStrategy;
41 | 44 | import com.lmax.disruptor.EventFactory;
42 | 45 | import com.lmax.disruptor.EventHandler;
@@ -250,6 +253,41 @@ public abstract class AsyncDisruptorAppender<Event extends DeferredProcessingAwa
250 | 253 |      */
251 | 254 |     protected final List<Listener> listeners = new ArrayList<>();
252 | 255 |
    | 256 | +    public enum AsyncMode {
    | 257 | +        /**
    | 258 | +         * Appender thread is blocked until space is available in the ring buffer
    | 259 | +         * or the retry timeout expires.
    | 260 | +         */
    | 261 | +        BLOCK,
    | 262 | +
    | 263 | +        /**
    | 264 | +         * Event is dropped when the ring buffer is full.
    | 265 | +         */
    | 266 | +        DROP
    | 267 | +    }
    | 268 | +    private AsyncMode asyncMode = AsyncMode.DROP;
    | 269 | +
    | 270 | +    /**
    | 271 | +     * Delay (in millis) between consecutive attempts to append an event in the ring buffer when full.
    | 272 | +     * Applicable only when {@link #asyncMode} is set to {@link AsyncMode#BLOCK}.
    | 273 | +     */
    | 274 | +    private long retryMillis = 100;
    | 275 | +
    | 276 | +    /**
    | 277 | +     * Maximum time to wait for space in the ring buffer before dropping the event.
    | 278 | +     * Applicable only when {@link #asyncMode} is set to {@link AsyncMode#BLOCK}.
    | 279 | +     *
    | 280 | +     * <p>Use {@code -1} for no timeout, i.e. block until space is available.
    | 281 | +     */
    | 282 | +    private Duration retryTimeout = Duration.buildByMilliseconds(1000);
    | 283 | +
    | 284 | +    /**
    | 285 | +     * How long to wait for in-flight events during shutdown.
    | 286 | +     */
    | 287 | +    private Duration shutdownGracePeriod = Duration.buildByMinutes(1);
    | 288 | +
    | 289 | +
    | 290 | +
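The new mode and timing options above are exposed through setters added later in this diff. As a rough illustration of how they fit together, here is a minimal, hypothetical configuration sketch (not part of this change), assuming the concrete LoggingEventAsyncDisruptorAppender subclass from the same package; an XML configuration would presumably use matching <asyncMode>, <retryMillis>, <retryTimeout> and <shutdownGracePeriod> elements:

    import ch.qos.logback.core.util.Duration;
    import net.logstash.logback.appender.AsyncDisruptorAppender;
    import net.logstash.logback.appender.LoggingEventAsyncDisruptorAppender;

    public class AsyncModeConfigSketch {
        public static void main(String[] args) {
            LoggingEventAsyncDisruptorAppender appender = new LoggingEventAsyncDisruptorAppender();
            appender.setAsyncMode(AsyncDisruptorAppender.AsyncMode.BLOCK); // block callers instead of dropping
            appender.setRetryMillis(50);                                   // re-try a full buffer every 50 ms
            appender.setRetryTimeout(Duration.buildByMilliseconds(500));   // drop the event after ~500 ms of blocking
            appender.setShutdownGracePeriod(Duration.buildByMinutes(1));   // time allowed to drain events in stop()
        }
    }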
253 | 291 |     /**
254 | 292 |      * Event wrapper object used for each element of the {@link RingBuffer}.
255 | 293 |      */
@@ -422,57 +460,141 @@ public void stop() {
422 | 460 |         if (!super.isStarted()) {
423 | 461 |             return;
424 | 462 |         }
    | 463 | +
425 | 464 |         /*
426 | 465 |          * Don't allow any more events to be appended.
427 | 466 |          */
428 | 467 |         super.stop();
    | 468 | +
    | 469 | +
    | 470 | +        /*
    | 471 | +         * Shutdown disruptor and executorService
    | 472 | +         */
    | 473 | +        boolean errorDuringShutdown = false;
    | 474 | +        long remainingTime = Math.max(0, getShutdownGracePeriod().getMilliseconds());
    | 475 | +        long startTime = System.currentTimeMillis();
    | 476 | +
429 | 477 |         try {
430 |     | -            this.disruptor.shutdown(1, TimeUnit.MINUTES);
    | 478 | +            this.disruptor.shutdown(remainingTime, TimeUnit.MILLISECONDS);
431 | 479 |         } catch (TimeoutException e) {
432 |     | -            addWarn("Some queued events have not been logged due to requested shutdown");
    | 480 | +            errorDuringShutdown = true;
433 | 481 |         }
434 | 482 |
435 | 483 |         this.executorService.shutdown();
436 | 484 |
437 | 485 |         try {
438 |     | -            if (!this.executorService.awaitTermination(1, TimeUnit.MINUTES)) {
439 |     | -                addWarn("Some queued events have not been logged due to requested shutdown");
    | 486 | +            remainingTime = Math.max(0, remainingTime - (System.currentTimeMillis() - startTime));
    | 487 | +            if (!this.executorService.awaitTermination(remainingTime, TimeUnit.MILLISECONDS)) {
    | 488 | +                errorDuringShutdown = true;
440 | 489 |             }
441 | 490 |         } catch (InterruptedException e) {
442 |     | -            addWarn("Some queued events have not been logged due to requested shutdown", e);
    | 491 | +            errorDuringShutdown = true;
443 | 492 |         }
    | 493 | +
    | 494 | +        if (errorDuringShutdown) {
    | 495 | +            addWarn("Some queued events have not been logged due to requested shutdown");
    | 496 | +        }
    | 497 | +
    | 498 | +
    | 499 | +        /*
    | 500 | +         * Notify listeners
    | 501 | +         */
444 | 502 |         fireAppenderStopped();
445 | 503 |     }
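Worth noting: the two waits in stop() now share a single grace-period budget instead of each getting a fixed minute. A small sketch of the arithmetic, with hypothetical numbers and assuming the default 1-minute grace period:

    public class ShutdownBudgetSketch {
        public static void main(String[] args) {
            long graceMillis = 60_000;       // default shutdownGracePeriod of 1 minute
            long spentOnDisruptor = 45_000;  // hypothetical time consumed by disruptor.shutdown(...)
            long leftForExecutor = Math.max(0, graceMillis - spentOnDisruptor);
            // awaitTermination(...) is then given only the remaining 15 000 ms, so stop()
            // is bounded by roughly one grace period overall rather than two fixed minutes.
            System.out.println("executor shutdown gets " + leftForExecutor + " ms");
        }
    }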
446 | 504 |
    | 505 | +
447 | 506 |     @Override
448 | 507 |     protected void append(Event event) {
449 | 508 |         long startTime = System.nanoTime();
    | 509 | +
450 | 510 |         try {
451 | 511 |             prepareForDeferredProcessing(event);
452 | 512 |         } catch (RuntimeException e) {
453 |     | -            addWarn("Unable to prepare event for deferred processing. Event output might be missing data.", e);
    | 513 | +            addWarn("Unable to prepare event for deferred processing. Event output might be missing data.", e);
454 | 514 |         }
455 | 515 |
456 |     | -        if (!this.disruptor.getRingBuffer().tryPublishEvent(this.eventTranslator, event)) {
457 |     | -            long consecutiveDropped = this.consecutiveDroppedCount.incrementAndGet();
458 |     | -            if ((consecutiveDropped) % this.droppedWarnFrequency == 1) {
459 |     | -                addWarn("Dropped " + consecutiveDropped + " events (and counting...) due to ring buffer at max capacity [" + this.ringBufferSize + "]");
460 |     | -            }
461 |     | -            fireEventAppendFailed(event, RING_BUFFER_FULL_EXCEPTION);
462 |     | -        } else {
463 |     | -            long endTime = System.nanoTime();
    | 516 | +        if (enqueueEvent(event)) {
    | 517 | +            // Enqueue success - notify if we had errors previously
    | 518 | +            //
464 | 519 |             long consecutiveDropped = this.consecutiveDroppedCount.get();
465 | 520 |             if (consecutiveDropped != 0 && this.consecutiveDroppedCount.compareAndSet(consecutiveDropped, 0L)) {
466 | 521 |                 addWarn("Dropped " + consecutiveDropped + " total events due to ring buffer at max capacity [" + this.ringBufferSize + "]");
467 | 522 |             }
468 |     | -            fireEventAppended(event, endTime - startTime);
    | 523 | +
    | 524 | +            // Notify parties
    | 525 | +            //
    | 526 | +            fireEventAppended(event, System.nanoTime() - startTime);
    | 527 | +
    | 528 | +        } else {
    | 529 | +            // Log a warning status about the failure
    | 530 | +            //
    | 531 | +            long consecutiveDropped = this.consecutiveDroppedCount.incrementAndGet();
    | 532 | +            if ((consecutiveDropped % this.droppedWarnFrequency) == 1) {
    | 533 | +                addWarn("Dropped " + consecutiveDropped + " events (and counting...) due to ring buffer at max capacity [" + this.ringBufferSize + "]");
    | 534 | +            }
    | 535 | +
    | 536 | +            // Notify parties
    | 537 | +            //
    | 538 | +            fireEventAppendFailed(event, RING_BUFFER_FULL_EXCEPTION);
469 | 539 |         }
470 | 540 |     }
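The modulo check in the failure branch keeps the existing warning throttling: it warns on the first drop of a streak and then once per droppedWarnFrequency drops, with a "total events" summary logged once an append succeeds again. A standalone sketch of when the warning fires, assuming droppedWarnFrequency keeps its existing default of 1000 (an assumption, not part of this diff):

    public class DroppedWarnFrequencySketch {
        public static void main(String[] args) {
            long droppedWarnFrequency = 1000; // assumed default; configurable on the appender
            for (long consecutiveDropped = 1; consecutiveDropped <= 2500; consecutiveDropped++) {
                if ((consecutiveDropped % droppedWarnFrequency) == 1) {
                    // Prints for drop #1, #1001 and #2001 - the points at which the appender would warn.
                    System.out.println("would warn at consecutive drop #" + consecutiveDropped);
                }
            }
        }
    }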
471 | 541 |
472 | 542 |     protected void prepareForDeferredProcessing(Event event) {
473 | 543 |         event.prepareForDeferredProcessing();
474 | 544 |     }
475 | 545 |
    | 546 | +    /**
    | 547 | +     * Enqueue the given {@code event} in the ring buffer according to the configured {@link #asyncMode}.
    | 548 | +     *
    | 549 | +     * @param event the {@link Event} to enqueue
    | 550 | +     * @return {@code true} when the event is successfully enqueued in the ring buffer
    | 551 | +     */
    | 552 | +    protected boolean enqueueEvent(Event event) {
    | 553 | +        if (this.asyncMode == AsyncMode.BLOCK) {
    | 554 | +            return enqueueEventBlock(event);
    | 555 | +        } else {
    | 556 | +            return enqueueEventDrop(event);
    | 557 | +        }
    | 558 | +    }
    | 559 | +
    | 560 | +    /**
    | 561 | +     * Enqueue the given {@code event} in the ring buffer, blocking until enough space
    | 562 | +     * is available or the {@link #retryTimeout} expires (if configured).
    | 563 | +     *
    | 564 | +     * @param event the {@link Event} to enqueue
    | 565 | +     * @return {@code true} when the event is successfully enqueued in the ring buffer
    | 566 | +     */
    | 567 | +    private boolean enqueueEventBlock(Event event) {
    | 568 | +        long timeout = this.retryTimeout.getMilliseconds() <= 0 ? Long.MAX_VALUE : System.currentTimeMillis() + this.retryTimeout.getMilliseconds();
    | 569 | +
    | 570 | +        while (isStarted() && !this.disruptor.getRingBuffer().tryPublishEvent(this.eventTranslator, event)) {
    | 571 | +            // Check for timeout
    | 572 | +            //
    | 573 | +            if (System.currentTimeMillis() >= timeout) {
    | 574 | +                return false;
    | 575 | +            }
    | 576 | +
    | 577 | +            // Wait before retry (never longer than the time left before the timeout)
    | 578 | +            //
    | 579 | +            long waitDuration = Math.min(this.retryMillis, timeout - System.currentTimeMillis());
    | 580 | +            if (waitDuration > 0) {
    | 581 | +                LockSupport.parkNanos(waitDuration * 1_000_000L);
    | 582 | +            }
    | 583 | +        }
    | 584 | +
    | 585 | +        return true;
    | 586 | +    }
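With the defaults declared earlier (retryMillis = 100, retryTimeout = 1000 ms), the blocking path parks for at most retryMillis between publish attempts, so a persistently full buffer is retried roughly ten times before the event is finally dropped; a retryTimeout of -1 (or any value <= 0) removes the deadline entirely. A small sketch of that worst-case arithmetic, values hypothetical:

    public class RetryBudgetSketch {
        public static void main(String[] args) {
            long retryMillis = 100;       // wait between publish attempts
            long retryTimeoutMs = 1_000;  // overall deadline; <= 0 means "no deadline"
            long worstCaseAttempts = (retryTimeoutMs + retryMillis - 1) / retryMillis;
            System.out.println("~" + worstCaseAttempts + " publish attempts before the event is dropped");
        }
    }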
    | 587 | +
    | 588 | +    /**
    | 589 | +     * Attempt to enqueue the given {@code event} in the ring buffer without blocking. Drop the event
    | 590 | +     * if the ring buffer is full.
    | 591 | +     *
    | 592 | +     * @param event the {@link Event} to enqueue
    | 593 | +     * @return {@code true} when the event is successfully enqueued in the ring buffer
    | 594 | +     */
    | 595 | +    private boolean enqueueEventDrop(Event event) {
    | 596 | +        return this.disruptor.getRingBuffer().tryPublishEvent(this.eventTranslator, event);
    | 597 | +    }
476 | 598 |
477 | 599 |     protected String calculateThreadName() {
478 | 600 |         List<Object> threadNameFormatParams = getThreadNameFormatParams();
@@ -581,6 +703,34 @@ public void setWaitStrategyType(String waitStrategyType) {
581 | 703 |         setWaitStrategy(WaitStrategyFactory.createWaitStrategyFromString(waitStrategyType));
582 | 704 |     }
583 | 705 |
    | 706 | +    public AsyncMode getAsyncMode() {
    | 707 | +        return asyncMode;
    | 708 | +    }
    | 709 | +    public void setAsyncMode(AsyncMode asyncMode) {
    | 710 | +        this.asyncMode = asyncMode;
    | 711 | +    }
    | 712 | +
    | 713 | +    public long getRetryMillis() {
    | 714 | +        return retryMillis;
    | 715 | +    }
    | 716 | +    public void setRetryMillis(long retryMillis) {
    | 717 | +        this.retryMillis = retryMillis;
    | 718 | +    }
    | 719 | +
    | 720 | +    public Duration getRetryTimeout() {
    | 721 | +        return retryTimeout;
    | 722 | +    }
    | 723 | +    public void setRetryTimeout(Duration retryTimeout) {
    | 724 | +        this.retryTimeout = Objects.requireNonNull(retryTimeout);
    | 725 | +    }
    | 726 | +
    | 727 | +    public void setShutdownGracePeriod(Duration shutdownGracePeriod) {
    | 728 | +        this.shutdownGracePeriod = Objects.requireNonNull(shutdownGracePeriod);
    | 729 | +    }
    | 730 | +    public Duration getShutdownGracePeriod() {
    | 731 | +        return shutdownGracePeriod;
    | 732 | +    }
    | 733 | +
584 | 734 |     public ThreadFactory getThreadFactory() {
585 | 735 |         return threadFactory;
586 | 736 |     }