Commit 77a002c

Brandon Jacklyn authored and copybara-github committed
Remove DigestUtils.getDigestInExclusiveMode() now that SsdModule has been removed

There used to be a CLI flag, --experimental_multi_threaded_digest, which defaulted to true and therefore did not compute file digests in exclusive mode unless it was overridden to false. When that flag was removed, this code was accidentally left behind, which instead causes Bazel to always compute file digests in exclusive mode for all files larger than MULTI_THREADED_DIGEST_MAX_FILE_SIZE.

This makes Bazel take 5-6x longer to 'check cached actions' in my org's builds, specifically increasing the time from 30 secs to 2 mins 30 secs for fully cached no-op builds.

Fixes bazelbuild#14034
Closes bazelbuild#14231

PiperOrigin-RevId: 407790990
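For context on what "exclusive mode" cost: the removed path routed every large-file digest through one global lock, so otherwise-independent hash computations ran one at a time. Below is a minimal, self-contained demonstration of that serialization effect using plain JDK code; the class name, thread count, and workload sizes are illustrative assumptions, not Bazel's.

```java
import java.security.MessageDigest;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExclusiveDigestDemo {
  private static final Object DIGEST_LOCK = new Object();

  private static byte[] digest(byte[] data, boolean exclusive) throws Exception {
    if (exclusive) {
      synchronized (DIGEST_LOCK) { // every thread queues here, one at a time
        return MessageDigest.getInstance("SHA-256").digest(data);
      }
    }
    return MessageDigest.getInstance("SHA-256").digest(data);
  }

  private static long runMillis(boolean exclusive) throws Exception {
    byte[] data = new byte[1 << 20]; // 1 MiB: above the old 128 KiB threshold
    ExecutorService pool = Executors.newFixedThreadPool(8);
    long start = System.nanoTime();
    for (int i = 0; i < 200; i++) {
      pool.submit(() -> {
        try {
          digest(data, exclusive);
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      });
    }
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.MINUTES);
    return (System.nanoTime() - start) / 1_000_000;
  }

  public static void main(String[] args) throws Exception {
    System.out.println("with lock:    " + runMillis(true) + " ms");
    System.out.println("without lock: " + runMillis(false) + " ms");
  }
}
```

On a multi-core machine the locked variant approaches single-threaded throughput, which mirrors the "check cached actions" slowdown described above.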
1 parent 917e15e commit 77a002c

File tree

2 files changed: +1, -54 lines

src/main/java/com/google/devtools/build/lib/vfs/DigestUtils.java (+1, -34)

@@ -16,11 +16,8 @@
 import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.github.benmanes.caffeine.cache.stats.CacheStats;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
-import com.google.devtools.build.lib.profiler.Profiler;
-import com.google.devtools.build.lib.profiler.ProfilerTask;
 import java.io.IOException;

@@ -41,17 +38,9 @@
  * fail.
  */
 public class DigestUtils {
-  // Object to synchronize on when serializing large file reads.
-  private static final Object DIGEST_LOCK = new Object();
-
   // Typical size for a digest byte array.
   public static final int ESTIMATED_SIZE = 32;

-  // Files of this size or less are assumed to be readable in one seek.
-  // (This is the default readahead window on Linux.)
-  @VisibleForTesting // the unittest is in a different package!
-  public static final int MULTI_THREADED_DIGEST_MAX_FILE_SIZE = 128 * 1024;
-
   /**
    * Keys used to cache the values of the digests for files where we don't have fast digests.
    *
@@ -126,19 +115,6 @@ public int hashCode() {
   /** Private constructor to prevent instantiation of utility class. */
   private DigestUtils() {}

-  /**
-   * Obtain file's digset using synchronized method, ensuring that system is not overloaded in case
-   * when multiple threads are requesting digest calculations and underlying file system cannot
-   * provide it via extended attribute.
-   */
-  private static byte[] getDigestInExclusiveMode(Path path) throws IOException {
-    long startTime = Profiler.nanoTimeMaybe();
-    synchronized (DIGEST_LOCK) {
-      Profiler.instance().logSimpleTask(startTime, ProfilerTask.WAIT, path.getPathString());
-      return path.getDigest();
-    }
-  }
-
   /**
    * Enables the caching of file digests based on file status data.
    *
@@ -221,16 +197,7 @@ public static byte[] manuallyComputeDigest(Path path, long fileSize) throws IOEx
       }
     }

-    // Compute digest from the file contents.
-    if (fileSize > MULTI_THREADED_DIGEST_MAX_FILE_SIZE) {
-      // We'll have to read file content in order to calculate the digest.
-      // We avoid overlapping this process for multiple large files, as
-      // seeking back and forth between them will result in an overall loss of
-      // throughput.
-      digest = getDigestInExclusiveMode(path);
-    } else {
-      digest = path.getDigest();
-    }
+    digest = path.getDigest();

     Preconditions.checkNotNull(digest, "Missing digest for %s (size %s)", path, fileSize);
     if (cache != null) {
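After this change, every file takes the single code path sketched below, regardless of size. This is a self-contained paraphrase using plain JDK types: java.nio.file.Path and a ConcurrentHashMap stand in for Bazel's VFS Path and Caffeine cache, so it is illustrative, not the actual Bazel source.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

final class UniformDigest {
  private static final Map<Path, byte[]> CACHE = new ConcurrentHashMap<>();

  // Shape of manuallyComputeDigest() after the fix: cache lookup,
  // one uniform digest call, null check, cache store.
  static byte[] manuallyComputeDigest(Path path) throws IOException {
    byte[] cached = CACHE.get(path);
    if (cached != null) {
      return cached;
    }
    byte[] digest = getDigest(path); // no size threshold, no global lock
    Objects.requireNonNull(digest, "Missing digest for " + path);
    CACHE.put(path, digest);
    return digest;
  }

  private static byte[] getDigest(Path path) throws IOException {
    try {
      return MessageDigest.getInstance("SHA-256").digest(Files.readAllBytes(path));
    } catch (NoSuchAlgorithmException e) {
      throw new IllegalStateException(e);
    }
  }
}
```

The point of the fix is visible in getDigest(): nothing serializes concurrent callers anymore, so digesting many large files can proceed in parallel.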

src/test/java/com/google/devtools/build/lib/vfs/DigestUtilsTest.java (-20)

@@ -19,7 +19,6 @@
 import com.google.devtools.build.lib.testutil.TestUtils;
 import com.google.devtools.build.lib.vfs.inmemoryfs.InMemoryFileSystem;
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -92,25 +91,6 @@ protected byte[] getFastDigest(PathFragment path) throws IOException {
     thread2.joinAndAssertState(TestUtils.WAIT_TIMEOUT_MILLISECONDS);
   }

-  /**
-   * Ensures that digest calculation is synchronized for files greater than
-   * {@link DigestUtils#MULTI_THREADED_DIGEST_MAX_FILE_SIZE} bytes if the digest is not
-   * available cheaply, so machines with rotating drives don't become unusable.
-   */
-  @Test
-  public void testCalculationConcurrency() throws Exception {
-    int small = DigestUtils.MULTI_THREADED_DIGEST_MAX_FILE_SIZE;
-    int large = DigestUtils.MULTI_THREADED_DIGEST_MAX_FILE_SIZE + 1;
-    for (DigestHashFunction hf :
-        Arrays.asList(DigestHashFunction.SHA256, DigestHashFunction.SHA1)) {
-      assertDigestCalculationConcurrency(true, true, small, small, hf);
-      assertDigestCalculationConcurrency(true, true, large, large, hf);
-      assertDigestCalculationConcurrency(true, false, small, small, hf);
-      assertDigestCalculationConcurrency(true, false, small, large, hf);
-      assertDigestCalculationConcurrency(false, false, large, large, hf);
-    }
-  }
-
   @Test
   public void testCache() throws Exception {
     AtomicInteger getFastDigestCounter = new AtomicInteger(0);
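The deleted testCalculationConcurrency drove a helper, assertDigestCalculationConcurrency, that checks whether two in-flight digest computations may overlap. Judging by the surviving CountDownLatch import, it coordinates the two worker threads with a latch. Below is a hedged sketch of that general pattern; OverlapProbe and enteredConcurrently are invented names, not the Bazel helper.

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class OverlapProbe {
  private final CountDownLatch bothInside = new CountDownLatch(2);

  // Each of two worker threads calls this at the top of its digest
  // computation. If the computations can overlap, both threads reach the
  // latch, the count hits zero, and await() returns true for both. If a
  // lock serializes them, the first holder waits here while blocking the
  // second from ever arriving, so await() times out and returns false.
  boolean enteredConcurrently() throws InterruptedException {
    bothInside.countDown();
    return bothInside.await(2, TimeUnit.SECONDS);
  }
}
```

With MULTI_THREADED_DIGEST_MAX_FILE_SIZE gone there is no size threshold left to assert on either side of, so the test is deleted rather than updated.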

0 commit comments