diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index e50f0f742..b92e6cb04 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -190,4 +190,18 @@ test_expect_success 'required filter clean failure' ' test_must_fail git add test.fc ' +test -n "$GIT_TEST_LONG" && test_set_prereq EXPENSIVE + +test_expect_success EXPENSIVE 'filter large file' ' + git config filter.largefile.smudge cat && + git config filter.largefile.clean cat && + for i in $(test_seq 1 2048); do printf "%1048576d" 1; done >2GB && + echo "2GB filter=largefile" >.gitattributes && + git add 2GB 2>err && + ! test -s err && + rm -f 2GB && + git checkout -- 2GB 2>err && + ! test -s err +' + test_done diff --git a/wrapper.c b/wrapper.c index 6a015de5f..f92b14759 100644 --- a/wrapper.c +++ b/wrapper.c @@ -130,6 +130,14 @@ void *xcalloc(size_t nmemb, size_t size) return ret; } +/* + * Limit size of IO chunks, because huge chunks only cause pain. OS X + * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in + * the absence of bugs, large chunks can result in bad latencies when + * you decide to kill the process. + */ +#define MAX_IO_SIZE (8*1024*1024) + /* * xread() is the same a read(), but it automatically restarts read() * operations with a recoverable error (EAGAIN and EINTR). xread() @@ -138,6 +146,8 @@ void *xcalloc(size_t nmemb, size_t size) ssize_t xread(int fd, void *buf, size_t len) { ssize_t nr; + if (len > MAX_IO_SIZE) + len = MAX_IO_SIZE; while (1) { nr = read(fd, buf, len); if ((nr < 0) && (errno == EAGAIN || errno == EINTR)) @@ -154,6 +164,8 @@ ssize_t xread(int fd, void *buf, size_t len) ssize_t xwrite(int fd, const void *buf, size_t len) { ssize_t nr; + if (len > MAX_IO_SIZE) + len = MAX_IO_SIZE; while (1) { nr = write(fd, buf, len); if ((nr < 0) && (errno == EAGAIN || errno == EINTR))