@@ -46,7 +46,13 @@ def _capabilities(orig, repo, proto):
     '''Wrap server command to announce lfs server capability'''
     caps = orig(repo, proto)
     if util.safehasattr(repo.svfs, 'lfslocalblobstore'):
-        # XXX: change to 'lfs=serve' when separate git server isn't required?
+        # Advertise a slightly different capability when lfs is *required*, so
+        # that the client knows it MUST load the extension. If lfs is not
+        # required on the server, there's no reason to autoload the extension
+        # on the client.
+        if b'lfs' in repo.requirements:
+            caps.append('lfs-serve')
+
         caps.append('lfs')
     return caps
 
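For context, a minimal client-side sketch (not part of this change; the URL and ui construction are illustrative) of what the new capability lets a caller distinguish: 'lfs-serve' means the server repository requires lfs, while plain 'lfs' only means the server can serve blobs.

from mercurial import hg, ui as uimod

def lfs_status(remote_url):
    # Illustrative helper: classify a peer by the capabilities wrapped above.
    peer = hg.peer(uimod.ui.load(), {}, remote_url)
    try:
        if peer.capable('lfs-serve'):
            return 'required'     # 'lfs' is in the server's repo.requirements
        if peer.capable('lfs'):
            return 'available'    # blobs can be served, but lfs isn't required
        return 'unsupported'
    finally:
        peer.close()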
@@ -578,6 +578,23 @@ def clone(ui, peeropts, source, dest=Non
 
         createopts['narrowfiles'] = True
 
+    if srcpeer.capable(b'lfs-serve'):
+        # Repository creation honors the config if it disabled the extension, so
+        # we can't just announce that lfs will be enabled. This check avoids
+        # saying that lfs will be enabled, and then saying it's an unknown
+        # feature. The lfs creation option is set in either case so that a
+        # requirement is added. If the extension is explicitly disabled but the
+        # requirement is set, the clone aborts early, before transferring any
+        # data.
+        createopts['lfs'] = True
+
+        if extensions.disabledext('lfs'):
+            ui.status(_('(remote is using large file support (lfs), but it is '
+                        'explicitly disabled in the local configuration)\n'))
+        else:
+            ui.status(_('(remote is using large file support (lfs); lfs will '
+                        'be enabled for this repository)\n'))
+
     shareopts = shareopts or {}
     sharepool = shareopts.get('pool')
     sharenamemode = shareopts.get('mode')
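The message selection above leans on extensions.disabledext('lfs') returning a truthy description when the bundled lfs extension is present but not enabled in the effective configuration. A minimal sketch of that branch in isolation (messages copied from the hunk; the helper name is made up). Note that the 'lfs' creation option is requested on both paths, so a client that keeps the extension disabled still records the requirement and aborts before any data is transferred.

from mercurial import extensions

def lfs_clone_notice(createopts):
    # Mirror the clone() branch above: always request the requirement,
    # vary only the status message shown to the user.
    createopts['lfs'] = True
    if extensions.disabledext('lfs'):
        return ('(remote is using large file support (lfs), but it is '
                'explicitly disabled in the local configuration)')
    return ('(remote is using large file support (lfs); lfs will '
            'be enabled for this repository)')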
@@ -2895,6 +2895,9 @@ def newreporequirements(ui, createopts):
     if createopts.get('narrowfiles'):
         requirements.add(repository.NARROW_REQUIREMENT)
 
+    if createopts.get('lfs'):
+        requirements.add('lfs')
+
     return requirements
 
 def filterknowncreateopts(ui, createopts):
@@ -2913,6 +2916,7 @@ def filterknowncreateopts(ui, createopts
     """
     known = {
         'backend',
+        'lfs',
         'narrowfiles',
         'sharedrepo',
         'sharedrelative',
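With 'lfs' in the known set, it is no longer reported back as an unsupported creation option. A small sketch of that contract, assuming filterknowncreateopts() returns the subset of creation options it does not know how to handle:

from mercurial import localrepo, ui as uimod

# Illustrative only: unrecognised options are returned so repository creation
# can refuse them; 'lfs' is now recognised and filtered out.
unknown = localrepo.filterknowncreateopts(uimod.ui.load(),
                                          {'lfs': True, 'bogus-opt': 1})
assert 'lfs' not in unknown
assert 'bogus-opt' in unknown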
@@ -2931,6 +2935,9 @@ def createrepository(ui, path, createopt
 
     backend
        The storage backend to use.
+    lfs
+       Repository will be created with ``lfs`` requirement. The lfs extension
+       will automatically be loaded when the repository is accessed.
     narrowfiles
        Set up repository to support narrow file storage.
     sharedrepo
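A rough end-to-end illustration of the documented behaviour (not from the patch: the path, the ui construction, and the defensive 'backend' value are assumptions about this version's creation API):

import os
from mercurial import localrepo, ui as uimod

# Sketch only: create a repository carrying the new 'lfs' creation option and
# confirm the requirement lands in .hg/requires, which is what later causes
# the extension to be loaded when the repository is accessed.
path = '/tmp/lfs-demo'
localrepo.createrepository(uimod.ui.load(), path,
                           createopts={'backend': 'revlogv1', 'lfs': True})
with open(os.path.join(path, '.hg', 'requires')) as fp:
    assert 'lfs' in fp.read().split()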
@@ -42,6 +42,7 @@ default cache, so it attempts to downloa
 Downloads fail...
 
   $ hg clone http://localhost:$HGPORT httpclone
+  (remote is using large file support (lfs); lfs will be enabled for this repository)
   requesting all changes
   adding changesets
   adding manifests
@@ -76,6 +77,7 @@ Blob URIs are correct when --prefix is u
   $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
   using http://localhost:$HGPORT/subdir/mount/point
   sending capabilities command
+  (remote is using large file support (lfs); lfs will be enabled for this repository)
   query 1; heads
   sending batch command
   requesting all changes
@@ -88,7 +90,6 @@ Blob URIs are correct when --prefix is u
   adding file changes
   adding lfs.bin revisions
   added 1 changesets with 1 changes to 1 files
-  calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
   bundle2-input-part: total payload size 648
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
@@ -239,6 +240,7 @@ though the client doesn't send the blob.
 Test an I/O error in localstore.verify() (Batch API) with GET
 
   $ hg clone http://localhost:$HGPORT1 httpclone2
+  (remote is using large file support (lfs); lfs will be enabled for this repository)
   requesting all changes
   adding changesets
   adding manifests
@@ -304,10 +304,10 @@ lfs content, and the extension enabled.
   $ grep 'lfs' .hg/requires $SERVER_REQUIRES
   $TESTTMP/server/.hg/requires:lfs
 
-TODO: fail more gracefully.
-
-  $ hg clone -q http://localhost:$HGPORT $TESTTMP/client4_clone
-  abort: HTTP Error 500: Internal Server Error
+  $ hg clone http://localhost:$HGPORT $TESTTMP/client4_clone
+  (remote is using large file support (lfs), but it is explicitly disabled in the local configuration)
+  abort: repository requires features unknown to this Mercurial: lfs!
+  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
   [255]
   $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
   grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
@@ -661,8 +661,6 @@ Only the files required by diff are pref
   $ cat $TESTTMP/errors.log | grep '^[A-Z]'
   Traceback (most recent call last):
   ValueError: no common changegroup version
-  Traceback (most recent call last):
-  ValueError: no common changegroup version
 #else
   $ cat $TESTTMP/errors.log
 #endif