remotefilelog: use the right expandpath to expand `~`
marmoute
r47707:c1749dd3 default
@@ -1,1262 +1,1262 @@
# __init__.py - remotefilelog extension
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""remotefilelog causes Mercurial to lazily fetch file contents (EXPERIMENTAL)

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

remotefilelog works by fetching file contents lazily and storing them
in a cache on the client rather than in revlogs. This allows enormous
histories to be transferred only partially, making them easier to
operate on.

Configs:

``packs.maxchainlen`` specifies the maximum delta chain length in pack files

``packs.maxpacksize`` specifies the maximum pack file size

``packs.maxpackfilecount`` specifies the maximum number of packs in the
shared cache (trees only for now)

``remotefilelog.backgroundprefetch`` runs prefetch in background when True

``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
update, and on other commands that use them. Different from pullprefetch.

``remotefilelog.gcrepack`` does garbage collection during repack when True

``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
it is garbage collected

``remotefilelog.repackonhggc`` runs repack on hg gc when True

``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
days after which it is no longer prefetched.

``remotefilelog.prefetchdelay`` specifies delay between background
prefetches in seconds after operations that change the working copy parent

``remotefilelog.data.gencountlimit`` constrains the minimum number of data
pack files required to be considered part of a generation. In particular,
minimum number of pack files > gencountlimit.

``remotefilelog.data.generations`` list for specifying the lower bound of
each generation of the data pack files. For example, list ['100MB','1MB']
or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
[1MB, 100MB) and [100MB, infinity).

``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
include in an incremental data repack.

``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
it to be considered for an incremental data repack.

``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
to include in an incremental data repack.

``remotefilelog.history.gencountlimit`` constrains the minimum number of
history pack files required to be considered part of a generation. In
particular, minimum number of pack files > gencountlimit.

``remotefilelog.history.generations`` list for specifying the lower bound
of each generation of the history pack files. For example, list
['100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations:
[0, 1MB), [1MB, 100MB) and [100MB, infinity).

``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
include in an incremental history repack.

``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
for it to be considered for an incremental history repack.

``remotefilelog.history.repacksizelimit`` the maximum total size of pack
files to include in an incremental history repack.

``remotefilelog.backgroundrepack`` automatically consolidate packs in the
background

``remotefilelog.cachepath`` path to cache

``remotefilelog.cachegroup`` if set, make cache directory sgid to this
group

``remotefilelog.cacheprocess`` binary to invoke for fetching file data

``remotefilelog.debug`` turn on remotefilelog-specific debug output

``remotefilelog.excludepattern`` pattern of files to exclude from pulls

``remotefilelog.includepattern`` pattern of files to include in pulls

``remotefilelog.fetchwarning`` message to print when too many
single-file fetches occur

``remotefilelog.getfilesstep`` number of files to request in a single RPC

``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
files, otherwise use optimistic fetching

``remotefilelog.pullprefetch`` revset for selecting files that should be
eagerly downloaded rather than lazily

``remotefilelog.reponame`` name of the repo. If set, used to partition
data from other repos in a shared store.

``remotefilelog.server`` if true, enable server-side functionality

``remotefilelog.servercachepath`` path for caching blobs on the server

``remotefilelog.serverexpiration`` number of days to keep cached server
blobs

``remotefilelog.validatecache`` if set, check cache entries for corruption
before returning blobs

``remotefilelog.validatecachelog`` if set, check cache entries for
corruption before returning metadata

"""
from __future__ import absolute_import

import os
import time
import traceback

from mercurial.node import (
    hex,
    wdirrev,
)
from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
    changegroup,
    changelog,
    commands,
    configitems,
    context,
    copies,
    debugcommands as hgdebugcommands,
    dispatch,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    match as matchmod,
    merge,
    mergestate as mergestatemod,
    patch,
    pycompat,
    registrar,
    repair,
    repoview,
    revset,
    scmutil,
    smartset,
    streamclone,
    util,
)
from . import (
    constants,
    debugcommands,
    fileserverclient,
    remotefilectx,
    remotefilelog,
    remotefilelogserver,
    repack as repackmod,
    shallowbundle,
    shallowrepo,
    shallowstore,
    shallowutil,
    shallowverifier,
)

# ensures debug commands are registered
hgdebugcommands.command

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(b'remotefilelog', b'debug', default=False)

configitem(b'remotefilelog', b'reponame', default=b'')
configitem(b'remotefilelog', b'cachepath', default=None)
configitem(b'remotefilelog', b'cachegroup', default=None)
configitem(b'remotefilelog', b'cacheprocess', default=None)
configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")

configitem(
    b'remotefilelog',
    b'fallbackpath',
    default=configitems.dynamicdefault,
    alias=[(b'remotefilelog', b'fallbackrepo')],
)

configitem(b'remotefilelog', b'validatecachelog', default=None)
configitem(b'remotefilelog', b'validatecache', default=b'on')
configitem(b'remotefilelog', b'server', default=None)
configitem(b'remotefilelog', b'servercachepath', default=None)
configitem(b"remotefilelog", b"serverexpiration", default=30)
configitem(b'remotefilelog', b'backgroundrepack', default=False)
configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
configitem(b'remotefilelog', b'pullprefetch', default=None)
configitem(b'remotefilelog', b'backgroundprefetch', default=False)
configitem(b'remotefilelog', b'prefetchdelay', default=120)
configitem(b'remotefilelog', b'prefetchdays', default=14)
# Other values include 'local' or 'none'. Any unrecognized value is 'all'.
configitem(b'remotefilelog', b'strip.includefiles', default='all')

configitem(b'remotefilelog', b'getfilesstep', default=10000)
configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
configitem(b'remotefilelog', b'fetchwarning', default=b'')

configitem(b'remotefilelog', b'includepattern', default=None)
configitem(b'remotefilelog', b'excludepattern', default=None)

configitem(b'remotefilelog', b'gcrepack', default=False)
configitem(b'remotefilelog', b'repackonhggc', default=False)
configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)

configitem(b'packs', b'maxpacksize', default=0)
configitem(b'packs', b'maxchainlen', default=1000)

configitem(b'devel', b'remotefilelog.bg-wait', default=False)

# default TTL limit is 30 days
_defaultlimit = 60 * 60 * 24 * 30
configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)

configitem(b'remotefilelog', b'data.gencountlimit', default=2),
configitem(
    b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
)
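# As a worked example of the generations scheme described in the module
# docstring (an illustrative reading, applied to the defaults just above):
# the bounds [b'1GB', b'100MB', b'1MB'] split data pack files into four
# generations: [0, 1MB), [1MB, 100MB), [100MB, 1GB) and [1GB, infinity).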
configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')

configitem(b'remotefilelog', b'history.gencountlimit', default=2),
configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

repoclass = localrepo.localrepository
repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)

isenabled = shallowutil.isenabled


def uisetup(ui):
    """Wraps user facing Mercurial commands to swap them out with shallow
    versions.
    """
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
    entry[1].append(
        (
            b'',
            b'shallow',
            None,
            _(b"create a shallow clone which uses remote file history"),
        )
    )

    extensions.wrapcommand(
        commands.table, b'debugindex', debugcommands.debugindex
    )
    extensions.wrapcommand(
        commands.table, b'debugindexdot', debugcommands.debugindexdot
    )
    extensions.wrapcommand(commands.table, b'log', log)
    extensions.wrapcommand(commands.table, b'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        if isenabled(repo) and opts.get('all'):
            raise error.Abort(_(b"--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)

    extensions.wrapcommand(commands.table, b"manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find(b'lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod

    extensions.afterloaded(b'lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)

    changegroup.cgpacker = shallowbundle.shallowcg1packer

    extensions.wrapfunction(
        changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
    )
    extensions.wrapfunction(
        changegroup, b'makechangegroup', shallowbundle.makechangegroup
    )
    extensions.wrapfunction(localrepo, b'makestore', storewrapper)
    extensions.wrapfunction(exchange, b'pull', exchangepull)
    extensions.wrapfunction(merge, b'applyupdates', applyupdates)
    extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
    extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
    extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
    extensions.wrapfunction(
        copies, b'_computeforwardmissing', computeforwardmissing
    )
    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
    extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
    extensions.wrapfunction(context.changectx, b'filectx', filectx)
    extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
    extensions.wrapfunction(patch, b'trydiff', trydiff)
    extensions.wrapfunction(hg, b'verify', _verify)
    scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)

    # disappointing hacks below
    extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
    extensions.wrapfunction(revset, b'filelog', filelogrevset)
    revset.symbols[b'filelog'] = revset.filelog
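
# A minimal sketch of the wrapper convention used throughout uisetup()
# above (illustrative, not part of the extension): wrapfunction() replaces
# `module.name` with a wrapper that receives the previous implementation
# as its first argument, so behavior can be layered on top while still
# delegating to the original.
#
#     def mywrapper(orig, *args, **kwargs):  # hypothetical wrapper
#         ...  # e.g. prefetch file contents before delegating
#         return orig(*args, **kwargs)
#
#     extensions.wrapfunction(somemodule, b'somefunction', mywrapper)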


def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get('shallow'):
        repos = []

        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (
                        self.__class__.__bases__[0],
                        self.unfiltered().__class__,
                    )
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                with self.lock():
                    # acquire store lock before writing requirements as some
                    # requirements might be written to .hg/store/requires
                    scmutil.writereporequirements(self)

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)

        extensions.wrapfunction(exchange, b'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts['includepattern'] = b'\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts['excludepattern'] = b'\0'.join(repo.excludepattern)
                    return remote._callstream(b'stream_out_shallow', **opts)
                else:
                    return orig()

            extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)

        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)

        extensions.wrapfunction(
            streamclone, b'maybeperformlegacystreamclone', stream_wrap
        )

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
                pullop.remotebundle2caps[b'stream'] = [
                    c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2'
                ]
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements

        extensions.wrapfunction(
            streamclone, b'canperformstreamclone', canperformstreamclone
        )

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get('shallow'):
            for r in repos:
                if util.safehasattr(r, b'fileservice'):
                    r.fileservice.close()


def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen


def reposetup(ui, repo):
    if not repo.local():
        return

    # put here intentionally because it doesn't work in uisetup
    ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
    ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool(b'remotefilelog', b'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError(b"Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)


def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)


def storewrapper(orig, requirements, path, vfstype):
    s = orig(requirements, path, vfstype)
    if constants.SHALLOWREPO_REQUIREMENT in requirements:
        s = shallowstore.wrapstore(s)

    return s


# prefetch files before update
def applyupdates(
    orig, repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts
):
    if isenabled(repo):
        manifest = mctx.manifest()
        files = []
        for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET]):
            files.append((f, hex(manifest[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts)


# Prefetch merge checkunknownfiles
def checkunknownfiles(orig, repo, wctx, mctx, force, mresult, *args, **kwargs):
    if isenabled(repo):
        files = []
        sparsematch = repo.maybesparsematch(mctx.rev())
        for f, (m, actionargs, msg) in mresult.filemap():
            if sparsematch and not sparsematch(f):
                continue
            if m in (
                mergestatemod.ACTION_CREATED,
                mergestatemod.ACTION_DELETED_CHANGED,
                mergestatemod.ACTION_CREATED_MERGE,
            ):
                files.append((f, hex(mctx.filenode(f))))
            elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
                f2 = actionargs[0]
                files.append((f2, hex(mctx.filenode(f2))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, wctx, mctx, force, mresult, *args, **kwargs)


# Prefetch files before status attempts to look at their size and contents
def checklookup(orig, self, files):
    repo = self._repo
    if isenabled(repo):
        prefetchfiles = []
        for parent in self._parents:
            for f in files:
                if f in parent:
                    prefetchfiles.append((f, hex(parent.filenode(f))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(prefetchfiles)
    return orig(self, files)


# Prefetch the logic that compares added and removed files for renames
def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
    if isenabled(repo):
        files = []
        pmf = repo[b'.'].manifest()
        for f in removed:
            if f in pmf:
                files.append((f, hex(pmf[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, matcher, added, removed, *args, **kwargs)


# prefetch files before pathcopies check
def computeforwardmissing(orig, a, b, match=None):
    missing = orig(a, b, match=match)
    repo = a._repo
    if isenabled(repo):
        mb = b.manifest()

        files = []
        sparsematch = repo.maybesparsematch(b.rev())
        if sparsematch:
            sparsemissing = set()
            for f in missing:
                if sparsematch(f):
                    files.append((f, hex(mb[f])))
                    sparsemissing.add(f)
            missing = sparsemissing

        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return missing


# close cache miss server connection after the command has finished
def runcommand(orig, lui, repo, *args, **kwargs):
    fileservice = None
    # repo can be None when running in chg:
    # - at startup, reposetup was called because serve is not norepo
    # - a norepo command like "help" is called
    if repo and isenabled(repo):
        fileservice = repo.fileservice
    try:
        return orig(lui, repo, *args, **kwargs)
    finally:
        if fileservice:
            fileservice.close()


# prevent strip from stripping remotefilelogs
def _collectbrokencsets(orig, repo, files, striprev):
    if isenabled(repo):
        files = list([f for f in files if not repo.shallowmatch(f)])
    return orig(repo, files, striprev)


# changectx wrappers
def filectx(orig, self, path, fileid=None, filelog=None):
    if fileid is None:
        fileid = self.filenode(path)
    if isenabled(self._repo) and self._repo.shallowmatch(path):
        return remotefilectx.remotefilectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
    return orig(self, path, fileid=fileid, filelog=filelog)


def workingfilectx(orig, self, path, filelog=None):
    if isenabled(self._repo) and self._repo.shallowmatch(path):
        return remotefilectx.remoteworkingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
    return orig(self, path, filelog=filelog)


# prefetch required revisions before a diff
def trydiff(
    orig,
    repo,
    revs,
    ctx1,
    ctx2,
    modified,
    added,
    removed,
    copy,
    getfilectx,
    *args,
    **kwargs
):
    if isenabled(repo):
        prefetch = []
        mf1 = ctx1.manifest()
        for fname in modified + added + removed:
            if fname in mf1:
                fnode = getfilectx(fname, ctx1).filenode()
                # fnode can be None if it's an edited working ctx file
                if fnode:
                    prefetch.append((fname, hex(fnode)))
            if fname not in removed:
                fnode = getfilectx(fname, ctx2).filenode()
                if fnode:
                    prefetch.append((fname, hex(fnode)))

        repo.fileservice.prefetch(prefetch)

    return orig(
        repo,
        revs,
        ctx1,
        ctx2,
        modified,
        added,
        removed,
        copy,
        getfilectx,
        *args,
        **kwargs
    )


# Prevent verify from processing files
# a stub for mercurial.hg.verify()
def _verify(orig, repo, level=None):
    lock = repo.lock()
    try:
        return shallowverifier.shallowverifier(repo).verify()
    finally:
        lock.release()


clientonetime = False


def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []

    def addrawrevision(
        orig,
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        if isinstance(link, int):
            pendingfilecommits.append(
                (
                    self,
                    rawtext,
                    transaction,
                    link,
                    p1,
                    p2,
                    node,
                    flags,
                    cachedelta,
                    _metatuple,
                )
            )
            return node
        else:
            return orig(
                self,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                node,
                flags,
                cachedelta,
                _metatuple=_metatuple,
            )

    extensions.wrapfunction(
        remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
    )

    def changelogadd(orig, self, *args, **kwargs):
        oldlen = len(self)
        node = orig(self, *args, **kwargs)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        b'pending multiple integer revisions are not supported'
                    )
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len({x[3] for x in pendingfilecommits}) > 1:
                raise error.ProgrammingError(
                    b'pending multiple integer revisions are not supported'
                )
        del pendingfilecommits[:]
        return node

    extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
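
    # In outline, the two wrappers above cooperate as follows:
    # 1. remotefilelog.addrawrevision() is called during commit with an
    #    integer linkrev; the revision is queued on pendingfilecommits
    #    instead of being written immediately.
    # 2. changelog.add() runs and produces the real commit node.
    # 3. changelogadd() then drains the queue, writing each pending file
    #    revision with the now-known linknode.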


def getrenamedfn(orig, repo, endrev=None):
    if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
        return orig(repo, endrev)

    rcache = {}

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed


def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _(b"filelog requires a pattern"))
    m = matchmod.match(
        repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
    )
    s = set()

    if not matchmod.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])
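
# Usage sketch for the predicate above, in standard revset syntax (the
# path is illustrative):
#
#     hg log -r "filelog('path/to/file.c')"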


@command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    """garbage collect the client and server filelog caches"""
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = []
    pwd = ui.environ.get(b'PWD')
    if pwd:
        repopaths.append(pwd)

    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)


def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, b'repos')
    if not os.path.exists(repospath):
        ui.warn(_(b"no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, b'rb')
    repos = {r[:-1] for r in reposfile.readlines()}
    reposfile.close()
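
    # The `repos` file read above is a plain list of repository paths, one
    # per line, e.g. (illustrative paths):
    #
    #     /home/alice/repo1
    #     /home/alice/repo2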

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(
        _(b"analyzing repositories"), unit=b"repos", total=len(repos)
    )
    for path in repos:
        progress.update(count)
        count += 1
        try:
-            path = ui.expandpath(os.path.normpath(path))
+            path = util.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, b'name'):
            ui.warn(
                _(b"repo %s is a misconfigured remotefilelog repo\n") % path
            )
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
        gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If repack cannot be performed due to not enough disk space
                # continue doing garbage collection of loose files w/o repack
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))

        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, b'wb')
        reposfile.writelines([(b"%s\n" % r) for r in validrepos])
947 reposfile.writelines([(b"%s\n" % r) for r in validrepos])
948 reposfile.close()
948 reposfile.close()
949 finally:
949 finally:
950 os.umask(oldumask)
950 os.umask(oldumask)
951
951
952 # prune cache
952 # prune cache
953 if sharedcache is not None:
953 if sharedcache is not None:
954 sharedcache.gc(keepkeys)
954 sharedcache.gc(keepkeys)
955 elif not filesrepacked:
955 elif not filesrepacked:
956 ui.warn(_(b"warning: no valid repos in repofile\n"))
956 ui.warn(_(b"warning: no valid repos in repofile\n"))
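
# The 'repos' file read by gcclient above holds one repository path per line
# (the trailing newline is stripped by r[:-1]); `~` is expanded through
# util.expandpath. Hypothetical contents:
#
#   /home/alice/repo1
#   ~/repo2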


def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get('follow')
    revs = opts.get('rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts['removed'] = True

        # If this is a non-follow log without any revs specified, recommend that
        # the user add -f to speed it up.
        if not follow and not revs:
            match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(
                    _(
                        b"warning: file log can be slow on large repos - "
                        + b"use -f to speed it up\n"
                    )
                )

    return orig(ui, repo, *pats, **opts)
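
# Example of the heuristic above (a sketch with a hypothetical file name):
# `hg log README` on a shallow repo prints the slow-path warning, while
# `hg log -f README` follows the filelog and avoids it.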


def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is set to 14 days. If 'prefetchdays' is set
    to zero or negative value then date restriction is not applied.
    """
    days = ui.configint(b'remotefilelog', b'prefetchdays')
    if days > 0:
        revset = b'(%s) & date(-%s)' % (revset, days)
    return revset
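
# Worked example of the rewrite above: with remotefilelog.prefetchdays=14,
# revdatelimit(ui, b'master') returns b'(master) & date(-14)'; with
# prefetchdays set to zero or a negative value the revset is unchanged.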


def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. Default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
    fname = repo.vfs.join(b'lastprefetch')

    ready = False
    with open(fname, b'a'):
        # the with construct above is used to avoid race conditions
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready
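
# Sketch of the throttling above: with remotefilelog.prefetchdelay=120, two
# prefetch-triggering operations less than 120 seconds apart cause only the
# first to report ready; the second sees a fresh 'lastprefetch' mtime.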


def wcpprefetch(ui, repo, **kwargs):
    """Prefetches in background revisions specified by bgprefetchrevs revset.
    Does background repack if backgroundrepack flag is set in config.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
    # update a revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon(unused_success):
        if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

    repo._afterlock(anon)
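
# Example configuration driving the hook above (hypothetical revset value):
#
#   [remotefilelog]
#   bgprefetchrevs = .::
#   backgroundrepack = True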


def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch')
        bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
        bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')

        if prefetchrevset:
            ui.status(_(b"prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo[b'.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result
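
# Example configuration for the pull wrapper above (hypothetical revset
# value; the two background flags are optional):
#
#   [remotefilelog]
#   pullprefetch = master
#   backgroundprefetch = True
#   backgroundrepack = True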


def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(
        orig, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(
            source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
        )

    if util.safehasattr(remote, b'_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, b'getbundle'):
        extensions.wrapfunction(remote, b'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)


def _fileprefetchhook(repo, revmatches):
    if isenabled(repo):
        allfiles = []
        for rev, match in revmatches:
            if rev == wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)


@command(
    b'debugremotefilelog',
    [
        (b'd', b'decompress', None, _(b'decompress the filelog first')),
    ],
    _(b'hg debugremotefilelog <path>'),
    norepo=True,
)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)


@command(
    b'verifyremotefilelog',
    [
        (b'd', b'decompress', None, _(b'decompress the filelogs first')),
    ],
    _(b'hg verifyremotefilelogs <directory>'),
    norepo=True,
)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)


@command(
    b'debugdatapack',
    [
        (b'', b'long', None, _(b'print the long hashes')),
        (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
    ],
    _(b'hg debugdatapack <paths>'),
    norepo=True,
)
def debugdatapack(ui, *paths, **opts):
    return debugcommands.debugdatapack(ui, *paths, **opts)


@command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    return debugcommands.debughistorypack(ui, path)


@command(b'debugkeepset', [], _(b'hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))

    repackmod.keepset(repo, keyfn)
    return


@command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    return debugcommands.debugwaitonrepack(repo)


@command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    return debugcommands.debugwaitonprefetch(repo)


def resolveprefetchopts(ui, opts):
    if not opts.get(b'rev'):
        revset = [b'.', b'draft()']

        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
        if prefetchrevset:
            revset.append(b'(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
        if bgprefetchrevs:
            revset.append(b'(%s)' % bgprefetchrevs)
        revset = b'+'.join(revset)

        # update a revset with a date limit
        revset = revdatelimit(ui, revset)

        opts[b'rev'] = [revset]

    if not opts.get(b'base'):
        opts[b'base'] = None

    return opts
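
# Worked example of the default built above (assuming pullprefetch and
# bgprefetchrevs are unset and prefetchdays=14):
#
#   opts[b'rev'] == [b'(.+draft()) & date(-14)']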


@command(
    b'prefetch',
    [
        (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
        (b'', b'repack', False, _(b'run repack after prefetch')),
        (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
    ]
    + commands.walkopts,
    _(b'hg prefetch [OPTIONS] [FILE...]'),
    helpcategory=command.CATEGORY_MAINTENANCE,
)
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetches file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_(b"repo is not shallow"))

    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get(b'rev'))
    repo.prefetch(revs, opts.get(b'base'), pats, opts)

    # Run repack in background
    if opts.get(b'repack'):
        repackmod.backgroundrepack(repo, incremental=True)
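
# Illustrative invocations of `hg prefetch` (hypothetical revset and path):
#
#   $ hg prefetch -r 'draft()'        # fetch file contents for draft commits
#   $ hg prefetch -r . --repack src/  # limit to a path, then repack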


@command(
    b'repack',
    [
        (b'', b'background', None, _(b'run in a background process'), None),
        (b'', b'incremental', None, _(b'do an incremental repack'), None),
        (
            b'',
            b'packsonly',
            None,
            _(b'only repack packs (skip loose objects)'),
            None,
        ),
    ],
    _(b'hg repack [OPTIONS]'),
)
def repack_(ui, repo, *pats, **opts):
    if opts.get('background'):
        repackmod.backgroundrepack(
            repo,
            incremental=opts.get('incremental'),
            packsonly=opts.get('packsonly', False),
        )
        return

    options = {b'packsonly': opts.get('packsonly')}

    try:
        if opts.get('incremental'):
            repackmod.incrementalrepack(repo, options=options)
        else:
            repackmod.fullrepack(repo, options=options)
    except repackmod.RepackAlreadyRunning as ex:
        # Don't propagate the exception if the repack is already in
        # progress, since we want the command to exit 0.
        repo.ui.warn(b'%s\n' % ex)
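
# Illustrative invocations of `hg repack`:
#
#   $ hg repack --incremental             # repack only what is worthwhile
#   $ hg repack --background --packsonly  # repack packs in the background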