remotefilelog: move most setup from onetimesetup() to uisetup()...
Martin von Zweigbergk
r42460:8a0e03f7 default
@@ -1,1124 +1,1111 @@
# __init__.py - remotefilelog extension
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

remotefilelog works by fetching file contents lazily and storing them
in a cache on the client rather than in revlogs. This allows enormous
histories to be transferred only partially, making them easier to
operate on.

Configs:

``packs.maxchainlen`` specifies the maximum delta chain length in pack files

``packs.maxpacksize`` specifies the maximum pack file size

``packs.maxpackfilecount`` specifies the maximum number of packs in the
shared cache (trees only for now)

``remotefilelog.backgroundprefetch`` runs prefetch in background when True

``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
update, and on other commands that use them. Different from pullprefetch.

``remotefilelog.gcrepack`` does garbage collection during repack when True

``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
it is garbage collected

``remotefilelog.repackonhggc`` runs repack on hg gc when True

``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
days after which it is no longer prefetched.

``remotefilelog.prefetchdelay`` specifies delay between background
prefetches in seconds after operations that change the working copy parent
``remotefilelog.data.gencountlimit`` constrains the minimum number of data
pack files required to be considered part of a generation. In particular,
minimum number of pack files > gencountlimit.

``remotefilelog.data.generations`` list for specifying the lower bound of
each generation of the data pack files. For example, list ['100MB','1MB']
or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [
1MB, 100MB) and [100MB, infinity).

``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
include in an incremental data repack.

``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
it to be considered for an incremental data repack.

``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
to include in an incremental data repack.

``remotefilelog.history.gencountlimit`` constrains the minimum number of
history pack files required to be considered part of a generation. In
particular, minimum number of pack files > gencountlimit.

``remotefilelog.history.generations`` list for specifying the lower bound of
each generation of the history pack files. For example, list [
'100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [
0, 1MB), [1MB, 100MB) and [100MB, infinity).

``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
include in an incremental history repack.

``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
for it to be considered for an incremental history repack.

``remotefilelog.history.repacksizelimit`` the maximum total size of pack
files to include in an incremental history repack.

``remotefilelog.backgroundrepack`` automatically consolidate packs in the
background

``remotefilelog.cachepath`` path to cache

``remotefilelog.cachegroup`` if set, make cache directory sgid to this
group

``remotefilelog.cacheprocess`` binary to invoke for fetching file data

``remotefilelog.debug`` turn on remotefilelog-specific debug output

``remotefilelog.excludepattern`` pattern of files to exclude from pulls

``remotefilelog.includepattern`` pattern of files to include in pulls

``remotefilelog.fetchwarning``: message to print when too many
single-file fetches occur

``remotefilelog.getfilesstep`` number of files to request in a single RPC

``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
files, otherwise use optimistic fetching

``remotefilelog.pullprefetch`` revset for selecting files that should be
eagerly downloaded rather than lazily

``remotefilelog.reponame`` name of the repo. If set, used to partition
data from other repos in a shared store.

``remotefilelog.server`` if true, enable server-side functionality

``remotefilelog.servercachepath`` path for caching blobs on the server

``remotefilelog.serverexpiration`` number of days to keep cached server
blobs

``remotefilelog.validatecache`` if set, check cache entries for corruption
before returning blobs

``remotefilelog.validatecachelog`` if set, check cache entries for
corruption before returning metadata

126 """
126 """
from __future__ import absolute_import

import os
import time
import traceback

from mercurial.node import hex
from mercurial.i18n import _
from mercurial import (
    changegroup,
    changelog,
    cmdutil,
    commands,
    configitems,
    context,
    copies,
    debugcommands as hgdebugcommands,
    dispatch,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    match,
    merge,
    node as nodemod,
    patch,
    pycompat,
    registrar,
    repair,
    repoview,
    revset,
    scmutil,
    smartset,
    streamclone,
    util,
)
from . import (
    constants,
    debugcommands,
    fileserverclient,
    remotefilectx,
    remotefilelog,
    remotefilelogserver,
    repack as repackmod,
    shallowbundle,
    shallowrepo,
    shallowstore,
    shallowutil,
    shallowverifier,
)

# ensures debug commands are registered
hgdebugcommands.command

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem('remotefilelog', 'debug', default=False)

configitem('remotefilelog', 'reponame', default='')
configitem('remotefilelog', 'cachepath', default=None)
configitem('remotefilelog', 'cachegroup', default=None)
configitem('remotefilelog', 'cacheprocess', default=None)
configitem('remotefilelog', 'cacheprocess.includepath', default=None)
configitem("remotefilelog", "cachelimit", default="1000 GB")

configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
           alias=[('remotefilelog', 'fallbackrepo')])

configitem('remotefilelog', 'validatecachelog', default=None)
configitem('remotefilelog', 'validatecache', default='on')
configitem('remotefilelog', 'server', default=None)
configitem('remotefilelog', 'servercachepath', default=None)
configitem("remotefilelog", "serverexpiration", default=30)
configitem('remotefilelog', 'backgroundrepack', default=False)
configitem('remotefilelog', 'bgprefetchrevs', default=None)
configitem('remotefilelog', 'pullprefetch', default=None)
configitem('remotefilelog', 'backgroundprefetch', default=False)
configitem('remotefilelog', 'prefetchdelay', default=120)
configitem('remotefilelog', 'prefetchdays', default=14)

configitem('remotefilelog', 'getfilesstep', default=10000)
configitem('remotefilelog', 'getfilestype', default='optimistic')
configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
configitem('remotefilelog', 'fetchwarning', default='')

configitem('remotefilelog', 'includepattern', default=None)
configitem('remotefilelog', 'excludepattern', default=None)

configitem('remotefilelog', 'gcrepack', default=False)
configitem('remotefilelog', 'repackonhggc', default=False)
configitem('repack', 'chainorphansbysize', default=True)

configitem('packs', 'maxpacksize', default=0)
configitem('packs', 'maxchainlen', default=1000)

# default TTL limit is 30 days
_defaultlimit = 60 * 60 * 24 * 30
configitem('remotefilelog', 'nodettl', default=_defaultlimit)

configitem('remotefilelog', 'data.gencountlimit', default=2),
configitem('remotefilelog', 'data.generations',
           default=['1GB', '100MB', '1MB'])
configitem('remotefilelog', 'data.maxrepackpacks', default=50)
configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
configitem('remotefilelog', 'data.repacksizelimit', default='100MB')

configitem('remotefilelog', 'history.gencountlimit', default=2),
configitem('remotefilelog', 'history.generations', default=['100MB'])
configitem('remotefilelog', 'history.maxrepackpacks', default=50)
configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
configitem('remotefilelog', 'history.repacksizelimit', default='100MB')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

repoclass = localrepo.localrepository
repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)

isenabled = shallowutil.isenabled

def uisetup(ui):
    """Wraps user facing Mercurial commands to swap them out with shallow
    versions.
    """
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
    entry[1].append(('', 'shallow', None,
                     _("create a shallow clone which uses remote file "
                       "history")))

    extensions.wrapcommand(commands.table, 'debugindex',
                           debugcommands.debugindex)
    extensions.wrapcommand(commands.table, 'debugindexdot',
                           debugcommands.debugindexdot)
    extensions.wrapcommand(commands.table, 'log', log)
    extensions.wrapcommand(commands.table, 'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        if (isenabled(repo) and opts.get(r'all')):
            raise error.Abort(_("--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)
    extensions.wrapcommand(commands.table, "manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find('lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod
    extensions.afterloaded('lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)

+    changegroup.cgpacker = shallowbundle.shallowcg1packer
+
+    extensions.wrapfunction(changegroup, '_addchangegroupfiles',
+                            shallowbundle.addchangegroupfiles)
+    extensions.wrapfunction(
+        changegroup, 'makechangegroup', shallowbundle.makechangegroup)
+    extensions.wrapfunction(localrepo, 'makestore', storewrapper)
+    extensions.wrapfunction(exchange, 'pull', exchangepull)
+    extensions.wrapfunction(merge, 'applyupdates', applyupdates)
+    extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
+    extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
+    extensions.wrapfunction(scmutil, '_findrenames', findrenames)
+    extensions.wrapfunction(copies, '_computeforwardmissing',
+                            computeforwardmissing)
+    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
+    extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
+    extensions.wrapfunction(context.changectx, 'filectx', filectx)
+    extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
+    extensions.wrapfunction(patch, 'trydiff', trydiff)
+    extensions.wrapfunction(hg, 'verify', _verify)
+    scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
+
+    # disappointing hacks below
+    scmutil.getrenamedfn = getrenamedfn
+    extensions.wrapfunction(revset, 'filelog', filelogrevset)
+    revset.symbols['filelog'] = revset.filelog
+    extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
+
+
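# Editorial note (not part of this change): the wrappers registered above all
# follow Mercurial's extensions.wrapfunction() convention, in which the wrapper
# receives the original implementation as its first argument and is expected to
# delegate to it. A minimal sketch, with a hypothetical wrapper name:
#
#   def mywrapper(orig, *args, **kwargs):
#       # remotefilelog-specific work happens here
#       return orig(*args, **kwargs)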
def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get(r'shallow'):
        repos = []
        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (self.__class__.__bases__[0],
                                                self.unfiltered().__class__)
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                self._writerequirements()

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)
        extensions.wrapfunction(exchange, 'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts[r'includepattern'] = '\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
                    return remote._callstream('stream_out_shallow', **opts)
                else:
                    return orig()
            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)
        extensions.wrapfunction(
            streamclone, 'maybeperformlegacystreamclone', stream_wrap)

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if 'v2' in pullop.remotebundle2caps.get('stream', []):
                pullop.remotebundle2caps['stream'] = [
                    c for c in pullop.remotebundle2caps['stream']
                    if c != 'v2']
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements
        extensions.wrapfunction(
            streamclone, 'canperformstreamclone', canperformstreamclone)

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get(r'shallow'):
            for r in repos:
                if util.safehasattr(r, 'fileservice'):
                    r.fileservice.close()

def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen

def reposetup(ui, repo):
    if not repo.local():
        return

    # put here intentionally because it doesn't work in uisetup
    ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
    ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool('remotefilelog', 'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError("Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)

def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)

def storewrapper(orig, requirements, path, vfstype):
    s = orig(requirements, path, vfstype)
    if constants.SHALLOWREPO_REQUIREMENT in requirements:
        s = shallowstore.wrapstore(s)

    return s

# prefetch files before update
def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
    if isenabled(repo):
        manifest = mctx.manifest()
        files = []
        for f, args, msg in actions['g']:
            files.append((f, hex(manifest[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, actions, wctx, mctx, overwrite, labels=labels)

# Prefetch merge checkunknownfiles
def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
                      *args, **kwargs):
    if isenabled(repo):
        files = []
        sparsematch = repo.maybesparsematch(mctx.rev())
        for f, (m, actionargs, msg) in actions.iteritems():
            if sparsematch and not sparsematch(f):
                continue
            if m in ('c', 'dc', 'cm'):
                files.append((f, hex(mctx.filenode(f))))
            elif m == 'dg':
                f2 = actionargs[0]
                files.append((f2, hex(mctx.filenode(f2))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, wctx, mctx, force, actions, *args, **kwargs)

# Prefetch files before status attempts to look at their size and contents
def checklookup(orig, self, files):
    repo = self._repo
    if isenabled(repo):
        prefetchfiles = []
        for parent in self._parents:
            for f in files:
                if f in parent:
                    prefetchfiles.append((f, hex(parent.filenode(f))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(prefetchfiles)
    return orig(self, files)

# Prefetch the logic that compares added and removed files for renames
def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
    if isenabled(repo):
        files = []
        pmf = repo['.'].manifest()
        for f in removed:
            if f in pmf:
                files.append((f, hex(pmf[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, matcher, added, removed, *args, **kwargs)

# prefetch files before pathcopies check
def computeforwardmissing(orig, a, b, match=None):
    missing = orig(a, b, match=match)
    repo = a._repo
    if isenabled(repo):
        mb = b.manifest()

        files = []
        sparsematch = repo.maybesparsematch(b.rev())
        if sparsematch:
            sparsemissing = set()
            for f in missing:
                if sparsematch(f):
                    files.append((f, hex(mb[f])))
                    sparsemissing.add(f)
            missing = sparsemissing

        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return missing

# close cache miss server connection after the command has finished
def runcommand(orig, lui, repo, *args, **kwargs):
    fileservice = None
    # repo can be None when running in chg:
    # - at startup, reposetup was called because serve is not norepo
    # - a norepo command like "help" is called
    if repo and isenabled(repo):
        fileservice = repo.fileservice
    try:
        return orig(lui, repo, *args, **kwargs)
    finally:
        if fileservice:
            fileservice.close()

# prevent strip from stripping remotefilelogs
def _collectbrokencsets(orig, repo, files, striprev):
    if isenabled(repo):
        files = list([f for f in files if not repo.shallowmatch(f)])
    return orig(repo, files, striprev)

# changectx wrappers
def filectx(orig, self, path, fileid=None, filelog=None):
    if fileid is None:
        fileid = self.filenode(path)
    if (isenabled(self._repo) and self._repo.shallowmatch(path)):
        return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
                                           changectx=self, filelog=filelog)
    return orig(self, path, fileid=fileid, filelog=filelog)

def workingfilectx(orig, self, path, filelog=None):
    if (isenabled(self._repo) and self._repo.shallowmatch(path)):
        return remotefilectx.remoteworkingfilectx(self._repo, path,
                                                  workingctx=self,
                                                  filelog=filelog)
    return orig(self, path, filelog=filelog)

# prefetch required revisions before a diff
def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, *args, **kwargs):
    if isenabled(repo):
        prefetch = []
        mf1 = ctx1.manifest()
        for fname in modified + added + removed:
            if fname in mf1:
                fnode = getfilectx(fname, ctx1).filenode()
                # fnode can be None if it's an edited working ctx file
                if fnode:
                    prefetch.append((fname, hex(fnode)))
            if fname not in removed:
                fnode = getfilectx(fname, ctx2).filenode()
                if fnode:
                    prefetch.append((fname, hex(fnode)))

        repo.fileservice.prefetch(prefetch)

    return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
                getfilectx, *args, **kwargs)

# Prevent verify from processing files
# a stub for mercurial.hg.verify()
def _verify(orig, repo, level=None):
    lock = repo.lock()
    try:
        return shallowverifier.shallowverifier(repo).verify()
    finally:
        lock.release()


clientonetime = False
def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

-    changegroup.cgpacker = shallowbundle.shallowcg1packer
-
-    extensions.wrapfunction(changegroup, '_addchangegroupfiles',
-                            shallowbundle.addchangegroupfiles)
-    extensions.wrapfunction(
-        changegroup, 'makechangegroup', shallowbundle.makechangegroup)
-
-    extensions.wrapfunction(localrepo, 'makestore', storewrapper)
-
-    extensions.wrapfunction(exchange, 'pull', exchangepull)
-
-    extensions.wrapfunction(merge, 'applyupdates', applyupdates)
-
-    extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
-
-    extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
-
-    extensions.wrapfunction(scmutil, '_findrenames', findrenames)
-
-    extensions.wrapfunction(copies, '_computeforwardmissing',
-                            computeforwardmissing)
-
-    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
-
-    # disappointing hacks below
-    scmutil.getrenamedfn = getrenamedfn
-    extensions.wrapfunction(revset, 'filelog', filelogrevset)
-    revset.symbols['filelog'] = revset.filelog
-    extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
-
-    extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
-
    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []
    def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
                       flags, cachedelta=None, _metatuple=None):
        if isinstance(link, int):
            pendingfilecommits.append(
                (self, rawtext, transaction, link, p1, p2, node, flags,
                 cachedelta, _metatuple))
            return node
        else:
            return orig(self, rawtext, transaction, link, p1, p2, node, flags,
                        cachedelta, _metatuple=_metatuple)
    extensions.wrapfunction(
        remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)

    def changelogadd(orig, self, *args):
        oldlen = len(self)
        node = orig(self, *args)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        'pending multiple integer revisions are not supported')
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len(set(x[3] for x in pendingfilecommits)) > 1:
                raise error.ProgrammingError(
                    'pending multiple integer revisions are not supported')
        del pendingfilecommits[:]
        return node
    extensions.wrapfunction(changelog.changelog, 'add', changelogadd)

-    extensions.wrapfunction(context.changectx, 'filectx', filectx)
-
-    extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
-
-    extensions.wrapfunction(patch, 'trydiff', trydiff)
-
-    extensions.wrapfunction(hg, 'verify', _verify)
-
-    scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
-
def getrenamedfn(repo, endrev=None):
    rcache = {}

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed

def walkfilerevs(orig, repo, match, follow, revs, fncache):
    if not isenabled(repo):
        return orig(repo, match, follow, revs, fncache)

    # remotefilelogs can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError("Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    pctx = repo['.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(_('cannot follow file not in parent '
                                'revision: "%s"') % filename)
        fctx = pctx[filename]

        linkrev = fctx.linkrev()
        if linkrev >= minrev and linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(filename)
            wanted.add(linkrev)

        for ancestor in fctx.ancestors():
            linkrev = ancestor.linkrev()
            if linkrev >= minrev and linkrev <= maxrev:
                fncache.setdefault(linkrev, []).append(ancestor.path())
                wanted.add(linkrev)

    return wanted

def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _("filelog requires a pattern"))
    m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
                    ctx=repo[None])
    s = set()

    if not match.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])

@command('gc', [], _('hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
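    # Editorial note: an illustrative invocation (hypothetical paths), which
    # garbage collects the system-wide cache plus the caches of the named
    # repositories:
    #
    #   $ hg gc /path/to/repo1 /path/to/repo2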
748 cachepaths = set()
735 cachepaths = set()
749
736
750 # get the system client cache
737 # get the system client cache
751 systemcache = shallowutil.getcachepath(ui, allowempty=True)
738 systemcache = shallowutil.getcachepath(ui, allowempty=True)
752 if systemcache:
739 if systemcache:
753 cachepaths.add(systemcache)
740 cachepaths.add(systemcache)
754
741
755 # get repo client and server cache
742 # get repo client and server cache
756 repopaths = []
743 repopaths = []
757 pwd = ui.environ.get('PWD')
744 pwd = ui.environ.get('PWD')
758 if pwd:
745 if pwd:
759 repopaths.append(pwd)
746 repopaths.append(pwd)
760
747
761 repopaths.extend(args)
748 repopaths.extend(args)
762 repos = []
749 repos = []
763 for repopath in repopaths:
750 for repopath in repopaths:
764 try:
751 try:
765 repo = hg.peer(ui, {}, repopath)
752 repo = hg.peer(ui, {}, repopath)
766 repos.append(repo)
753 repos.append(repo)
767
754
768 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
755 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
769 if repocache:
756 if repocache:
770 cachepaths.add(repocache)
757 cachepaths.add(repocache)
771 except error.RepoError:
758 except error.RepoError:
772 pass
759 pass
773
760
774 # gc client cache
761 # gc client cache
775 for cachepath in cachepaths:
762 for cachepath in cachepaths:
776 gcclient(ui, cachepath)
763 gcclient(ui, cachepath)
777
764
778 # gc server cache
765 # gc server cache
779 for repo in repos:
766 for repo in repos:
780 remotefilelogserver.gcserver(ui, repo._repo)
767 remotefilelogserver.gcserver(ui, repo._repo)
781
768
def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, 'repos')
    if not os.path.exists(repospath):
        ui.warn(_("no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, 'rb')
    repos = {r[:-1] for r in reposfile.readlines()}
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
                               total=len(repos))
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, 'name'):
            ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
        gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If the repack cannot be performed (e.g. not enough disk
                # space), fall back to garbage collection of loose files
                # without repacking.
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))
        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, 'wb')
        reposfile.writelines([("%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_("warning: no valid repos in repofile\n"))

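# Illustrative note (not part of this module): keepset() accumulates, across
# all repos that share a cache, the set of cache keys that must survive
# pruning; threading the previous result through lastkeepkeys makes the
# union incremental, and everything outside the final set is pruned:
#
#   keep = set()
#   for repo in repos_sharing_cache:      # hypothetical iterable
#       keep = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keep)
#   sharedcache.gc(keep)
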
def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get(r'follow')
    revs = opts.get(r'rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts[r'removed'] = True

        # If this is a non-follow log without any revs specified, recommend
        # that the user add -f to speed it up.
        if not follow and not revs:
            match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(_("warning: file log can be slow on large repos - "
                          "use -f to speed it up\n"))

    return orig(ui, repo, *pats, **opts)

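# Illustrative example (hypothetical file name): on a shallow clone,
#
#   $ hg log README      # non-follow file log: slow path, prints the warning
#   $ hg log -f README   # follow log: fast path, no warning
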
def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is set to 14 days. If 'prefetchdays' is
    set to zero or a negative value, the date restriction is not applied.
    """
    days = ui.configint('remotefilelog', 'prefetchdays')
    if days > 0:
        revset = '(%s) & date(-%s)' % (revset, days)
    return revset

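# Illustrative example (hypothetical config value): with
# remotefilelog.prefetchdays set to 14,
#
#   revdatelimit(ui, 'draft()')  ->  '(draft()) & date(-14)'
#
# so only draft changesets from the last 14 days remain in the revset.
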
def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. The default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
    fname = repo.vfs.join('lastprefetch')

    ready = False
    with open(fname, 'a'):
        # the with construct above is used to avoid race conditions
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready

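# Minimal standalone sketch of the mtime-based throttle used above (assumes
# a plain file path instead of a repo vfs; names are hypothetical):
#
#   import os
#   import time
#
#   def toosoon(path, interval):
#       with open(path, 'a'):                    # create the file if missing
#           if time.time() - os.path.getmtime(path) <= interval:
#               return True                      # last run was too recent
#           os.utime(path, None)                 # record this run
#       return False
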
def wcpprefetch(ui, repo, **kwargs):
    """Prefetches, in the background, the revisions specified by the
    bgprefetchrevs revset. Does a background repack if the backgroundrepack
    flag is set in the config.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
    # update a revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon():
        if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

    repo._afterlock(anon)

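# Illustrative configuration (hypothetical revset): enable background
# prefetch after working-copy-parent changes, repacking afterwards and
# waiting at least 120 seconds between prefetches:
#
#   [remotefilelog]
#   bgprefetchrevs = draft()
#   backgroundrepack = True
#   prefetchdelay = 120
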
def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
        bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
        bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')

        if prefetchrevset:
            ui.status(_("prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo['.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result

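# Illustrative configuration (hypothetical revset): prefetch the files of
# the remote master bookmark after every pull, in the foreground:
#
#   [remotefilelog]
#   pullprefetch = bookmark(master)
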
def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
                       **kwargs):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
                    **kwargs)

    if util.safehasattr(remote, '_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, 'getbundle'):
        extensions.wrapfunction(remote, 'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)

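# Sketch of the wrapping pattern used above: extensions.wrapfunction
# replaces an attribute with a wrapper that receives the original callable
# as its first argument (the names below are hypothetical):
#
#   def wrapper(orig, *args, **kwargs):
#       # inspect or adjust the arguments here
#       return orig(*args, **kwargs)
#
#   extensions.wrapfunction(someobject, 'somemethod', wrapper)
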
def _fileprefetchhook(repo, revs, match):
    if isenabled(repo):
        allfiles = []
        for rev in revs:
            if rev == nodemod.wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if path.endswith('/'):
                    # Tree manifest that's being excluded as part of narrow
                    continue
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)

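# Illustrative note (hypothetical values): fileservice.prefetch() receives
# the list of (path, hex filenode) pairs gathered above, e.g.
#
#   repo.fileservice.prefetch([('README', 'ab12...'),
#                              ('src/main.py', 'cd34...')])
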
@command('debugremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelog first')),
    ], _('hg debugremotefilelog <path>'), norepo=True)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)

@command('verifyremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelogs first')),
    ], _('hg verifyremotefilelog <directory>'), norepo=True)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)

@command('debugdatapack', [
    ('', 'long', None, _('print the long hashes')),
    ('', 'node', '', _('dump the contents of node'), 'NODE'),
    ], _('hg debugdatapack <paths>'), norepo=True)
def debugdatapack(ui, *paths, **opts):
    return debugcommands.debugdatapack(ui, *paths, **opts)

@command('debughistorypack', [
    ], _('hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    return debugcommands.debughistorypack(ui, path)

@command('debugkeepset', [
    ], _('hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
    repackmod.keepset(repo, keyfn)
    return

@command('debugwaitonrepack', [
    ], _('hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    return debugcommands.debugwaitonrepack(repo)

@command('debugwaitonprefetch', [
    ], _('hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    return debugcommands.debugwaitonprefetch(repo)

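# Illustrative usage (hypothetical pack path): dump a data pack with full
# hashes:
#
#   $ hg debugdatapack --long $CACHEPATH/packs/somepack
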
def resolveprefetchopts(ui, opts):
    if not opts.get('rev'):
        revset = ['.', 'draft()']

        prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
        if prefetchrevset:
            revset.append('(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
        if bgprefetchrevs:
            revset.append('(%s)' % bgprefetchrevs)
        revset = '+'.join(revset)

        # update a revset with a date limit
        revset = revdatelimit(ui, revset)

        opts['rev'] = [revset]

    if not opts.get('base'):
        opts['base'] = None

    return opts

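# Illustrative result (hypothetical config): with pullprefetch = master and
# prefetchdays = 14, a bare 'hg prefetch' resolves its revs to
#
#   (.+draft()+(master)) & date(-14)
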
@command('prefetch', [
    ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
    ('', 'repack', False, _('run repack after prefetch')),
    ('b', 'base', '', _("rev that is assumed to already be local")),
    ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetches file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_("repo is not shallow"))

    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get('rev'))
    repo.prefetch(revs, opts.get('base'), pats, opts)

    # Run repack in background
    if opts.get('repack'):
        repackmod.backgroundrepack(repo, incremental=True)

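# Illustrative usage (hypothetical pattern): prefetch all draft revisions,
# limited to Python files, then repack in the background:
#
#   $ hg prefetch -r 'draft()' --repack 'glob:**.py'
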
@command('repack', [
    ('', 'background', None, _('run in a background process'), None),
    ('', 'incremental', None, _('do an incremental repack'), None),
    ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
    ], _('hg repack [OPTIONS]'))
def repack_(ui, repo, *pats, **opts):
    if opts.get(r'background'):
        repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
                                   packsonly=opts.get(r'packsonly', False))
        return

    options = {'packsonly': opts.get(r'packsonly')}

    try:
        if opts.get(r'incremental'):
            repackmod.incrementalrepack(repo, options=options)
        else:
            repackmod.fullrepack(repo, options=options)
    except repackmod.RepackAlreadyRunning as ex:
        # Don't propagate the exception if the repack is already in
        # progress, since we want the command to exit 0.
        repo.ui.warn('%s\n' % ex)
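# Illustrative usage: run an incremental repack in a background process:
#
#   $ hg repack --background --incremental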