remotefilelog: use sysstr to check for attribute presence...
marmoute
r51788:a834ec41 default
@@ -1,1259 +1,1259 b''
# __init__.py - remotefilelog extension
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

remotefilelog works by fetching file contents lazily and storing them
in a cache on the client rather than in revlogs. This allows enormous
histories to be transferred only partially, making them easier to
operate on.

Configs:

    ``packs.maxchainlen`` specifies the maximum delta chain length in pack files

    ``packs.maxpacksize`` specifies the maximum pack file size

    ``packs.maxpackfilecount`` specifies the maximum number of packs in the
    shared cache (trees only for now)

    ``remotefilelog.backgroundprefetch`` runs prefetch in background when True

    ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
    update, and on other commands that use them. Different from pullprefetch.

    ``remotefilelog.gcrepack`` does garbage collection during repack when True

    ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
    it is garbage collected

    ``remotefilelog.repackonhggc`` runs repack on hg gc when True

    ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
    days after which it is no longer prefetched.

    ``remotefilelog.prefetchdelay`` specifies delay between background
    prefetches in seconds after operations that change the working copy parent

    ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
    pack files required to be considered part of a generation. In particular,
    minimum number of pack files > gencountlimit.

    ``remotefilelog.data.generations`` list for specifying the lower bound of
    each generation of the data pack files. For example, list ['100MB','1MB']
    or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [
    1MB, 100MB) and [100MB, infinity).

    ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
    include in an incremental data repack.

    ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
    it to be considered for an incremental data repack.

    ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
    to include in an incremental data repack.

    ``remotefilelog.history.gencountlimit`` constrains the minimum number of
    history pack files required to be considered part of a generation. In
    particular, minimum number of pack files > gencountlimit.

    ``remotefilelog.history.generations`` list for specifying the lower bound of
    each generation of the history pack files. For example, list [
    '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [
    0, 1MB), [1MB, 100MB) and [100MB, infinity).

    ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
    include in an incremental history repack.

    ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
    for it to be considered for an incremental history repack.

    ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
    files to include in an incremental history repack.

    ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
    background

    ``remotefilelog.cachepath`` path to cache

    ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
    group

    ``remotefilelog.cacheprocess`` binary to invoke for fetching file data

    ``remotefilelog.debug`` turn on remotefilelog-specific debug output

    ``remotefilelog.excludepattern`` pattern of files to exclude from pulls

    ``remotefilelog.includepattern`` pattern of files to include in pulls

    ``remotefilelog.fetchwarning``: message to print when too many
    single-file fetches occur

    ``remotefilelog.getfilesstep`` number of files to request in a single RPC

    ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
    files, otherwise use optimistic fetching

    ``remotefilelog.pullprefetch`` revset for selecting files that should be
    eagerly downloaded rather than lazily

    ``remotefilelog.reponame`` name of the repo. If set, used to partition
    data from other repos in a shared store.

    ``remotefilelog.server`` if true, enable server-side functionality

    ``remotefilelog.servercachepath`` path for caching blobs on the server

    ``remotefilelog.serverexpiration`` number of days to keep cached server
    blobs

    ``remotefilelog.validatecache`` if set, check cache entries for corruption
    before returning blobs

    ``remotefilelog.validatecachelog`` if set, check cache entries for
    corruption before returning metadata

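An illustrative client configuration combining the options documented above
(the cache path, repo name, and prefetch revset are placeholders, not
defaults)::

    [extensions]
    remotefilelog =

    [remotefilelog]
    server = False
    reponame = example-repo
    cachepath = /path/to/shared/cache
    pullprefetch = bookmark() + .
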
126 """
126 """

import os
import time
import traceback

from mercurial.node import (
    hex,
    wdirrev,
)
from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
    changegroup,
    changelog,
    commands,
    configitems,
    context,
    copies,
    debugcommands as hgdebugcommands,
    dispatch,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    match as matchmod,
    merge,
    mergestate as mergestatemod,
    patch,
    pycompat,
    registrar,
    repair,
    repoview,
    revset,
    scmutil,
    smartset,
    streamclone,
    util,
)
from . import (
    constants,
    debugcommands,
    fileserverclient,
    remotefilectx,
    remotefilelog,
    remotefilelogserver,
    repack as repackmod,
    shallowbundle,
    shallowrepo,
    shallowstore,
    shallowutil,
    shallowverifier,
)

# ensures debug commands are registered
hgdebugcommands.command

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(b'remotefilelog', b'debug', default=False)

configitem(b'remotefilelog', b'reponame', default=b'')
configitem(b'remotefilelog', b'cachepath', default=None)
configitem(b'remotefilelog', b'cachegroup', default=None)
configitem(b'remotefilelog', b'cacheprocess', default=None)
configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")

configitem(
    b'remotefilelog',
    b'fallbackpath',
    default=configitems.dynamicdefault,
    alias=[(b'remotefilelog', b'fallbackrepo')],
)

configitem(b'remotefilelog', b'validatecachelog', default=None)
configitem(b'remotefilelog', b'validatecache', default=b'on')
configitem(b'remotefilelog', b'server', default=None)
configitem(b'remotefilelog', b'servercachepath', default=None)
configitem(b"remotefilelog", b"serverexpiration", default=30)
configitem(b'remotefilelog', b'backgroundrepack', default=False)
configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
configitem(b'remotefilelog', b'pullprefetch', default=None)
configitem(b'remotefilelog', b'backgroundprefetch', default=False)
configitem(b'remotefilelog', b'prefetchdelay', default=120)
configitem(b'remotefilelog', b'prefetchdays', default=14)
# Other values include 'local' or 'none'. Any unrecognized value is 'all'.
configitem(b'remotefilelog', b'strip.includefiles', default='all')

configitem(b'remotefilelog', b'getfilesstep', default=10000)
configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
configitem(b'remotefilelog', b'fetchwarning', default=b'')

configitem(b'remotefilelog', b'includepattern', default=None)
configitem(b'remotefilelog', b'excludepattern', default=None)

configitem(b'remotefilelog', b'gcrepack', default=False)
configitem(b'remotefilelog', b'repackonhggc', default=False)
configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)

configitem(b'packs', b'maxpacksize', default=0)
configitem(b'packs', b'maxchainlen', default=1000)

configitem(b'devel', b'remotefilelog.bg-wait', default=False)

# default TTL limit is 30 days
_defaultlimit = 60 * 60 * 24 * 30
configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)

configitem(b'remotefilelog', b'data.gencountlimit', default=2),
configitem(
    b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
)
configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')

configitem(b'remotefilelog', b'history.gencountlimit', default=2),
configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

repoclass = localrepo.localrepository
repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)

isenabled = shallowutil.isenabled


def uisetup(ui):
    """Wraps user facing Mercurial commands to swap them out with shallow
    versions.
    """
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
    entry[1].append(
        (
            b'',
            b'shallow',
            None,
            _(b"create a shallow clone which uses remote file history"),
        )
    )

    extensions.wrapcommand(
        commands.table, b'debugindex', debugcommands.debugindex
    )
    extensions.wrapcommand(
        commands.table, b'debugindexdot', debugcommands.debugindexdot
    )
    extensions.wrapcommand(commands.table, b'log', log)
    extensions.wrapcommand(commands.table, b'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        if isenabled(repo) and opts.get('all'):
            raise error.Abort(_(b"--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)

    extensions.wrapcommand(commands.table, b"manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find(b'lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod

    extensions.afterloaded(b'lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)

    changegroup.cgpacker = shallowbundle.shallowcg1packer

    extensions.wrapfunction(
        changegroup, '_addchangegroupfiles', shallowbundle.addchangegroupfiles
    )
    extensions.wrapfunction(
        changegroup, 'makechangegroup', shallowbundle.makechangegroup
    )
    extensions.wrapfunction(localrepo, 'makestore', storewrapper)
    extensions.wrapfunction(exchange, 'pull', exchangepull)
    extensions.wrapfunction(merge, 'applyupdates', applyupdates)
    extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
    extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
    extensions.wrapfunction(scmutil, '_findrenames', findrenames)
    extensions.wrapfunction(
        copies, '_computeforwardmissing', computeforwardmissing
    )
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
    extensions.wrapfunction(context.changectx, 'filectx', filectx)
    extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
    extensions.wrapfunction(patch, 'trydiff', trydiff)
    extensions.wrapfunction(hg, 'verify', _verify)
    scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)

    # disappointing hacks below
    extensions.wrapfunction(scmutil, 'getrenamedfn', getrenamedfn)
    extensions.wrapfunction(revset, 'filelog', filelogrevset)
    revset.symbols[b'filelog'] = revset.filelog


def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get('shallow'):
        repos = []

        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (
                        self.__class__.__bases__[0],
                        self.unfiltered().__class__,
                    )
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                with self.lock():
                    # acquire store lock before writing requirements as some
                    # requirements might be written to .hg/store/requires
                    scmutil.writereporequirements(self)

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)

        extensions.wrapfunction(exchange, 'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts['includepattern'] = b'\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts['excludepattern'] = b'\0'.join(repo.excludepattern)
                    return remote._callstream(b'stream_out_shallow', **opts)
                else:
                    return orig()

            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)

        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)

        extensions.wrapfunction(
            streamclone, 'maybeperformlegacystreamclone', stream_wrap
        )

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
                pullop.remotebundle2caps[b'stream'] = []
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements

        extensions.wrapfunction(
            streamclone, 'canperformstreamclone', canperformstreamclone
        )

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get('shallow'):
            for r in repos:
-                if util.safehasattr(r, b'fileservice'):
+                if util.safehasattr(r, 'fileservice'):
                    r.fileservice.close()


def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen


def reposetup(ui, repo):
    if not repo.local():
        return

    # put here intentionally because it doesn't work in uisetup
    ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
    ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool(b'remotefilelog', b'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError(b"Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)


def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)


def storewrapper(orig, requirements, path, vfstype):
    s = orig(requirements, path, vfstype)
    if constants.SHALLOWREPO_REQUIREMENT in requirements:
        s = shallowstore.wrapstore(s)

    return s


# prefetch files before update
def applyupdates(
    orig, repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts
):
    if isenabled(repo):
        manifest = mctx.manifest()
        files = []
        for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET]):
            files.append((f, hex(manifest[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts)


# Prefetch merge checkunknownfiles
def checkunknownfiles(orig, repo, wctx, mctx, force, mresult, *args, **kwargs):
    if isenabled(repo):
        files = []
        sparsematch = repo.maybesparsematch(mctx.rev())
        for f, (m, actionargs, msg) in mresult.filemap():
            if sparsematch and not sparsematch(f):
                continue
            if m in (
                mergestatemod.ACTION_CREATED,
                mergestatemod.ACTION_DELETED_CHANGED,
                mergestatemod.ACTION_CREATED_MERGE,
            ):
                files.append((f, hex(mctx.filenode(f))))
            elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
                f2 = actionargs[0]
                files.append((f2, hex(mctx.filenode(f2))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, wctx, mctx, force, mresult, *args, **kwargs)


# Prefetch files before status attempts to look at their size and contents
def checklookup(orig, self, files, mtime_boundary):
    repo = self._repo
    if isenabled(repo):
        prefetchfiles = []
        for parent in self._parents:
            for f in files:
                if f in parent:
                    prefetchfiles.append((f, hex(parent.filenode(f))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(prefetchfiles)
    return orig(self, files, mtime_boundary)


# Prefetch the logic that compares added and removed files for renames
def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
    if isenabled(repo):
        files = []
        pmf = repo[b'.'].manifest()
        for f in removed:
            if f in pmf:
                files.append((f, hex(pmf[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, matcher, added, removed, *args, **kwargs)


# prefetch files before pathcopies check
def computeforwardmissing(orig, a, b, match=None):
    missing = orig(a, b, match=match)
    repo = a._repo
    if isenabled(repo):
        mb = b.manifest()

        files = []
        sparsematch = repo.maybesparsematch(b.rev())
        if sparsematch:
            sparsemissing = set()
            for f in missing:
                if sparsematch(f):
                    files.append((f, hex(mb[f])))
                    sparsemissing.add(f)
            missing = sparsemissing

        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return missing


# close cache miss server connection after the command has finished
def runcommand(orig, lui, repo, *args, **kwargs):
    fileservice = None
    # repo can be None when running in chg:
    # - at startup, reposetup was called because serve is not norepo
    # - a norepo command like "help" is called
    if repo and isenabled(repo):
        fileservice = repo.fileservice
    try:
        return orig(lui, repo, *args, **kwargs)
    finally:
        if fileservice:
            fileservice.close()


# prevent strip from stripping remotefilelogs
def _collectbrokencsets(orig, repo, files, striprev):
    if isenabled(repo):
        files = list([f for f in files if not repo.shallowmatch(f)])
    return orig(repo, files, striprev)


# changectx wrappers
def filectx(orig, self, path, fileid=None, filelog=None):
    if fileid is None:
        fileid = self.filenode(path)
    if isenabled(self._repo) and self._repo.shallowmatch(path):
        return remotefilectx.remotefilectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
    return orig(self, path, fileid=fileid, filelog=filelog)


def workingfilectx(orig, self, path, filelog=None):
    if isenabled(self._repo) and self._repo.shallowmatch(path):
        return remotefilectx.remoteworkingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
    return orig(self, path, filelog=filelog)


# prefetch required revisions before a diff
def trydiff(
    orig,
    repo,
    revs,
    ctx1,
    ctx2,
    modified,
    added,
    removed,
    copy,
    getfilectx,
    *args,
    **kwargs
):
    if isenabled(repo):
        prefetch = []
        mf1 = ctx1.manifest()
        for fname in modified + added + removed:
            if fname in mf1:
                fnode = getfilectx(fname, ctx1).filenode()
                # fnode can be None if it's an edited working ctx file
                if fnode:
                    prefetch.append((fname, hex(fnode)))
            if fname not in removed:
                fnode = getfilectx(fname, ctx2).filenode()
                if fnode:
                    prefetch.append((fname, hex(fnode)))

        repo.fileservice.prefetch(prefetch)

    return orig(
        repo,
        revs,
        ctx1,
        ctx2,
        modified,
        added,
        removed,
        copy,
        getfilectx,
        *args,
        **kwargs
    )


# Prevent verify from processing files
# a stub for mercurial.hg.verify()
def _verify(orig, repo, level=None):
    lock = repo.lock()
    try:
        return shallowverifier.shallowverifier(repo).verify()
    finally:
        lock.release()


clientonetime = False


def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []

    def addrawrevision(
        orig,
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        if isinstance(link, int):
            pendingfilecommits.append(
                (
                    self,
                    rawtext,
                    transaction,
                    link,
                    p1,
                    p2,
                    node,
                    flags,
                    cachedelta,
                    _metatuple,
                )
            )
            return node
        else:
            return orig(
                self,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                node,
                flags,
                cachedelta,
                _metatuple=_metatuple,
            )

    extensions.wrapfunction(
        remotefilelog.remotefilelog, 'addrawrevision', addrawrevision
    )

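    # changelog.add is wrapped below so that, once the changelog actually
    # gains a revision, the file revisions queued by addrawrevision above
    # are flushed with their integer linkrev resolved to the new node.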
    def changelogadd(orig, self, *args, **kwargs):
        oldlen = len(self)
        node = orig(self, *args, **kwargs)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        b'pending multiple integer revisions are not supported'
                    )
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len({x[3] for x in pendingfilecommits}) > 1:
                raise error.ProgrammingError(
                    b'pending multiple integer revisions are not supported'
                )
        del pendingfilecommits[:]
        return node

    extensions.wrapfunction(changelog.changelog, 'add', changelogadd)


def getrenamedfn(orig, repo, endrev=None):
    if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
        return orig(repo, endrev)

    rcache = {}

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed


def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _(b"filelog requires a pattern"))
    m = matchmod.match(
        repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
    )
    s = set()

    if not matchmod.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])


@command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    """garbage collect the client and server filelog caches"""
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = []
    pwd = ui.environ.get(b'PWD')
    if pwd:
        repopaths.append(pwd)

    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)


def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, b'repos')
    if not os.path.exists(repospath):
        ui.warn(_(b"no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, b'rb')
    repos = {r[:-1] for r in reposfile.readlines()}
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(
        _(b"analyzing repositories"), unit=b"repos", total=len(repos)
    )
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = util.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

907 if not util.safehasattr(repo, b'name'):
907 if not util.safehasattr(repo, 'name'):
908 ui.warn(
908 ui.warn(
909 _(b"repo %s is a misconfigured remotefilelog repo\n") % path
909 _(b"repo %s is a misconfigured remotefilelog repo\n") % path
910 )
910 )
911 continue
911 continue
912
912
913 # If garbage collection on repack and repack on hg gc are enabled
913 # If garbage collection on repack and repack on hg gc are enabled
914 # then loose files are repacked and garbage collected.
914 # then loose files are repacked and garbage collected.
915 # Otherwise regular garbage collection is performed.
915 # Otherwise regular garbage collection is performed.
916 repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
916 repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
917 gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
917 gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
918 if repackonhggc and gcrepack:
918 if repackonhggc and gcrepack:
919 try:
919 try:
920 repackmod.incrementalrepack(repo)
920 repackmod.incrementalrepack(repo)
921 filesrepacked = True
921 filesrepacked = True
922 continue
922 continue
923 except (IOError, repackmod.RepackAlreadyRunning):
923 except (IOError, repackmod.RepackAlreadyRunning):
924 # If repack cannot be performed due to not enough disk space
924 # If repack cannot be performed due to not enough disk space
925 # continue doing garbage collection of loose files w/o repack
925 # continue doing garbage collection of loose files w/o repack
926 pass
926 pass
927
927
928 reponame = repo.name
928 reponame = repo.name
929 if not sharedcache:
929 if not sharedcache:
930 sharedcache = repo.sharedstore
930 sharedcache = repo.sharedstore
931
931
932 # Compute a keepset which is not garbage collected
932 # Compute a keepset which is not garbage collected
933 def keyfn(fname, fnode):
933 def keyfn(fname, fnode):
934 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
934 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
935
935
936 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
936 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
937
937
938 progress.complete()
938 progress.complete()
939
939
940 # write list of valid repos back
940 # write list of valid repos back
941 oldumask = os.umask(0o002)
941 oldumask = os.umask(0o002)
942 try:
942 try:
943 reposfile = open(repospath, b'wb')
943 reposfile = open(repospath, b'wb')
944 reposfile.writelines([(b"%s\n" % r) for r in validrepos])
944 reposfile.writelines([(b"%s\n" % r) for r in validrepos])
945 reposfile.close()
945 reposfile.close()
946 finally:
946 finally:
947 os.umask(oldumask)
947 os.umask(oldumask)
948
948
949 # prune cache
949 # prune cache
950 if sharedcache is not None:
950 if sharedcache is not None:
951 sharedcache.gc(keepkeys)
951 sharedcache.gc(keepkeys)
952 elif not filesrepacked:
952 elif not filesrepacked:
953 ui.warn(_(b"warning: no valid repos in repofile\n"))
953 ui.warn(_(b"warning: no valid repos in repofile\n"))
954
954
955
955
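# Illustrative sketch (not part of this module): the pruning model used
# above. Cache entries are addressed by keys such as those produced by
# fileserverclient.getcachekey(reponame, fname, hex(fnode)); anything not in
# the accumulated keepset is eligible for deletion. The helper below is a
# hypothetical, simplified stand-in for sharedcache.gc(keepkeys).
def _prune_cache_sketch(entries, keepkeys):
    # entries: mapping of cache key -> file path; keepkeys: keys to preserve
    for key, filepath in list(entries.items()):
        if key not in keepkeys:
            os.remove(filepath)  # drop the unreferenced cache entry
            del entries[key]
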
def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get('follow')
    revs = opts.get('rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts['removed'] = True

        # If this is a non-follow log without any revs specified, recommend that
        # the user add -f to speed it up.
        if not follow and not revs:
            match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(
                    _(
                        b"warning: file log can be slow on large repos - "
                        + b"use -f to speed it up\n"
                    )
                )

    return orig(ui, repo, *pats, **opts)

def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is set to 14 days. If 'prefetchdays' is
    set to zero or a negative value, the date restriction is not applied.
    """
    days = ui.configint(b'remotefilelog', b'prefetchdays')
    if days > 0:
        revset = b'(%s) & date(-%s)' % (revset, days)
    return revset

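# Worked example (illustrative): with remotefilelog.prefetchdays=14,
# revdatelimit(ui, b'.+draft()') returns b'(.+draft()) & date(-14)', i.e.
# the original revset intersected with changesets from the last 14 days.
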
def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. Default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
    fname = repo.vfs.join(b'lastprefetch')

    ready = False
    with open(fname, b'a'):
        # the with construct above is used to avoid race conditions
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready

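# Illustrative sketch (not part of this module): the same mtime-based
# throttle in isolation. A marker file's modification time records the last
# run; a run is allowed only once at least `timeout` seconds have elapsed,
# and allowing it refreshes the mtime for the next caller.
def _throttle_sketch(markerpath, timeout):
    with open(markerpath, 'a'):  # create the marker if it does not exist
        modtime = os.path.getmtime(markerpath)
        if time.time() - modtime > timeout:
            os.utime(markerpath, None)  # reset the clock
            return True
    return False
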
def wcpprefetch(ui, repo, **kwargs):
    """Prefetches, in the background, the revisions specified by the
    bgprefetchrevs revset. Also does a background repack if the
    backgroundrepack flag is set in the config.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
    # update a revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon(unused_success):
        if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

    repo._afterlock(anon)

def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch')
        bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
        bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')

        if prefetchrevset:
            ui.status(_(b"prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo[b'.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result

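# Behaviour summary for the wrapper above (derived from the code, shown here
# for reference): after a pull on a shallow repo,
#   pullprefetch set + backgroundprefetch on  -> prefetch (and repack, if
#                                                enabled) in the background
#   pullprefetch set + backgroundprefetch off -> prefetch in the foreground,
#                                                then repack in background
#                                                if backgroundrepack is on
#   pullprefetch unset + backgroundrepack on  -> only a background
#                                                incremental repack
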
def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(
        orig, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(
            source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
        )

    if util.safehasattr(remote, '_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, 'getbundle'):
        extensions.wrapfunction(remote, 'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)

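# Illustrative sketch (not part of this module): the wrapping pattern that
# extensions.wrapfunction applies above, shown with plain Python for a
# hypothetical peer object. The wrapper closes over the original callable and
# forwards everything, after adding one capability to the set.
def _wrap_getbundle_sketch(peer, capability):
    orig = peer.getbundle

    def wrapped(source, bundlecaps=None, **kwargs):
        caps = set(bundlecaps or ())
        caps.add(capability)  # advertise the extra capability
        return orig(source, bundlecaps=caps, **kwargs)

    peer.getbundle = wrapped
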
def _fileprefetchhook(repo, revmatches):
    if isenabled(repo):
        allfiles = []
        for rev, match in revmatches:
            if rev == wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)

@command(
    b'debugremotefilelog',
    [
        (b'd', b'decompress', None, _(b'decompress the filelog first')),
    ],
    _(b'hg debugremotefilelog <path>'),
    norepo=True,
)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)


@command(
    b'verifyremotefilelog',
    [
        (b'd', b'decompress', None, _(b'decompress the filelogs first')),
    ],
    _(b'hg verifyremotefilelogs <directory>'),
    norepo=True,
)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)


@command(
    b'debugdatapack',
    [
        (b'', b'long', None, _(b'print the long hashes')),
        (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
    ],
    _(b'hg debugdatapack <paths>'),
    norepo=True,
)
def debugdatapack(ui, *paths, **opts):
    return debugcommands.debugdatapack(ui, *paths, **opts)


@command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    return debugcommands.debughistorypack(ui, path)


@command(b'debugkeepset', [], _(b'hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))

    repackmod.keepset(repo, keyfn)
    return


@command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    return debugcommands.debugwaitonrepack(repo)


@command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    return debugcommands.debugwaitonprefetch(repo)

def resolveprefetchopts(ui, opts):
    if not opts.get(b'rev'):
        revset = [b'.', b'draft()']

        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
        if prefetchrevset:
            revset.append(b'(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
        if bgprefetchrevs:
            revset.append(b'(%s)' % bgprefetchrevs)
        revset = b'+'.join(revset)

        # update a revset with a date limit
        revset = revdatelimit(ui, revset)

        opts[b'rev'] = [revset]

    if not opts.get(b'base'):
        opts[b'base'] = None

    return opts

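# Worked example (illustrative): with pullprefetch='master' and
# bgprefetchrevs unset, the default revset becomes b'.+draft()+(master)',
# which revdatelimit() then turns into b'(.+draft()+(master)) & date(-14)'
# under the default 14-day prefetch window.
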
@command(
    b'prefetch',
    [
        (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
        (b'', b'repack', False, _(b'run repack after prefetch')),
        (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
    ]
    + commands.walkopts,
    _(b'hg prefetch [OPTIONS] [FILE...]'),
    helpcategory=command.CATEGORY_MAINTENANCE,
)
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetches file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_(b"repo is not shallow"))

    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get(b'rev'))
    repo.prefetch(revs, opts.get(b'base'), pats, opts)

    # Run repack in background
    if opts.get(b'repack'):
        repackmod.backgroundrepack(repo, incremental=True)

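# Example invocation (illustrative): `hg prefetch -r 'draft()' --repack`
# fetches file contents for all draft changesets and then kicks off an
# incremental repack in the background.
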
@command(
    b'repack',
    [
        (b'', b'background', None, _(b'run in a background process'), None),
        (b'', b'incremental', None, _(b'do an incremental repack'), None),
        (
            b'',
            b'packsonly',
            None,
            _(b'only repack packs (skip loose objects)'),
            None,
        ),
    ],
    _(b'hg repack [OPTIONS]'),
)
def repack_(ui, repo, *pats, **opts):
    if opts.get('background'):
        repackmod.backgroundrepack(
            repo,
            incremental=opts.get('incremental'),
            packsonly=opts.get('packsonly', False),
        )
        return

    options = {b'packsonly': opts.get('packsonly')}

    try:
        if opts.get('incremental'):
            repackmod.incrementalrepack(repo, options=options)
        else:
            repackmod.fullrepack(repo, options=options)
    except repackmod.RepackAlreadyRunning as ex:
        # Don't propagate the exception if the repack is already in
        # progress, since we want the command to exit 0.
        repo.ui.warn(b'%s\n' % ex)
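
# Example invocation (illustrative): `hg repack --incremental --background`
# runs a cheaper incremental repack in a detached process; without flags the
# command performs a full repack in the foreground and, as the handler above
# shows, still exits 0 when another repack is already running.
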
@@ -1,446 +1,446 @@
# remotefilelogserver.py - server logic for a remotefilelog server
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import os
import stat
import time
import zlib

from mercurial.i18n import _
from mercurial.node import bin, hex
from mercurial.pycompat import open
from mercurial import (
    changegroup,
    changelog,
    context,
    error,
    extensions,
    match,
    scmutil,
    store,
    streamclone,
    util,
    wireprotoserver,
    wireprototypes,
    wireprotov1server,
)
from . import (
    constants,
    shallowutil,
)

_sshv1server = wireprotoserver.sshv1protocolhandler

def setupserver(ui, repo):
    """Sets up a normal Mercurial repo so it can serve files to shallow repos."""
    onetimesetup(ui)

    # don't send files to shallow clients during pulls
    def generatefiles(
        orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
    ):
        caps = self._bundlecaps or []
        if constants.BUNDLE2_CAPABLITY in caps:
            # only send files that don't match the specified patterns
            includepattern = None
            excludepattern = None
            for cap in self._bundlecaps or []:
                if cap.startswith(b"includepattern="):
                    includepattern = cap[len(b"includepattern=") :].split(b'\0')
                elif cap.startswith(b"excludepattern="):
                    excludepattern = cap[len(b"excludepattern=") :].split(b'\0')

            m = match.always()
            if includepattern or excludepattern:
                m = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )

            changedfiles = list([f for f in changedfiles if not m(f)])
        return orig(
            self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
        )

    extensions.wrapfunction(
        changegroup.cgpacker, 'generatefiles', generatefiles
    )

onetime = False


def onetimesetup(ui):
    """Configures the wireprotocol for both clients and servers."""
    global onetime
    if onetime:
        return
    onetime = True

    # support file content requests
    wireprotov1server.wireprotocommand(
        b'x_rfl_getflogheads', b'path', permission=b'pull'
    )(getflogheads)
    wireprotov1server.wireprotocommand(
        b'x_rfl_getfiles', b'', permission=b'pull'
    )(getfiles)
    wireprotov1server.wireprotocommand(
        b'x_rfl_getfile', b'file node', permission=b'pull'
    )(getfile)

    class streamstate:
        match = None
        shallowremote = False
        noflatmf = False

    state = streamstate()

    def stream_out_shallow(repo, proto, other):
        includepattern = None
        excludepattern = None
        raw = other.get(b'includepattern')
        if raw:
            includepattern = raw.split(b'\0')
        raw = other.get(b'excludepattern')
        if raw:
            excludepattern = raw.split(b'\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        oldnoflatmf = state.noflatmf
        try:
            state.shallowremote = True
            state.match = match.always()
            state.noflatmf = other.get(b'noflatmanifest') == b'True'
            if includepattern or excludepattern:
                state.match = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )
            streamres = wireprotov1server.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)

            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value

            return wireprototypes.streamres(gen())
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
            state.noflatmf = oldnoflatmf

    wireprotov1server.commands[b'stream_out_shallow'] = (
        stream_out_shallow,
        b'*',
    )

    # don't clone filelogs to shallow clients
    def _walkstreamfiles(
        orig, repo, matcher=None, phase=False, obsolescence=False
    ):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowutil.isenabled(repo):
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, b'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + b'/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith(b'.i') and not fp.endswith(
                                b'.d'
                            ):
                                n = util.pconvert(fp[striplen:])
                                d = store.decodedir(n)
                                yield store.SimpleStoreEntry(
                                    entry_path=d,
                                    is_volatile=False,
                                    file_size=st.st_size,
                                )

                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            if scmutil.istreemanifest(repo):
                for entry in repo.store.data_entries():
                    if not entry.is_revlog:
                        continue
                    if entry.is_manifestlog:
                        yield entry

            # Return .d and .i files that do not match the shallow pattern
            match = state.match
            if match and not match.always():
                for entry in repo.store.data_entries():
                    if not entry.is_revlog:
                        continue
                    if not state.match(entry.target_id):
                        yield entry

            for x in repo.store.top_entries():
                if state.noflatmf and x[1][:11] == b'00manifest.':
                    continue
                yield x

        elif shallowutil.isenabled(repo):
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(
                _(b"Cannot clone from a shallow repo to a full repo.")
            )
        else:
            for x in orig(
                repo, matcher, phase=phase, obsolescence=obsolescence
            ):
                yield x

    extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles)

    # expose remotefilelog capabilities
    def _capabilities(orig, repo, proto):
        caps = orig(repo, proto)
        if shallowutil.isenabled(repo) or ui.configbool(
            b'remotefilelog', b'server'
        ):
            if isinstance(proto, _sshv1server):
                # legacy getfiles method which only works over ssh
                caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
            caps.append(b'x_rfl_getflogheads')
            caps.append(b'x_rfl_getfile')
        return caps

    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)

    def _adjustlinkrev(orig, self, *args, **kwargs):
        # When generating file blobs, taking the real path is too slow on large
        # repos, so force it to just return the linkrev directly.
        repo = self._repo
        if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
            return self._filelog.linkrev(self._filelog.rev(self._filenode))
        return orig(self, *args, **kwargs)

    extensions.wrapfunction(
        context.basefilectx, '_adjustlinkrev', _adjustlinkrev
    )

    def _iscmd(orig, cmd):
        if cmd == b'x_rfl_getfiles':
            return False
        return orig(cmd)

    extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd)

def _loadfileblob(repo, cachepath, path, node):
    filecachepath = os.path.join(cachepath, path, hex(node))
    if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
        filectx = repo.filectx(path, fileid=node)
        if filectx.node() == repo.nullid:
            repo.changelog = changelog.changelog(repo.svfs)
            filectx = repo.filectx(path, fileid=node)

        text = createfileblob(filectx)
        # TODO configurable compression engines
        text = zlib.compress(text)

        # everything should be user & group read/writable
        oldumask = os.umask(0o002)
        try:
            dirname = os.path.dirname(filecachepath)
            if not os.path.exists(dirname):
                try:
                    os.makedirs(dirname)
                except FileExistsError:
                    pass

            f = None
            try:
                f = util.atomictempfile(filecachepath, b"wb")
                f.write(text)
            except (IOError, OSError):
                # Don't abort if the user only has permission to read,
                # and not write.
                pass
            finally:
                if f:
                    f.close()
        finally:
            os.umask(oldumask)
    else:
        with open(filecachepath, b"rb") as f:
            text = f.read()
    return text

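# Layout note (illustrative): server cache entries live at
# <cachepath>/<file path>/<40-char hex node>, so with the default cache
# location a blob for foo/bar.py would sit under
# .hg/remotefilelogcache/foo/bar.py/ and can be located with a single
# os.path.join as above.
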
def getflogheads(repo, proto, path):
    """A server api for requesting a filelog's heads"""
    flog = repo.file(path)
    heads = flog.heads()
    return b'\n'.join((hex(head) for head in heads if head != repo.nullid))

def getfile(repo, proto, file, node):
    """A server api for requesting a particular version of a file. Can be used
    in batches to request many files at once. The return protocol is:
    <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
    non-zero for an error.

    data is a compressed blob with revlog flag and ancestors information. See
    createfileblob for its content.
    """
    if shallowutil.isenabled(repo):
        return b'1\0' + _(b'cannot fetch remote files from shallow repo')
    cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
    if not cachepath:
        cachepath = os.path.join(repo.path, b"remotefilelogcache")
    node = bin(node.strip())
    if node == repo.nullid:
        return b'0\0'
    return b'0\0' + _loadfileblob(repo, cachepath, file, node)

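# Illustrative sketch (not part of this module): decoding a getfile reply on
# the client side, following the <errorcode>\0<payload> protocol documented
# above. The payload of a successful reply is a zlib-compressed fileblob;
# the null node yields an empty payload.
def _parse_getfile_reply_sketch(reply):
    code, _sep, payload = reply.partition(b'\0')
    if code != b'0':
        raise RuntimeError(payload.decode('utf-8', 'replace'))  # server error
    return zlib.decompress(payload) if payload else b''
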
def getfiles(repo, proto):
    """A server api for requesting particular versions of particular files."""
    if shallowutil.isenabled(repo):
        raise error.Abort(_(b'cannot fetch remote files from shallow repo'))
    if not isinstance(proto, _sshv1server):
        raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol'))

    def streamer():
        fin = proto._fin

        cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
        if not cachepath:
            cachepath = os.path.join(repo.path, b"remotefilelogcache")

        while True:
            request = fin.readline()[:-1]
            if not request:
                break

            node = bin(request[:40])
            if node == repo.nullid:
                yield b'0\n'
                continue

            path = request[40:]

            text = _loadfileblob(repo, cachepath, path, node)

            yield b'%d\n%s' % (len(text), text)

            # it would be better to only flush after processing a whole batch
            # but currently we don't know if there are more requests coming
            proto._fout.flush()

    return wireprototypes.streamres(streamer())

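# Wire format note (illustrative): each getfiles request line is the 40-char
# hex node immediately followed by the file path, terminated by a newline,
# and each reply is b'<length>\n' followed by that many bytes of compressed
# blob (or b'0\n' for the null node). An empty request line ends the batch.
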
def createfileblob(filectx):
    """
    format:
        v0:
            str(len(rawtext)) + '\0' + rawtext + ancestortext
        v1:
            'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
            metalist := metalist + '\n' + meta | meta
            meta := sizemeta | flagmeta
            sizemeta := METAKEYSIZE + str(len(rawtext))
            flagmeta := METAKEYFLAG + str(flag)

    note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
    length of 1.
    """
    flog = filectx.filelog()
    frev = filectx.filerev()
    revlogflags = flog._revlog.flags(frev)
    if revlogflags == 0:
        # normal files
        text = filectx.data()
    else:
        # lfs, read raw revision data
        text = flog.rawdata(frev)

    repo = filectx._repo

    ancestors = [filectx]

    try:
        repo.forcelinkrev = True
        ancestors.extend([f for f in filectx.ancestors()])

        ancestortext = b""
        for ancestorctx in ancestors:
            parents = ancestorctx.parents()
            p1 = repo.nullid
            p2 = repo.nullid
            if len(parents) > 0:
                p1 = parents[0].filenode()
            if len(parents) > 1:
                p2 = parents[1].filenode()

            copyname = b""
            rename = ancestorctx.renamed()
            if rename:
                copyname = rename[0]
            linknode = ancestorctx.node()
            ancestortext += b"%s%s%s%s%s\0" % (
                ancestorctx.filenode(),
                p1,
                p2,
                linknode,
                copyname,
            )
    finally:
        repo.forcelinkrev = False

    header = shallowutil.buildfileblobheader(len(text), revlogflags)

    return b"%s\0%s%s" % (header, text, ancestortext)

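# Illustrative sketch (not part of this module): splitting a fileblob per
# the docstring above. The header runs up to the first NUL; for v1 blobs it
# is b'v1\n' followed by newline-separated meta entries, each keyed by a
# single byte (exposed by shallowutil as METAKEYSIZE / METAKEYFLAG; the
# exact key bytes are assumed here to be whatever those constants hold).
def _parse_fileblob_sketch(blob, metakeysize, metakeyflag):
    header, _sep, rest = blob.partition(b'\0')
    size = None
    flags = 0
    if header.startswith(b'v1\n'):
        for meta in header[3:].split(b'\n'):
            if meta[:1] == metakeysize:
                size = int(meta[1:])
            elif meta[:1] == metakeyflag:
                flags = int(meta[1:])
    else:
        size = int(header)  # v0: the header is just the rawtext length
    rawtext, ancestortext = rest[:size], rest[size:]
    return rawtext, flags, ancestortext
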
def gcserver(ui, repo):
    if not repo.ui.configbool(b"remotefilelog", b"server"):
        return

    neededfiles = set()
    heads = repo.revs(b"heads(tip~25000:) - null")

    cachepath = repo.vfs.join(b"remotefilelogcache")
    for head in heads:
        mf = repo[head].manifest()
        for filename, filenode in mf.items():
            filecachepath = os.path.join(cachepath, filename, hex(filenode))
            neededfiles.add(filecachepath)

    # delete unneeded older files
    days = repo.ui.configint(b"remotefilelog", b"serverexpiration")
    expiration = time.time() - (days * 24 * 60 * 60)

    progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files")
    progress.update(0)
    for root, dirs, files in os.walk(cachepath):
        for file in files:
            filepath = os.path.join(root, file)
            progress.increment()
            if filepath in neededfiles:
                continue

            stat = os.stat(filepath)
            if stat.st_mtime < expiration:
                os.remove(filepath)

    progress.complete()