@@ -1,1103 +1,1102 b''
|
1 | 1 | # __init__.py - remotefilelog extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL) |
|
8 | 8 | |
|
9 | 9 | This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY |
|
10 | 10 | GUARANTEES. This means that repositories created with this extension may |
|
11 | 11 | only be usable with the exact version of this extension/Mercurial that was |
|
12 | 12 | used. The extension attempts to enforce this in order to prevent repository |
|
13 | 13 | corruption. |
|
14 | 14 | |
|
15 | 15 | remotefilelog works by fetching file contents lazily and storing them |
|
16 | 16 | in a cache on the client rather than in revlogs. This allows enormous |
|
17 | 17 | histories to be transferred only partially, making them easier to |
|
18 | 18 | operate on. |
|
19 | 19 | |
|
20 | 20 | Configs: |
|
21 | 21 | |
|
22 | 22 | ``packs.maxchainlen`` specifies the maximum delta chain length in pack files |
|
23 | 23 | ``packs.maxpacksize`` specifies the maximum pack file size |
|
24 | 24 | ``packs.maxpackfilecount`` specifies the maximum number of packs in the |
|
25 | 25 | shared cache (trees only for now) |
|
26 | 26 | ``remotefilelog.backgroundprefetch`` runs prefetch in background when True |
|
27 | 27 | ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and |
|
28 | 28 | update, and on other commands that use them. Different from pullprefetch. |
|
29 | 29 | ``remotefilelog.gcrepack`` does garbage collection during repack when True |
|
30 | 30 | ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before |
|
31 | 31 | it is garbage collected |
|
32 | 32 | ``remotefilelog.repackonhggc`` runs repack on hg gc when True |
|
33 | 33 | ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in |
|
34 | 34 | days after which it is no longer prefetched. |
|
35 | 35 | ``remotefilelog.prefetchdelay`` specifies delay between background |
|
36 | 36 | prefetches in seconds after operations that change the working copy parent |
|
37 | 37 | ``remotefilelog.data.gencountlimit`` constrains the minimum number of data |
|
38 | 38 | pack files required to be considered part of a generation. In particular, |
|
39 | 39 | minimum number of pack files > gencountlimit. |
|
40 | 40 | ``remotefilelog.data.generations`` list for specifying the lower bound of |
|
41 | 41 | each generation of the data pack files. For example, list ['100MB','1MB'] |
|
42 | 42 | or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [ |
|
43 | 43 | 1MB, 100MB) and [100MB, infinity). |
|
44 | 44 | ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to |
|
45 | 45 | include in an incremental data repack. |
|
46 | 46 | ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for |
|
47 | 47 | it to be considered for an incremental data repack. |
|
48 | 48 | ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files |
|
49 | 49 | to include in an incremental data repack. |
|
50 | 50 | ``remotefilelog.history.gencountlimit`` constrains the minimum number of |
|
51 | 51 | history pack files required to be considered part of a generation. In |
|
52 | 52 | particular, minimum number of pack files > gencountlimit. |
|
53 | 53 | ``remotefilelog.history.generations`` list for specifying the lower bound of |
|
54 | 54 | each generation of the history pack files. For example, list [ |
|
55 | 55 | '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [ |
|
56 | 56 | 0, 1MB), [1MB, 100MB) and [100MB, infinity). |
|
57 | 57 | ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to |
|
58 | 58 | include in an incremental history repack. |
|
59 | 59 | ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file |
|
60 | 60 | for it to be considered for an incremental history repack. |
|
61 | 61 | ``remotefilelog.history.repacksizelimit`` the maximum total size of pack |
|
62 | 62 | files to include in an incremental history repack. |
|
63 | 63 | ``remotefilelog.backgroundrepack`` automatically consolidate packs in the |
|
64 | 64 | background |
|
65 | 65 | ``remotefilelog.cachepath`` path to cache |
|
66 | 66 | ``remotefilelog.cachegroup`` if set, make cache directory sgid to this |
|
67 | 67 | group |
|
68 | 68 | ``remotefilelog.cacheprocess`` binary to invoke for fetching file data |
|
69 | 69 | ``remotefilelog.debug`` turn on remotefilelog-specific debug output |
|
70 | 70 | ``remotefilelog.excludepattern`` pattern of files to exclude from pulls |
|
71 | 71 | ``remotefilelog.includepattern`` pattern of files to include in pulls |
|
72 | 72 | ``remotefilelog.fetchwarning``: message to print when too many |
|
73 | 73 | single-file fetches occur |
|
74 | 74 | ``remotefilelog.getfilesstep`` number of files to request in a single RPC |
|
75 | 75 | ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch |
|
76 | 76 | files, otherwise use optimistic fetching |
|
77 | 77 | ``remotefilelog.pullprefetch`` revset for selecting files that should be |
|
78 | 78 | eagerly downloaded rather than lazily |
|
79 | 79 | ``remotefilelog.reponame`` name of the repo. If set, used to partition |
|
80 | 80 | data from other repos in a shared store. |
|
81 | 81 | ``remotefilelog.server`` if true, enable server-side functionality |
|
82 | 82 | ``remotefilelog.servercachepath`` path for caching blobs on the server |
|
83 | 83 | ``remotefilelog.serverexpiration`` number of days to keep cached server |
|
84 | 84 | blobs |
|
85 | 85 | ``remotefilelog.validatecache`` if set, check cache entries for corruption |
|
86 | 86 | before returning blobs |
|
87 | 87 | ``remotefilelog.validatecachelog`` if set, check cache entries for |
|
88 | 88 | corruption before returning metadata |
|
89 | 89 | |
|
90 | 90 | """ |
|
91 | 91 | from __future__ import absolute_import |
|
92 | 92 | |
|
93 | 93 | import os |
|
94 | 94 | import time |
|
95 | 95 | import traceback |
|
96 | 96 | |
|
97 | 97 | from mercurial.node import hex |
|
98 | 98 | from mercurial.i18n import _ |
|
99 | 99 | from mercurial import ( |
|
100 | 100 | changegroup, |
|
101 | 101 | changelog, |
|
102 | 102 | cmdutil, |
|
103 | 103 | commands, |
|
104 | 104 | configitems, |
|
105 | 105 | context, |
|
106 | 106 | copies, |
|
107 | 107 | debugcommands as hgdebugcommands, |
|
108 | 108 | dispatch, |
|
109 | 109 | error, |
|
110 | 110 | exchange, |
|
111 | 111 | extensions, |
|
112 | 112 | hg, |
|
113 | 113 | localrepo, |
|
114 | 114 | match, |
|
115 | 115 | merge, |
|
116 | 116 | node as nodemod, |
|
117 | 117 | patch, |
|
118 | 118 | registrar, |
|
119 | 119 | repair, |
|
120 | 120 | repoview, |
|
121 | 121 | revset, |
|
122 | 122 | scmutil, |
|
123 | 123 | smartset, |
|
124 | 124 | streamclone, |
|
125 | 125 | templatekw, |
|
126 | 126 | util, |
|
127 | 127 | ) |
|
128 | 128 | from . import ( |
|
129 | 129 | constants, |
|
130 | 130 | debugcommands, |
|
131 | 131 | fileserverclient, |
|
132 | 132 | remotefilectx, |
|
133 | 133 | remotefilelog, |
|
134 | 134 | remotefilelogserver, |
|
135 | 135 | repack as repackmod, |
|
136 | 136 | shallowbundle, |
|
137 | 137 | shallowrepo, |
|
138 | 138 | shallowstore, |
|
139 | 139 | shallowutil, |
|
140 | 140 | shallowverifier, |
|
141 | 141 | ) |
|
142 | 142 | |
|
143 | 143 | # ensures debug commands are registered |
|
144 | 144 | hgdebugcommands.command |
|
145 | 145 | |
|
146 | 146 | cmdtable = {} |
|
147 | 147 | command = registrar.command(cmdtable) |
|
148 | 148 | |
|
149 | 149 | configtable = {} |
|
150 | 150 | configitem = registrar.configitem(configtable) |
|
151 | 151 | |
|
152 | 152 | configitem('remotefilelog', 'debug', default=False) |
|
153 | 153 | |
|
154 | 154 | configitem('remotefilelog', 'reponame', default='') |
|
155 | 155 | configitem('remotefilelog', 'cachepath', default=None) |
|
156 | 156 | configitem('remotefilelog', 'cachegroup', default=None) |
|
157 | 157 | configitem('remotefilelog', 'cacheprocess', default=None) |
|
158 | 158 | configitem('remotefilelog', 'cacheprocess.includepath', default=None) |
|
159 | 159 | configitem("remotefilelog", "cachelimit", default="1000 GB") |
|
160 | 160 | |
|
161 | 161 | configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault, |
|
162 | 162 | alias=[('remotefilelog', 'fallbackrepo')]) |
|
163 | 163 | |
|
164 | 164 | configitem('remotefilelog', 'validatecachelog', default=None) |
|
165 | 165 | configitem('remotefilelog', 'validatecache', default='on') |
|
166 | 166 | configitem('remotefilelog', 'server', default=None) |
|
167 | 167 | configitem('remotefilelog', 'servercachepath', default=None) |
|
168 | 168 | configitem("remotefilelog", "serverexpiration", default=30) |
|
169 | 169 | configitem('remotefilelog', 'backgroundrepack', default=False) |
|
170 | 170 | configitem('remotefilelog', 'bgprefetchrevs', default=None) |
|
171 | 171 | configitem('remotefilelog', 'pullprefetch', default=None) |
|
172 | 172 | configitem('remotefilelog', 'backgroundprefetch', default=False) |
|
173 | 173 | configitem('remotefilelog', 'prefetchdelay', default=120) |
|
174 | 174 | configitem('remotefilelog', 'prefetchdays', default=14) |
|
175 | 175 | |
|
176 | 176 | configitem('remotefilelog', 'getfilesstep', default=10000) |
|
177 | 177 | configitem('remotefilelog', 'getfilestype', default='optimistic') |
|
178 | 178 | configitem('remotefilelog', 'batchsize', configitems.dynamicdefault) |
|
179 | 179 | configitem('remotefilelog', 'fetchwarning', default='') |
|
180 | 180 | |
|
181 | 181 | configitem('remotefilelog', 'includepattern', default=None) |
|
182 | 182 | configitem('remotefilelog', 'excludepattern', default=None) |
|
183 | 183 | |
|
184 | 184 | configitem('remotefilelog', 'gcrepack', default=False) |
|
185 | 185 | configitem('remotefilelog', 'repackonhggc', default=False) |
|
186 | 186 | configitem('remotefilelog', 'datapackversion', default=0) |
|
187 | 187 | configitem('repack', 'chainorphansbysize', default=True) |
|
188 | 188 | |
|
189 | 189 | configitem('packs', 'maxpacksize', default=0) |
|
190 | 190 | configitem('packs', 'maxchainlen', default=1000) |
|
191 | 191 | |
|
192 | 192 | configitem('remotefilelog', 'historypackv1', default=False) |
|
193 | 193 | # default TTL limit is 30 days |
|
194 | 194 | _defaultlimit = 60 * 60 * 24 * 30 |
|
195 | 195 | configitem('remotefilelog', 'nodettl', default=_defaultlimit) |
|
196 | 196 | |
|
197 | 197 | configitem('remotefilelog', 'data.gencountlimit', default=2), |
|
198 | 198 | configitem('remotefilelog', 'data.generations', |
|
199 | 199 | default=['1GB', '100MB', '1MB']) |
|
200 | 200 | configitem('remotefilelog', 'data.maxrepackpacks', default=50) |
|
201 | 201 | configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB') |
|
202 | 202 | configitem('remotefilelog', 'data.repacksizelimit', default='100MB') |
|
203 | 203 | |
|
204 | 204 | configitem('remotefilelog', 'history.gencountlimit', default=2), |
|
205 | 205 | configitem('remotefilelog', 'history.generations', default=['100MB']) |
|
206 | 206 | configitem('remotefilelog', 'history.maxrepackpacks', default=50) |
|
207 | 207 | configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB') |
|
208 | 208 | configitem('remotefilelog', 'history.repacksizelimit', default='100MB') |
|
209 | 209 | |
|
210 | 210 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
211 | 211 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
212 | 212 | # be specifying the version(s) of Mercurial they are tested with, or |
|
213 | 213 | # leave the attribute unspecified. |
|
214 | 214 | testedwith = 'ships-with-hg-core' |
|
215 | 215 | |
|
216 | 216 | repoclass = localrepo.localrepository |
|
217 | 217 | repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT) |
|
218 | 218 | |
|
219 | isenabled = shallowutil.isenabled | |
|
220 | ||
|
219 | 221 | def uisetup(ui): |
|
220 | 222 | """Wraps user facing Mercurial commands to swap them out with shallow |
|
221 | 223 | versions. |
|
222 | 224 | """ |
|
223 | 225 | hg.wirepeersetupfuncs.append(fileserverclient.peersetup) |
|
224 | 226 | |
|
225 | 227 | entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow) |
|
226 | 228 | entry[1].append(('', 'shallow', None, |
|
227 | 229 | _("create a shallow clone which uses remote file " |
|
228 | 230 | "history"))) |
|
229 | 231 | |
|
230 | 232 | extensions.wrapcommand(commands.table, 'debugindex', |
|
231 | 233 | debugcommands.debugindex) |
|
232 | 234 | extensions.wrapcommand(commands.table, 'debugindexdot', |
|
233 | 235 | debugcommands.debugindexdot) |
|
234 | 236 | extensions.wrapcommand(commands.table, 'log', log) |
|
235 | 237 | extensions.wrapcommand(commands.table, 'pull', pull) |
|
236 | 238 | |
|
237 | 239 | # Prevent 'hg manifest --all' |
|
238 | 240 | def _manifest(orig, ui, repo, *args, **opts): |
|
239 | if (constants.SHALLOWREPO_REQUIREMENT in repo.requirements | |
|
240 | and opts.get('all')): | |
|
241 | if (isenabled(repo) and opts.get('all')): | |
|
241 | 242 | raise error.Abort(_("--all is not supported in a shallow repo")) |
|
242 | 243 | |
|
243 | 244 | return orig(ui, repo, *args, **opts) |
|
244 | 245 | extensions.wrapcommand(commands.table, "manifest", _manifest) |
|
245 | 246 | |
|
246 | 247 | # Wrap remotefilelog with lfs code |
|
247 | 248 | def _lfsloaded(loaded=False): |
|
248 | 249 | lfsmod = None |
|
249 | 250 | try: |
|
250 | 251 | lfsmod = extensions.find('lfs') |
|
251 | 252 | except KeyError: |
|
252 | 253 | pass |
|
253 | 254 | if lfsmod: |
|
254 | 255 | lfsmod.wrapfilelog(remotefilelog.remotefilelog) |
|
255 | 256 | fileserverclient._lfsmod = lfsmod |
|
256 | 257 | extensions.afterloaded('lfs', _lfsloaded) |
|
257 | 258 | |
|
258 | 259 | # debugdata needs remotefilelog.len to work |
|
259 | 260 | extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow) |
|
260 | 261 | |
|
261 | 262 | def cloneshallow(orig, ui, repo, *args, **opts): |
|
262 | 263 | if opts.get('shallow'): |
|
263 | 264 | repos = [] |
|
264 | 265 | def pull_shallow(orig, self, *args, **kwargs): |
|
265 | if constants.SHALLOWREPO_REQUIREMENT not in self.requirements: | |
|
266 | if not isenabled(self): | |
|
266 | 267 | repos.append(self.unfiltered()) |
|
267 | 268 | # set up the client hooks so the post-clone update works |
|
268 | 269 | setupclient(self.ui, self.unfiltered()) |
|
269 | 270 | |
|
270 | 271 | # setupclient fixed the class on the repo itself |
|
271 | 272 | # but we also need to fix it on the repoview |
|
272 | 273 | if isinstance(self, repoview.repoview): |
|
273 | 274 | self.__class__.__bases__ = (self.__class__.__bases__[0], |
|
274 | 275 | self.unfiltered().__class__) |
|
275 | 276 | self.requirements.add(constants.SHALLOWREPO_REQUIREMENT) |
|
276 | 277 | self._writerequirements() |
|
277 | 278 | |
|
278 | 279 | # Since setupclient hadn't been called, exchange.pull was not |
|
279 | 280 | # wrapped. So we need to manually invoke our version of it. |
|
280 | 281 | return exchangepull(orig, self, *args, **kwargs) |
|
281 | 282 | else: |
|
282 | 283 | return orig(self, *args, **kwargs) |
|
283 | 284 | extensions.wrapfunction(exchange, 'pull', pull_shallow) |
|
284 | 285 | |
|
285 | 286 | # Wrap the stream logic to add requirements and to pass include/exclude |
|
286 | 287 | # patterns around. |
|
287 | 288 | def setup_streamout(repo, remote): |
|
288 | 289 | # Replace remote.stream_out with a version that sends file |
|
289 | 290 | # patterns. |
|
290 | 291 | def stream_out_shallow(orig): |
|
291 | 292 | caps = remote.capabilities() |
|
292 | 293 | if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps: |
|
293 | 294 | opts = {} |
|
294 | 295 | if repo.includepattern: |
|
295 | 296 | opts['includepattern'] = '\0'.join(repo.includepattern) |
|
296 | 297 | if repo.excludepattern: |
|
297 | 298 | opts['excludepattern'] = '\0'.join(repo.excludepattern) |
|
298 | 299 | return remote._callstream('stream_out_shallow', **opts) |
|
299 | 300 | else: |
|
300 | 301 | return orig() |
|
301 | 302 | extensions.wrapfunction(remote, 'stream_out', stream_out_shallow) |
|
302 | 303 | def stream_wrap(orig, op): |
|
303 | 304 | setup_streamout(op.repo, op.remote) |
|
304 | 305 | return orig(op) |
|
305 | 306 | extensions.wrapfunction( |
|
306 | 307 | streamclone, 'maybeperformlegacystreamclone', stream_wrap) |
|
307 | 308 | |
|
308 | 309 | def canperformstreamclone(orig, pullop, bundle2=False): |
|
309 | 310 | # remotefilelog is currently incompatible with the |
|
310 | 311 | # bundle2 flavor of streamclones, so force us to use |
|
311 | 312 | # v1 instead. |
|
312 | 313 | if 'v2' in pullop.remotebundle2caps.get('stream', []): |
|
313 | 314 | pullop.remotebundle2caps['stream'] = [ |
|
314 | 315 | c for c in pullop.remotebundle2caps['stream'] |
|
315 | 316 | if c != 'v2'] |
|
316 | 317 | if bundle2: |
|
317 | 318 | return False, None |
|
318 | 319 | supported, requirements = orig(pullop, bundle2=bundle2) |
|
319 | 320 | if requirements is not None: |
|
320 | 321 | requirements.add(constants.SHALLOWREPO_REQUIREMENT) |
|
321 | 322 | return supported, requirements |
|
322 | 323 | extensions.wrapfunction( |
|
323 | 324 | streamclone, 'canperformstreamclone', canperformstreamclone) |
|
324 | 325 | |
|
325 | 326 | try: |
|
326 | 327 | orig(ui, repo, *args, **opts) |
|
327 | 328 | finally: |
|
328 | 329 | if opts.get('shallow'): |
|
329 | 330 | for r in repos: |
|
330 | 331 | if util.safehasattr(r, 'fileservice'): |
|
331 | 332 | r.fileservice.close() |
|
332 | 333 | |
|
333 | 334 | def debugdatashallow(orig, *args, **kwds): |
|
334 | 335 | oldlen = remotefilelog.remotefilelog.__len__ |
|
335 | 336 | try: |
|
336 | 337 | remotefilelog.remotefilelog.__len__ = lambda x: 1 |
|
337 | 338 | return orig(*args, **kwds) |
|
338 | 339 | finally: |
|
339 | 340 | remotefilelog.remotefilelog.__len__ = oldlen |
|
340 | 341 | |
|
341 | 342 | def reposetup(ui, repo): |
|
342 | 343 | if not isinstance(repo, localrepo.localrepository): |
|
343 | 344 | return |
|
344 | 345 | |
|
345 | 346 | # put here intentionally bc doesnt work in uisetup |
|
346 | 347 | ui.setconfig('hooks', 'update.prefetch', wcpprefetch) |
|
347 | 348 | ui.setconfig('hooks', 'commit.prefetch', wcpprefetch) |
|
348 | 349 | |
|
349 | 350 | isserverenabled = ui.configbool('remotefilelog', 'server') |
|
350 | isshallowclient = constants.SHALLOWREPO_REQUIREMENT in repo.requirements | |
|
351 | isshallowclient = isenabled(repo) | |
|
351 | 352 | |
|
352 | 353 | if isserverenabled and isshallowclient: |
|
353 | 354 | raise RuntimeError("Cannot be both a server and shallow client.") |
|
354 | 355 | |
|
355 | 356 | if isshallowclient: |
|
356 | 357 | setupclient(ui, repo) |
|
357 | 358 | |
|
358 | 359 | if isserverenabled: |
|
359 | 360 | remotefilelogserver.setupserver(ui, repo) |
|
360 | 361 | |
|
361 | 362 | def setupclient(ui, repo): |
|
362 | 363 | if not isinstance(repo, localrepo.localrepository): |
|
363 | 364 | return |
|
364 | 365 | |
|
365 | 366 | # Even clients get the server setup since they need to have the |
|
366 | 367 | # wireprotocol endpoints registered. |
|
367 | 368 | remotefilelogserver.onetimesetup(ui) |
|
368 | 369 | onetimeclientsetup(ui) |
|
369 | 370 | |
|
370 | 371 | shallowrepo.wraprepo(repo) |
|
371 | 372 | repo.store = shallowstore.wrapstore(repo.store) |
|
372 | 373 | |
|
373 | 374 | clientonetime = False |
|
374 | 375 | def onetimeclientsetup(ui): |
|
375 | 376 | global clientonetime |
|
376 | 377 | if clientonetime: |
|
377 | 378 | return |
|
378 | 379 | clientonetime = True |
|
379 | 380 | |
|
380 | 381 | changegroup.cgpacker = shallowbundle.shallowcg1packer |
|
381 | 382 | |
|
382 | 383 | extensions.wrapfunction(changegroup, '_addchangegroupfiles', |
|
383 | 384 | shallowbundle.addchangegroupfiles) |
|
384 | 385 | extensions.wrapfunction( |
|
385 | 386 | changegroup, 'makechangegroup', shallowbundle.makechangegroup) |
|
386 | 387 | |
|
387 | 388 | def storewrapper(orig, requirements, path, vfstype): |
|
388 | 389 | s = orig(requirements, path, vfstype) |
|
389 | 390 | if constants.SHALLOWREPO_REQUIREMENT in requirements: |
|
390 | 391 | s = shallowstore.wrapstore(s) |
|
391 | 392 | |
|
392 | 393 | return s |
|
393 | 394 | extensions.wrapfunction(localrepo, 'makestore', storewrapper) |
|
394 | 395 | |
|
395 | 396 | extensions.wrapfunction(exchange, 'pull', exchangepull) |
|
396 | 397 | |
|
397 | 398 | # prefetch files before update |
|
398 | 399 | def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None): |
|
399 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
400 | if isenabled(repo): | |
|
400 | 401 | manifest = mctx.manifest() |
|
401 | 402 | files = [] |
|
402 | 403 | for f, args, msg in actions['g']: |
|
403 | 404 | files.append((f, hex(manifest[f]))) |
|
404 | 405 | # batch fetch the needed files from the server |
|
405 | 406 | repo.fileservice.prefetch(files) |
|
406 | 407 | return orig(repo, actions, wctx, mctx, overwrite, labels=labels) |
|
407 | 408 | extensions.wrapfunction(merge, 'applyupdates', applyupdates) |
|
408 | 409 | |
|
409 | 410 | # Prefetch merge checkunknownfiles |
|
410 | 411 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, |
|
411 | 412 | *args, **kwargs): |
|
412 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
413 | if isenabled(repo): | |
|
413 | 414 | files = [] |
|
414 | 415 | sparsematch = repo.maybesparsematch(mctx.rev()) |
|
415 | 416 | for f, (m, actionargs, msg) in actions.iteritems(): |
|
416 | 417 | if sparsematch and not sparsematch(f): |
|
417 | 418 | continue |
|
418 | 419 | if m in ('c', 'dc', 'cm'): |
|
419 | 420 | files.append((f, hex(mctx.filenode(f)))) |
|
420 | 421 | elif m == 'dg': |
|
421 | 422 | f2 = actionargs[0] |
|
422 | 423 | files.append((f2, hex(mctx.filenode(f2)))) |
|
423 | 424 | # batch fetch the needed files from the server |
|
424 | 425 | repo.fileservice.prefetch(files) |
|
425 | 426 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) |
|
426 | 427 | extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles) |
|
427 | 428 | |
|
428 | 429 | # Prefetch files before status attempts to look at their size and contents |
|
429 | 430 | def checklookup(orig, self, files): |
|
430 | 431 | repo = self._repo |
|
431 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
432 | if isenabled(repo): | |
|
432 | 433 | prefetchfiles = [] |
|
433 | 434 | for parent in self._parents: |
|
434 | 435 | for f in files: |
|
435 | 436 | if f in parent: |
|
436 | 437 | prefetchfiles.append((f, hex(parent.filenode(f)))) |
|
437 | 438 | # batch fetch the needed files from the server |
|
438 | 439 | repo.fileservice.prefetch(prefetchfiles) |
|
439 | 440 | return orig(self, files) |
|
440 | 441 | extensions.wrapfunction(context.workingctx, '_checklookup', checklookup) |
|
441 | 442 | |
|
442 | 443 | # Prefetch the logic that compares added and removed files for renames |
|
443 | 444 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): |
|
444 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
445 | if isenabled(repo): | |
|
445 | 446 | files = [] |
|
446 | 447 | parentctx = repo['.'] |
|
447 | 448 | for f in removed: |
|
448 | 449 | files.append((f, hex(parentctx.filenode(f)))) |
|
449 | 450 | # batch fetch the needed files from the server |
|
450 | 451 | repo.fileservice.prefetch(files) |
|
451 | 452 | return orig(repo, matcher, added, removed, *args, **kwargs) |
|
452 | 453 | extensions.wrapfunction(scmutil, '_findrenames', findrenames) |
|
453 | 454 | |
|
454 | 455 | # prefetch files before mergecopies check |
|
455 | 456 | def computenonoverlap(orig, repo, c1, c2, *args, **kwargs): |
|
456 | 457 | u1, u2 = orig(repo, c1, c2, *args, **kwargs) |
|
457 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
458 | if isenabled(repo): | |
|
458 | 459 | m1 = c1.manifest() |
|
459 | 460 | m2 = c2.manifest() |
|
460 | 461 | files = [] |
|
461 | 462 | |
|
462 | 463 | sparsematch1 = repo.maybesparsematch(c1.rev()) |
|
463 | 464 | if sparsematch1: |
|
464 | 465 | sparseu1 = [] |
|
465 | 466 | for f in u1: |
|
466 | 467 | if sparsematch1(f): |
|
467 | 468 | files.append((f, hex(m1[f]))) |
|
468 | 469 | sparseu1.append(f) |
|
469 | 470 | u1 = sparseu1 |
|
470 | 471 | |
|
471 | 472 | sparsematch2 = repo.maybesparsematch(c2.rev()) |
|
472 | 473 | if sparsematch2: |
|
473 | 474 | sparseu2 = [] |
|
474 | 475 | for f in u2: |
|
475 | 476 | if sparsematch2(f): |
|
476 | 477 | files.append((f, hex(m2[f]))) |
|
477 | 478 | sparseu2.append(f) |
|
478 | 479 | u2 = sparseu2 |
|
479 | 480 | |
|
480 | 481 | # batch fetch the needed files from the server |
|
481 | 482 | repo.fileservice.prefetch(files) |
|
482 | 483 | return u1, u2 |
|
483 | 484 | extensions.wrapfunction(copies, '_computenonoverlap', computenonoverlap) |
|
484 | 485 | |
|
485 | 486 | # prefetch files before pathcopies check |
|
486 | 487 | def computeforwardmissing(orig, a, b, match=None): |
|
487 | 488 | missing = list(orig(a, b, match=match)) |
|
488 | 489 | repo = a._repo |
|
489 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
490 | if isenabled(repo): | |
|
490 | 491 | mb = b.manifest() |
|
491 | 492 | |
|
492 | 493 | files = [] |
|
493 | 494 | sparsematch = repo.maybesparsematch(b.rev()) |
|
494 | 495 | if sparsematch: |
|
495 | 496 | sparsemissing = [] |
|
496 | 497 | for f in missing: |
|
497 | 498 | if sparsematch(f): |
|
498 | 499 | files.append((f, hex(mb[f]))) |
|
499 | 500 | sparsemissing.append(f) |
|
500 | 501 | missing = sparsemissing |
|
501 | 502 | |
|
502 | 503 | # batch fetch the needed files from the server |
|
503 | 504 | repo.fileservice.prefetch(files) |
|
504 | 505 | return missing |
|
505 | 506 | extensions.wrapfunction(copies, '_computeforwardmissing', |
|
506 | 507 | computeforwardmissing) |
|
507 | 508 | |
|
508 | 509 | # close cache miss server connection after the command has finished |
|
509 | 510 | def runcommand(orig, lui, repo, *args, **kwargs): |
|
510 | 511 | try: |
|
511 | 512 | return orig(lui, repo, *args, **kwargs) |
|
512 | 513 | finally: |
|
513 | 514 | # repo can be None when running in chg: |
|
514 | 515 | # - at startup, reposetup was called because serve is not norepo |
|
515 | 516 | # - a norepo command like "help" is called |
|
516 | if repo and constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
517 | if repo and isenabled(repo): | |
|
517 | 518 | repo.fileservice.close() |
|
518 | 519 | extensions.wrapfunction(dispatch, 'runcommand', runcommand) |
|
519 | 520 | |
|
520 | 521 | # disappointing hacks below |
|
521 | 522 | templatekw.getrenamedfn = getrenamedfn |
|
522 | 523 | extensions.wrapfunction(revset, 'filelog', filelogrevset) |
|
523 | 524 | revset.symbols['filelog'] = revset.filelog |
|
524 | 525 | extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs) |
|
525 | 526 | |
|
526 | 527 | # prevent strip from stripping remotefilelogs |
|
527 | 528 | def _collectbrokencsets(orig, repo, files, striprev): |
|
528 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
529 | if isenabled(repo): | |
|
529 | 530 | files = list([f for f in files if not repo.shallowmatch(f)]) |
|
530 | 531 | return orig(repo, files, striprev) |
|
531 | 532 | extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets) |
|
532 | 533 | |
|
533 | 534 | # Don't commit filelogs until we know the commit hash, since the hash |
|
534 | 535 | # is present in the filelog blob. |
|
535 | 536 | # This violates Mercurial's filelog->manifest->changelog write order, |
|
536 | 537 | # but is generally fine for client repos. |
|
537 | 538 | pendingfilecommits = [] |
|
538 | 539 | def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node, |
|
539 | 540 | flags, cachedelta=None, _metatuple=None): |
|
540 | 541 | if isinstance(link, int): |
|
541 | 542 | pendingfilecommits.append( |
|
542 | 543 | (self, rawtext, transaction, link, p1, p2, node, flags, |
|
543 | 544 | cachedelta, _metatuple)) |
|
544 | 545 | return node |
|
545 | 546 | else: |
|
546 | 547 | return orig(self, rawtext, transaction, link, p1, p2, node, flags, |
|
547 | 548 | cachedelta, _metatuple=_metatuple) |
|
548 | 549 | extensions.wrapfunction( |
|
549 | 550 | remotefilelog.remotefilelog, 'addrawrevision', addrawrevision) |
|
550 | 551 | |
|
551 | 552 | def changelogadd(orig, self, *args): |
|
552 | 553 | oldlen = len(self) |
|
553 | 554 | node = orig(self, *args) |
|
554 | 555 | newlen = len(self) |
|
555 | 556 | if oldlen != newlen: |
|
556 | 557 | for oldargs in pendingfilecommits: |
|
557 | 558 | log, rt, tr, link, p1, p2, n, fl, c, m = oldargs |
|
558 | 559 | linknode = self.node(link) |
|
559 | 560 | if linknode == node: |
|
560 | 561 | log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m) |
|
561 | 562 | else: |
|
562 | 563 | raise error.ProgrammingError( |
|
563 | 564 | 'pending multiple integer revisions are not supported') |
|
564 | 565 | else: |
|
565 | 566 | # "link" is actually wrong here (it is set to len(changelog)) |
|
566 | 567 | # if changelog remains unchanged, skip writing file revisions |
|
567 | 568 | # but still do a sanity check about pending multiple revisions |
|
568 | 569 | if len(set(x[3] for x in pendingfilecommits)) > 1: |
|
569 | 570 | raise error.ProgrammingError( |
|
570 | 571 | 'pending multiple integer revisions are not supported') |
|
571 | 572 | del pendingfilecommits[:] |
|
572 | 573 | return node |
|
573 | 574 | extensions.wrapfunction(changelog.changelog, 'add', changelogadd) |
|
574 | 575 | |
|
575 | 576 | # changectx wrappers |
|
576 | 577 | def filectx(orig, self, path, fileid=None, filelog=None): |
|
577 | 578 | if fileid is None: |
|
578 | 579 | fileid = self.filenode(path) |
|
579 | if (constants.SHALLOWREPO_REQUIREMENT in self._repo.requirements and | |
|
580 | self._repo.shallowmatch(path)): | |
|
580 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): | |
|
581 | 581 | return remotefilectx.remotefilectx(self._repo, path, |
|
582 | 582 | fileid=fileid, changectx=self, filelog=filelog) |
|
583 | 583 | return orig(self, path, fileid=fileid, filelog=filelog) |
|
584 | 584 | extensions.wrapfunction(context.changectx, 'filectx', filectx) |
|
585 | 585 | |
|
586 | 586 | def workingfilectx(orig, self, path, filelog=None): |
|
587 | if (constants.SHALLOWREPO_REQUIREMENT in self._repo.requirements and | |
|
588 | self._repo.shallowmatch(path)): | |
|
587 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): | |
|
589 | 588 | return remotefilectx.remoteworkingfilectx(self._repo, |
|
590 | 589 | path, workingctx=self, filelog=filelog) |
|
591 | 590 | return orig(self, path, filelog=filelog) |
|
592 | 591 | extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx) |
|
593 | 592 | |
|
594 | 593 | # prefetch required revisions before a diff |
|
595 | 594 | def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed, |
|
596 | 595 | copy, getfilectx, *args, **kwargs): |
|
597 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
596 | if isenabled(repo): | |
|
598 | 597 | prefetch = [] |
|
599 | 598 | mf1 = ctx1.manifest() |
|
600 | 599 | for fname in modified + added + removed: |
|
601 | 600 | if fname in mf1: |
|
602 | 601 | fnode = getfilectx(fname, ctx1).filenode() |
|
603 | 602 | # fnode can be None if it's an edited working ctx file |
|
604 | 603 | if fnode: |
|
605 | 604 | prefetch.append((fname, hex(fnode))) |
|
606 | 605 | if fname not in removed: |
|
607 | 606 | fnode = getfilectx(fname, ctx2).filenode() |
|
608 | 607 | if fnode: |
|
609 | 608 | prefetch.append((fname, hex(fnode))) |
|
610 | 609 | |
|
611 | 610 | repo.fileservice.prefetch(prefetch) |
|
612 | 611 | |
|
613 | 612 | return orig(repo, revs, ctx1, ctx2, modified, added, removed, |
|
614 | 613 | copy, getfilectx, *args, **kwargs) |
|
615 | 614 | extensions.wrapfunction(patch, 'trydiff', trydiff) |
|
616 | 615 | |
|
617 | 616 | # Prevent verify from processing files |
|
618 | 617 | # a stub for mercurial.hg.verify() |
|
619 | 618 | def _verify(orig, repo): |
|
620 | 619 | lock = repo.lock() |
|
621 | 620 | try: |
|
622 | 621 | return shallowverifier.shallowverifier(repo).verify() |
|
623 | 622 | finally: |
|
624 | 623 | lock.release() |
|
625 | 624 | |
|
626 | 625 | extensions.wrapfunction(hg, 'verify', _verify) |
|
627 | 626 | |
|
628 | 627 | scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook) |
|
629 | 628 | |
|
630 | 629 | def getrenamedfn(repo, endrev=None): |
|
631 | 630 | rcache = {} |
|
632 | 631 | |
|
633 | 632 | def getrenamed(fn, rev): |
|
634 | 633 | '''looks up all renames for a file (up to endrev) the first |
|
635 | 634 | time the file is given. It indexes on the changerev and only |
|
636 | 635 | parses the manifest if linkrev != changerev. |
|
637 | 636 | Returns rename info for fn at changerev rev.''' |
|
638 | 637 | if rev in rcache.setdefault(fn, {}): |
|
639 | 638 | return rcache[fn][rev] |
|
640 | 639 | |
|
641 | 640 | try: |
|
642 | 641 | fctx = repo[rev].filectx(fn) |
|
643 | 642 | for ancestor in fctx.ancestors(): |
|
644 | 643 | if ancestor.path() == fn: |
|
645 | 644 | renamed = ancestor.renamed() |
|
646 | 645 | rcache[fn][ancestor.rev()] = renamed |
|
647 | 646 | |
|
648 | 647 | return fctx.renamed() |
|
649 | 648 | except error.LookupError: |
|
650 | 649 | return None |
|
651 | 650 | |
|
652 | 651 | return getrenamed |
|
653 | 652 | |
|
654 | 653 | def walkfilerevs(orig, repo, match, follow, revs, fncache): |
|
655 | if not constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
654 | if not isenabled(repo): | |
|
656 | 655 | return orig(repo, match, follow, revs, fncache) |
|
657 | 656 | |
|
658 | 657 | # remotefilelogs can't be walked in rev order, so throw. |
|
659 | 658 | # The caller will see the exception and walk the commit tree instead. |
|
660 | 659 | if not follow: |
|
661 | 660 | raise cmdutil.FileWalkError("Cannot walk via filelog") |
|
662 | 661 | |
|
663 | 662 | wanted = set() |
|
664 | 663 | minrev, maxrev = min(revs), max(revs) |
|
665 | 664 | |
|
666 | 665 | pctx = repo['.'] |
|
667 | 666 | for filename in match.files(): |
|
668 | 667 | if filename not in pctx: |
|
669 | 668 | raise error.Abort(_('cannot follow file not in parent ' |
|
670 | 669 | 'revision: "%s"') % filename) |
|
671 | 670 | fctx = pctx[filename] |
|
672 | 671 | |
|
673 | 672 | linkrev = fctx.linkrev() |
|
674 | 673 | if linkrev >= minrev and linkrev <= maxrev: |
|
675 | 674 | fncache.setdefault(linkrev, []).append(filename) |
|
676 | 675 | wanted.add(linkrev) |
|
677 | 676 | |
|
678 | 677 | for ancestor in fctx.ancestors(): |
|
679 | 678 | linkrev = ancestor.linkrev() |
|
680 | 679 | if linkrev >= minrev and linkrev <= maxrev: |
|
681 | 680 | fncache.setdefault(linkrev, []).append(ancestor.path()) |
|
682 | 681 | wanted.add(linkrev) |
|
683 | 682 | |
|
684 | 683 | return wanted |
|
685 | 684 | |
|
686 | 685 | def filelogrevset(orig, repo, subset, x): |
|
687 | 686 | """``filelog(pattern)`` |
|
688 | 687 | Changesets connected to the specified filelog. |
|
689 | 688 | |
|
690 | 689 | For performance reasons, ``filelog()`` does not show every changeset |
|
691 | 690 | that affects the requested file(s). See :hg:`help log` for details. For |
|
692 | 691 | a slower, more accurate result, use ``file()``. |
|
693 | 692 | """ |
|
694 | 693 | |
|
695 | if not constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
694 | if not isenabled(repo): | |
|
696 | 695 | return orig(repo, subset, x) |
|
697 | 696 | |
|
698 | 697 | # i18n: "filelog" is a keyword |
|
699 | 698 | pat = revset.getstring(x, _("filelog requires a pattern")) |
|
700 | 699 | m = match.match(repo.root, repo.getcwd(), [pat], default='relpath', |
|
701 | 700 | ctx=repo[None]) |
|
702 | 701 | s = set() |
|
703 | 702 | |
|
704 | 703 | if not match.patkind(pat): |
|
705 | 704 | # slow |
|
706 | 705 | for r in subset: |
|
707 | 706 | ctx = repo[r] |
|
708 | 707 | cfiles = ctx.files() |
|
709 | 708 | for f in m.files(): |
|
710 | 709 | if f in cfiles: |
|
711 | 710 | s.add(ctx.rev()) |
|
712 | 711 | break |
|
713 | 712 | else: |
|
714 | 713 | # partial |
|
715 | 714 | files = (f for f in repo[None] if m(f)) |
|
716 | 715 | for f in files: |
|
717 | 716 | fctx = repo[None].filectx(f) |
|
718 | 717 | s.add(fctx.linkrev()) |
|
719 | 718 | for actx in fctx.ancestors(): |
|
720 | 719 | s.add(actx.linkrev()) |
|
721 | 720 | |
|
722 | 721 | return smartset.baseset([r for r in subset if r in s]) |
|
723 | 722 | |
|
724 | 723 | @command('gc', [], _('hg gc [REPO...]'), norepo=True) |
|
725 | 724 | def gc(ui, *args, **opts): |
|
726 | 725 | '''garbage collect the client and server filelog caches |
|
727 | 726 | ''' |
|
728 | 727 | cachepaths = set() |
|
729 | 728 | |
|
730 | 729 | # get the system client cache |
|
731 | 730 | systemcache = shallowutil.getcachepath(ui, allowempty=True) |
|
732 | 731 | if systemcache: |
|
733 | 732 | cachepaths.add(systemcache) |
|
734 | 733 | |
|
735 | 734 | # get repo client and server cache |
|
736 | 735 | repopaths = [] |
|
737 | 736 | pwd = ui.environ.get('PWD') |
|
738 | 737 | if pwd: |
|
739 | 738 | repopaths.append(pwd) |
|
740 | 739 | |
|
741 | 740 | repopaths.extend(args) |
|
742 | 741 | repos = [] |
|
743 | 742 | for repopath in repopaths: |
|
744 | 743 | try: |
|
745 | 744 | repo = hg.peer(ui, {}, repopath) |
|
746 | 745 | repos.append(repo) |
|
747 | 746 | |
|
748 | 747 | repocache = shallowutil.getcachepath(repo.ui, allowempty=True) |
|
749 | 748 | if repocache: |
|
750 | 749 | cachepaths.add(repocache) |
|
751 | 750 | except error.RepoError: |
|
752 | 751 | pass |
|
753 | 752 | |
|
754 | 753 | # gc client cache |
|
755 | 754 | for cachepath in cachepaths: |
|
756 | 755 | gcclient(ui, cachepath) |
|
757 | 756 | |
|
758 | 757 | # gc server cache |
|
759 | 758 | for repo in repos: |
|
760 | 759 | remotefilelogserver.gcserver(ui, repo._repo) |
|
761 | 760 | |
|
762 | 761 | def gcclient(ui, cachepath): |
|
763 | 762 | # get list of repos that use this cache |
|
764 | 763 | repospath = os.path.join(cachepath, 'repos') |
|
765 | 764 | if not os.path.exists(repospath): |
|
766 | 765 | ui.warn(_("no known cache at %s\n") % cachepath) |
|
767 | 766 | return |
|
768 | 767 | |
|
769 | 768 | reposfile = open(repospath, 'r') |
|
770 | 769 | repos = set([r[:-1] for r in reposfile.readlines()]) |
|
771 | 770 | reposfile.close() |
|
772 | 771 | |
|
773 | 772 | # build list of useful files |
|
774 | 773 | validrepos = [] |
|
775 | 774 | keepkeys = set() |
|
776 | 775 | |
|
777 | 776 | _analyzing = _("analyzing repositories") |
|
778 | 777 | |
|
779 | 778 | sharedcache = None |
|
780 | 779 | filesrepacked = False |
|
781 | 780 | |
|
782 | 781 | count = 0 |
|
783 | 782 | for path in repos: |
|
784 | 783 | ui.progress(_analyzing, count, unit="repos", total=len(repos)) |
|
785 | 784 | count += 1 |
|
786 | 785 | try: |
|
787 | 786 | path = ui.expandpath(os.path.normpath(path)) |
|
788 | 787 | except TypeError as e: |
|
789 | 788 | ui.warn(_("warning: malformed path: %r:%s\n") % (path, e)) |
|
790 | 789 | traceback.print_exc() |
|
791 | 790 | continue |
|
792 | 791 | try: |
|
793 | 792 | peer = hg.peer(ui, {}, path) |
|
794 | 793 | repo = peer._repo |
|
795 | 794 | except error.RepoError: |
|
796 | 795 | continue |
|
797 | 796 | |
|
798 | 797 | validrepos.append(path) |
|
799 | 798 | |
|
800 | 799 | # Protect against any repo or config changes that have happened since |
|
801 | 800 | # this repo was added to the repos file. We'd rather this loop succeed |
|
802 | 801 | # and too much be deleted, than the loop fail and nothing gets deleted. |
|
803 | if constants.SHALLOWREPO_REQUIREMENT not in repo.requirements: | |
|
802 | if not isenabled(repo): | |
|
804 | 803 | continue |
|
805 | 804 | |
|
806 | 805 | if not util.safehasattr(repo, 'name'): |
|
807 | 806 | ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path) |
|
808 | 807 | continue |
|
809 | 808 | |
|
810 | 809 | # If garbage collection on repack and repack on hg gc are enabled |
|
811 | 810 | # then loose files are repacked and garbage collected. |
|
812 | 811 | # Otherwise regular garbage collection is performed. |
|
813 | 812 | repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc') |
|
814 | 813 | gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack') |
|
815 | 814 | if repackonhggc and gcrepack: |
|
816 | 815 | try: |
|
817 | 816 | repackmod.incrementalrepack(repo) |
|
818 | 817 | filesrepacked = True |
|
819 | 818 | continue |
|
820 | 819 | except (IOError, repackmod.RepackAlreadyRunning): |
|
821 | 820 | # If repack cannot be performed due to not enough disk space |
|
822 | 821 | # continue doing garbage collection of loose files w/o repack |
|
823 | 822 | pass |
|
824 | 823 | |
|
825 | 824 | reponame = repo.name |
|
826 | 825 | if not sharedcache: |
|
827 | 826 | sharedcache = repo.sharedstore |
|
828 | 827 | |
|
829 | 828 | # Compute a keepset which is not garbage collected |
|
830 | 829 | def keyfn(fname, fnode): |
|
831 | 830 | return fileserverclient.getcachekey(reponame, fname, hex(fnode)) |
|
832 | 831 | keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys) |
|
833 | 832 | |
|
834 | 833 | ui.progress(_analyzing, None) |
|
835 | 834 | |
|
836 | 835 | # write list of valid repos back |
|
837 | 836 | oldumask = os.umask(0o002) |
|
838 | 837 | try: |
|
839 | 838 | reposfile = open(repospath, 'w') |
|
840 | 839 | reposfile.writelines([("%s\n" % r) for r in validrepos]) |
|
841 | 840 | reposfile.close() |
|
842 | 841 | finally: |
|
843 | 842 | os.umask(oldumask) |
|
844 | 843 | |
|
845 | 844 | # prune cache |
|
846 | 845 | if sharedcache is not None: |
|
847 | 846 | sharedcache.gc(keepkeys) |
|
848 | 847 | elif not filesrepacked: |
|
849 | 848 | ui.warn(_("warning: no valid repos in repofile\n")) |
|
850 | 849 | |
|
851 | 850 | def log(orig, ui, repo, *pats, **opts): |
|
852 | if constants.SHALLOWREPO_REQUIREMENT not in repo.requirements: | |
|
851 | if not isenabled(repo): | |
|
853 | 852 | return orig(ui, repo, *pats, **opts) |
|
854 | 853 | |
|
855 | 854 | follow = opts.get('follow') |
|
856 | 855 | revs = opts.get('rev') |
|
857 | 856 | if pats: |
|
858 | 857 | # Force slowpath for non-follow patterns and follows that start from |
|
859 | 858 | # non-working-copy-parent revs. |
|
860 | 859 | if not follow or revs: |
|
861 | 860 | # This forces the slowpath |
|
862 | 861 | opts['removed'] = True |
|
863 | 862 | |
|
864 | 863 | # If this is a non-follow log without any revs specified, recommend that |
|
865 | 864 | # the user add -f to speed it up. |
|
866 | 865 | if not follow and not revs: |
|
867 | 866 | match, pats = scmutil.matchandpats(repo['.'], pats, opts) |
|
868 | 867 | isfile = not match.anypats() |
|
869 | 868 | if isfile: |
|
870 | 869 | for file in match.files(): |
|
871 | 870 | if not os.path.isfile(repo.wjoin(file)): |
|
872 | 871 | isfile = False |
|
873 | 872 | break |
|
874 | 873 | |
|
875 | 874 | if isfile: |
|
876 | 875 | ui.warn(_("warning: file log can be slow on large repos - " + |
|
877 | 876 | "use -f to speed it up\n")) |
|
878 | 877 | |
|
879 | 878 | return orig(ui, repo, *pats, **opts) |
|
880 | 879 | |
|
881 | 880 | def revdatelimit(ui, revset): |
|
882 | 881 | """Update revset so that only changesets no older than 'prefetchdays' days |
|
883 | 882 | are included. The default value is set to 14 days. If 'prefetchdays' is set |
|
884 | 883 | to zero or negative value then date restriction is not applied. |
|
885 | 884 | """ |
|
886 | 885 | days = ui.configint('remotefilelog', 'prefetchdays') |
|
887 | 886 | if days > 0: |
|
888 | 887 | revset = '(%s) & date(-%s)' % (revset, days) |
|
889 | 888 | return revset |
|
890 | 889 | |
|
891 | 890 | def readytofetch(repo): |
|
892 | 891 | """Check that enough time has passed since the last background prefetch. |
|
893 | 892 | This only relates to prefetches after operations that change the working |
|
894 | 893 | copy parent. Default delay between background prefetches is 2 minutes. |
|
895 | 894 | """ |
|
896 | 895 | timeout = repo.ui.configint('remotefilelog', 'prefetchdelay') |
|
897 | 896 | fname = repo.vfs.join('lastprefetch') |
|
898 | 897 | |
|
899 | 898 | ready = False |
|
900 | 899 | with open(fname, 'a'): |
|
901 | 900 | # the with construct above is used to avoid race conditions |
|
902 | 901 | modtime = os.path.getmtime(fname) |
|
903 | 902 | if (time.time() - modtime) > timeout: |
|
904 | 903 | os.utime(fname, None) |
|
905 | 904 | ready = True |
|
906 | 905 | |
|
907 | 906 | return ready |
|
908 | 907 | |
|
909 | 908 | def wcpprefetch(ui, repo, **kwargs): |
|
910 | 909 | """Prefetches in background revisions specified by bgprefetchrevs revset. |
|
911 | 910 | Does background repack if backgroundrepack flag is set in config. |
|
912 | 911 | """ |
|
913 | shallow = constants.SHALLOWREPO_REQUIREMENT in repo.requirements | |
|
912 | shallow = isenabled(repo) | |
|
914 | 913 | bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs') |
|
915 | 914 | isready = readytofetch(repo) |
|
916 | 915 | |
|
917 | 916 | if not (shallow and bgprefetchrevs and isready): |
|
918 | 917 | return |
|
919 | 918 | |
|
920 | 919 | bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack') |
|
921 | 920 | # update a revset with a date limit |
|
922 | 921 | bgprefetchrevs = revdatelimit(ui, bgprefetchrevs) |
|
923 | 922 | |
|
924 | 923 | def anon(): |
|
925 | 924 | if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch: |
|
926 | 925 | return |
|
927 | 926 | repo.ranprefetch = True |
|
928 | 927 | repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack) |
|
929 | 928 | |
|
930 | 929 | repo._afterlock(anon) |
|
931 | 930 | |
|
932 | 931 | def pull(orig, ui, repo, *pats, **opts): |
|
933 | 932 | result = orig(ui, repo, *pats, **opts) |
|
934 | 933 | |
|
935 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
934 | if isenabled(repo): | |
|
936 | 935 | # prefetch if it's configured |
|
937 | 936 | prefetchrevset = ui.config('remotefilelog', 'pullprefetch') |
|
938 | 937 | bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack') |
|
939 | 938 | bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch') |
|
940 | 939 | |
|
941 | 940 | if prefetchrevset: |
|
942 | 941 | ui.status(_("prefetching file contents\n")) |
|
943 | 942 | revs = scmutil.revrange(repo, [prefetchrevset]) |
|
944 | 943 | base = repo['.'].rev() |
|
945 | 944 | if bgprefetch: |
|
946 | 945 | repo.backgroundprefetch(prefetchrevset, repack=bgrepack) |
|
947 | 946 | else: |
|
948 | 947 | repo.prefetch(revs, base=base) |
|
949 | 948 | if bgrepack: |
|
950 | 949 | repackmod.backgroundrepack(repo, incremental=True) |
|
951 | 950 | elif bgrepack: |
|
952 | 951 | repackmod.backgroundrepack(repo, incremental=True) |
|
953 | 952 | |
|
954 | 953 | return result |
|
955 | 954 | |
|
956 | 955 | def exchangepull(orig, repo, remote, *args, **kwargs): |
|
957 | 956 | # Hook into the callstream/getbundle to insert bundle capabilities |
|
958 | 957 | # during a pull. |
|
959 | 958 | def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None, |
|
960 | 959 | **kwargs): |
|
961 | 960 | if not bundlecaps: |
|
962 | 961 | bundlecaps = set() |
|
963 | 962 | bundlecaps.add(constants.BUNDLE2_CAPABLITY) |
|
964 | 963 | return orig(source, heads=heads, common=common, bundlecaps=bundlecaps, |
|
965 | 964 | **kwargs) |
|
966 | 965 | |
|
967 | 966 | if util.safehasattr(remote, '_callstream'): |
|
968 | 967 | remote._localrepo = repo |
|
969 | 968 | elif util.safehasattr(remote, 'getbundle'): |
|
970 | 969 | extensions.wrapfunction(remote, 'getbundle', localgetbundle) |
|
971 | 970 | |
|
972 | 971 | return orig(repo, remote, *args, **kwargs) |
|
973 | 972 | |
|
974 | 973 | def _fileprefetchhook(repo, revs, match): |
|
975 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
974 | if isenabled(repo): | |
|
976 | 975 | allfiles = [] |
|
977 | 976 | for rev in revs: |
|
978 | 977 | if rev == nodemod.wdirrev or rev is None: |
|
979 | 978 | continue |
|
980 | 979 | ctx = repo[rev] |
|
981 | 980 | mf = ctx.manifest() |
|
982 | 981 | sparsematch = repo.maybesparsematch(ctx.rev()) |
|
983 | 982 | for path in ctx.walk(match): |
|
984 | 983 | if path.endswith('/'): |
|
985 | 984 | # Tree manifest that's being excluded as part of narrow |
|
986 | 985 | continue |
|
987 | 986 | if (not sparsematch or sparsematch(path)) and path in mf: |
|
988 | 987 | allfiles.append((path, hex(mf[path]))) |
|
989 | 988 | repo.fileservice.prefetch(allfiles) |
|
990 | 989 | |
|
991 | 990 | @command('debugremotefilelog', [ |
|
992 | 991 | ('d', 'decompress', None, _('decompress the filelog first')), |
|
993 | 992 | ], _('hg debugremotefilelog <path>'), norepo=True) |
|
994 | 993 | def debugremotefilelog(ui, path, **opts): |
|
995 | 994 | return debugcommands.debugremotefilelog(ui, path, **opts) |
|
996 | 995 | |
|
997 | 996 | @command('verifyremotefilelog', [ |
|
998 | 997 | ('d', 'decompress', None, _('decompress the filelogs first')), |
|
999 | 998 | ], _('hg verifyremotefilelogs <directory>'), norepo=True) |
|
1000 | 999 | def verifyremotefilelog(ui, path, **opts): |
|
1001 | 1000 | return debugcommands.verifyremotefilelog(ui, path, **opts) |
|
1002 | 1001 | |
|
1003 | 1002 | @command('debugdatapack', [ |
|
1004 | 1003 | ('', 'long', None, _('print the long hashes')), |
|
1005 | 1004 | ('', 'node', '', _('dump the contents of node'), 'NODE'), |
|
1006 | 1005 | ], _('hg debugdatapack <paths>'), norepo=True) |
|
1007 | 1006 | def debugdatapack(ui, *paths, **opts): |
|
1008 | 1007 | return debugcommands.debugdatapack(ui, *paths, **opts) |
|
1009 | 1008 | |
|
1010 | 1009 | @command('debughistorypack', [ |
|
1011 | 1010 | ], _('hg debughistorypack <path>'), norepo=True) |
|
1012 | 1011 | def debughistorypack(ui, path, **opts): |
|
1013 | 1012 | return debugcommands.debughistorypack(ui, path) |
|
1014 | 1013 | |
|
1015 | 1014 | @command('debugkeepset', [ |
|
1016 | 1015 | ], _('hg debugkeepset')) |
|
1017 | 1016 | def debugkeepset(ui, repo, **opts): |
|
1018 | 1017 | # The command is used to measure keepset computation time |
|
1019 | 1018 | def keyfn(fname, fnode): |
|
1020 | 1019 | return fileserverclient.getcachekey(repo.name, fname, hex(fnode)) |
|
1021 | 1020 | repackmod.keepset(repo, keyfn) |
|
1022 | 1021 | return |
|
1023 | 1022 | |
|
1024 | 1023 | @command('debugwaitonrepack', [ |
|
1025 | 1024 | ], _('hg debugwaitonrepack')) |
|
1026 | 1025 | def debugwaitonrepack(ui, repo, **opts): |
|
1027 | 1026 | return debugcommands.debugwaitonrepack(repo) |
|
1028 | 1027 | |
|
1029 | 1028 | @command('debugwaitonprefetch', [ |
|
1030 | 1029 | ], _('hg debugwaitonprefetch')) |
|
1031 | 1030 | def debugwaitonprefetch(ui, repo, **opts): |
|
1032 | 1031 | return debugcommands.debugwaitonprefetch(repo) |
|
1033 | 1032 | |
|
1034 | 1033 | def resolveprefetchopts(ui, opts): |
|
1035 | 1034 | if not opts.get('rev'): |
|
1036 | 1035 | revset = ['.', 'draft()'] |
|
1037 | 1036 | |
|
1038 | 1037 | prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None) |
|
1039 | 1038 | if prefetchrevset: |
|
1040 | 1039 | revset.append('(%s)' % prefetchrevset) |
|
1041 | 1040 | bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None) |
|
1042 | 1041 | if bgprefetchrevs: |
|
1043 | 1042 | revset.append('(%s)' % bgprefetchrevs) |
|
1044 | 1043 | revset = '+'.join(revset) |
|
1045 | 1044 | |
|
1046 | 1045 | # update a revset with a date limit |
|
1047 | 1046 | revset = revdatelimit(ui, revset) |
|
1048 | 1047 | |
|
1049 | 1048 | opts['rev'] = [revset] |
|
1050 | 1049 | |
|
1051 | 1050 | if not opts.get('base'): |
|
1052 | 1051 | opts['base'] = None |
|
1053 | 1052 | |
|
1054 | 1053 | return opts |
|
1055 | 1054 | |
|
1056 | 1055 | @command('prefetch', [ |
|
1057 | 1056 | ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')), |
|
1058 | 1057 | ('', 'repack', False, _('run repack after prefetch')), |
|
1059 | 1058 | ('b', 'base', '', _("rev that is assumed to already be local")), |
|
1060 | 1059 | ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]')) |
|
1061 | 1060 | def prefetch(ui, repo, *pats, **opts): |
|
1062 | 1061 | """prefetch file revisions from the server |
|
1063 | 1062 | |
|
1064 | 1063 | Prefetches file revisions for the specified revs and stores them in the |
|
1065 | 1064 | local remotefilelog cache. If no rev is specified, the default rev is |
|
1066 | 1065 | used, which is the union of dot, draft, pullprefetch and bgprefetchrev. |
|
1067 | 1066 | File names or patterns can be used to limit which files are downloaded. |
|
1068 | 1067 | |
|
1069 | 1068 | Return 0 on success. |
|
1070 | 1069 | """ |
|
1071 | if not constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
1070 | if not isenabled(repo): | |
|
1072 | 1071 | raise error.Abort(_("repo is not shallow")) |
|
1073 | 1072 | |
|
1074 | 1073 | opts = resolveprefetchopts(ui, opts) |
|
1075 | 1074 | revs = scmutil.revrange(repo, opts.get('rev')) |
|
1076 | 1075 | repo.prefetch(revs, opts.get('base'), pats, opts) |
|
1077 | 1076 | |
|
1078 | 1077 | # Run repack in background |
|
1079 | 1078 | if opts.get('repack'): |
|
1080 | 1079 | repackmod.backgroundrepack(repo, incremental=True) |
|
1081 | 1080 | |
|
1082 | 1081 | @command('repack', [ |
|
1083 | 1082 | ('', 'background', None, _('run in a background process'), None), |
|
1084 | 1083 | ('', 'incremental', None, _('do an incremental repack'), None), |
|
1085 | 1084 | ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None), |
|
1086 | 1085 | ], _('hg repack [OPTIONS]')) |
|
1087 | 1086 | def repack_(ui, repo, *pats, **opts): |
|
1088 | 1087 | if opts.get('background'): |
|
1089 | 1088 | repackmod.backgroundrepack(repo, incremental=opts.get('incremental'), |
|
1090 | 1089 | packsonly=opts.get('packsonly', False)) |
|
1091 | 1090 | return |
|
1092 | 1091 | |
|
1093 | 1092 | options = {'packsonly': opts.get('packsonly')} |
|
1094 | 1093 | |
|
1095 | 1094 | try: |
|
1096 | 1095 | if opts.get('incremental'): |
|
1097 | 1096 | repackmod.incrementalrepack(repo, options=options) |
|
1098 | 1097 | else: |
|
1099 | 1098 | repackmod.fullrepack(repo, options=options) |
|
1100 | 1099 | except repackmod.RepackAlreadyRunning as ex: |
|
1101 | 1100 | # Don't propagate the exception if the repack is already in |
|
1102 | 1101 | # progress, since we want the command to exit 0. |
|
1103 | 1102 | repo.ui.warn('%s\n' % ex) |
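The hunks above repeatedly replace the inline `constants.SHALLOWREPO_REQUIREMENT in repo.requirements` test with the `isenabled` alias imported from `shallowutil`. The helper itself is not shown in this diff; as a minimal sketch, assuming it simply wraps the same requirements-membership check that the removed lines performed, it would look like this:

```python
# Hypothetical sketch of shallowutil.isenabled (not part of this diff);
# assumes it only wraps the requirements check the old call sites inlined.
from . import constants

def isenabled(repo):
    """Return True if this repo is a shallow (remotefilelog) client."""
    return constants.SHALLOWREPO_REQUIREMENT in repo.requirements
```

Funneling every call site through one helper means a future change to how "shallowness" is detected touches a single function instead of every wrapped command.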
@@ -1,377 +1,377 b''
|
1 | 1 | # debugcommands.py - debug logic for remotefilelog |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import hashlib |
|
10 | 10 | import os |
|
11 | 11 | import zlib |
|
12 | 12 | |
|
13 | 13 | from mercurial.node import bin, hex, nullid, short |
|
14 | 14 | from mercurial.i18n import _ |
|
15 | 15 | from mercurial import ( |
|
16 | 16 | error, |
|
17 | 17 | filelog, |
|
18 | 18 | revlog, |
|
19 | 19 | ) |
|
20 | 20 | from . import ( |
|
21 | 21 | constants, |
|
22 | 22 | datapack, |
|
23 | 23 | extutil, |
|
24 | 24 | fileserverclient, |
|
25 | 25 | historypack, |
|
26 | 26 | repack, |
|
27 | 27 | shallowutil, |
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | def debugremotefilelog(ui, path, **opts): |
|
31 | 31 | decompress = opts.get('decompress') |
|
32 | 32 | |
|
33 | 33 | size, firstnode, mapping = parsefileblob(path, decompress) |
|
34 | 34 | |
|
35 | 35 | ui.status(_("size: %s bytes\n") % (size)) |
|
36 | 36 | ui.status(_("path: %s \n") % (path)) |
|
37 | 37 | ui.status(_("key: %s \n") % (short(firstnode))) |
|
38 | 38 | ui.status(_("\n")) |
|
39 | 39 | ui.status(_("%12s => %12s %13s %13s %12s\n") % |
|
40 | 40 | ("node", "p1", "p2", "linknode", "copyfrom")) |
|
41 | 41 | |
|
42 | 42 | queue = [firstnode] |
|
43 | 43 | while queue: |
|
44 | 44 | node = queue.pop(0) |
|
45 | 45 | p1, p2, linknode, copyfrom = mapping[node] |
|
46 | 46 | ui.status(_("%s => %s %s %s %s\n") % |
|
47 | 47 | (short(node), short(p1), short(p2), short(linknode), copyfrom)) |
|
48 | 48 | if p1 != nullid: |
|
49 | 49 | queue.append(p1) |
|
50 | 50 | if p2 != nullid: |
|
51 | 51 | queue.append(p2) |
|
52 | 52 | |
|
53 | 53 | def buildtemprevlog(repo, file): |
|
54 | 54 | # get filename key |
|
55 | 55 | filekey = hashlib.sha1(file).hexdigest() |
|
56 | 56 | filedir = os.path.join(repo.path, 'store/data', filekey) |
|
57 | 57 | |
|
58 | 58 | # sort all entries based on linkrev |
|
59 | 59 | fctxs = [] |
|
60 | 60 | for filenode in os.listdir(filedir): |
|
61 | 61 | if '_old' not in filenode: |
|
62 | 62 | fctxs.append(repo.filectx(file, fileid=bin(filenode))) |
|
63 | 63 | |
|
64 | 64 | fctxs = sorted(fctxs, key=lambda x: x.linkrev()) |
|
65 | 65 | |
|
66 | 66 | # add to revlog |
|
67 | 67 | temppath = repo.sjoin('data/temprevlog.i') |
|
68 | 68 | if os.path.exists(temppath): |
|
69 | 69 | os.remove(temppath) |
|
70 | 70 | r = filelog.filelog(repo.svfs, 'temprevlog') |
|
71 | 71 | |
|
72 | 72 | class faket(object): |
|
73 | 73 | def add(self, a, b, c): |
|
74 | 74 | pass |
|
75 | 75 | t = faket() |
|
76 | 76 | for fctx in fctxs: |
|
77 | 77 | if fctx.node() not in repo: |
|
78 | 78 | continue |
|
79 | 79 | |
|
80 | 80 | p = fctx.filelog().parents(fctx.filenode()) |
|
81 | 81 | meta = {} |
|
82 | 82 | if fctx.renamed(): |
|
83 | 83 | meta['copy'] = fctx.renamed()[0] |
|
84 | 84 | meta['copyrev'] = hex(fctx.renamed()[1]) |
|
85 | 85 | |
|
86 | 86 | r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1]) |
|
87 | 87 | |
|
88 | 88 | return r |
|
89 | 89 | |
|
90 | 90 | def debugindex(orig, ui, repo, file_=None, **opts): |
|
91 | 91 | """dump the contents of an index file""" |
|
92 | 92 | if (opts.get('changelog') or |
|
93 | 93 | opts.get('manifest') or |
|
94 | 94 | opts.get('dir') or |
|
95 | not constants.SHALLOWREPO_REQUIREMENT in repo.requirements or | |
|
95 | not shallowutil.isenabled(repo) or | |
|
96 | 96 | not repo.shallowmatch(file_)): |
|
97 | 97 | return orig(ui, repo, file_, **opts) |
|
98 | 98 | |
|
99 | 99 | r = buildtemprevlog(repo, file_) |
|
100 | 100 | |
|
101 | 101 | # debugindex like normal |
|
102 | 102 | format = opts.get('format', 0) |
|
103 | 103 | if format not in (0, 1): |
|
104 | 104 | raise error.Abort(_("unknown format %d") % format) |
|
105 | 105 | |
|
106 | 106 | generaldelta = r.version & revlog.FLAG_GENERALDELTA |
|
107 | 107 | if generaldelta: |
|
108 | 108 | basehdr = ' delta' |
|
109 | 109 | else: |
|
110 | 110 | basehdr = ' base' |
|
111 | 111 | |
|
112 | 112 | if format == 0: |
|
113 | 113 | ui.write((" rev offset length " + basehdr + " linkrev" |
|
114 | 114 | " nodeid p1 p2\n")) |
|
115 | 115 | elif format == 1: |
|
116 | 116 | ui.write((" rev flag offset length" |
|
117 | 117 | " size " + basehdr + " link p1 p2" |
|
118 | 118 | " nodeid\n")) |
|
119 | 119 | |
|
120 | 120 | for i in r: |
|
121 | 121 | node = r.node(i) |
|
122 | 122 | if generaldelta: |
|
123 | 123 | base = r.deltaparent(i) |
|
124 | 124 | else: |
|
125 | 125 | base = r.chainbase(i) |
|
126 | 126 | if format == 0: |
|
127 | 127 | try: |
|
128 | 128 | pp = r.parents(node) |
|
129 | 129 | except Exception: |
|
130 | 130 | pp = [nullid, nullid] |
|
131 | 131 | ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % ( |
|
132 | 132 | i, r.start(i), r.length(i), base, r.linkrev(i), |
|
133 | 133 | short(node), short(pp[0]), short(pp[1]))) |
|
134 | 134 | elif format == 1: |
|
135 | 135 | pr = r.parentrevs(i) |
|
136 | 136 | ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % ( |
|
137 | 137 | i, r.flags(i), r.start(i), r.length(i), r.rawsize(i), |
|
138 | 138 | base, r.linkrev(i), pr[0], pr[1], short(node))) |
|
139 | 139 | |
|
140 | 140 | def debugindexdot(orig, ui, repo, file_): |
|
141 | 141 | """dump an index DAG as a graphviz dot file""" |
|
142 | if not constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
142 | if not shallowutil.isenabled(repo): | |
|
143 | 143 | return orig(ui, repo, file_) |
|
144 | 144 | |
|
145 | 145 | r = buildtemprevlog(repo, os.path.basename(file_)[:-2]) |
|
146 | 146 | |
|
147 | 147 | ui.write(("digraph G {\n")) |
|
148 | 148 | for i in r: |
|
149 | 149 | node = r.node(i) |
|
150 | 150 | pp = r.parents(node) |
|
151 | 151 | ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i)) |
|
152 | 152 | if pp[1] != nullid: |
|
153 | 153 | ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) |
|
154 | 154 | ui.write("}\n") |
|
155 | 155 | |
|
156 | 156 | def verifyremotefilelog(ui, path, **opts): |
|
157 | 157 | decompress = opts.get('decompress') |
|
158 | 158 | |
|
159 | 159 | for root, dirs, files in os.walk(path): |
|
160 | 160 | for file in files: |
|
161 | 161 | if file == "repos": |
|
162 | 162 | continue |
|
163 | 163 | filepath = os.path.join(root, file) |
|
164 | 164 | size, firstnode, mapping = parsefileblob(filepath, decompress) |
|
165 | 165 | for p1, p2, linknode, copyfrom in mapping.itervalues(): |
|
166 | 166 | if linknode == nullid: |
|
167 | 167 | actualpath = os.path.relpath(root, path) |
|
168 | 168 | key = fileserverclient.getcachekey("reponame", actualpath, |
|
169 | 169 | file) |
|
170 | 170 | ui.status("%s %s\n" % (key, os.path.relpath(filepath, |
|
171 | 171 | path))) |
|
172 | 172 | |
|
173 | 173 | def _decompressblob(raw): |
|
174 | 174 | return zlib.decompress(raw) |
|
175 | 175 | |
|
176 | 176 | def parsefileblob(path, decompress): |
|
177 | 177 | raw = None |
|
178 | 178 | f = open(path, "r") |
|
179 | 179 | try: |
|
180 | 180 | raw = f.read() |
|
181 | 181 | finally: |
|
182 | 182 | f.close() |
|
183 | 183 | |
|
184 | 184 | if decompress: |
|
185 | 185 | raw = _decompressblob(raw) |
|
186 | 186 | |
|
187 | 187 | offset, size, flags = shallowutil.parsesizeflags(raw) |
|
188 | 188 | start = offset + size |
|
189 | 189 | |
|
190 | 190 | firstnode = None |
|
191 | 191 | |
|
192 | 192 | mapping = {} |
|
193 | 193 | while start < len(raw): |
|
194 | 194 | divider = raw.index('\0', start + 80) |
|
195 | 195 | |
|
196 | 196 | currentnode = raw[start:(start + 20)] |
|
197 | 197 | if not firstnode: |
|
198 | 198 | firstnode = currentnode |
|
199 | 199 | |
|
200 | 200 | p1 = raw[(start + 20):(start + 40)] |
|
201 | 201 | p2 = raw[(start + 40):(start + 60)] |
|
202 | 202 | linknode = raw[(start + 60):(start + 80)] |
|
203 | 203 | copyfrom = raw[(start + 80):divider] |
|
204 | 204 | |
|
205 | 205 | mapping[currentnode] = (p1, p2, linknode, copyfrom) |
|
206 | 206 | start = divider + 1 |
|
207 | 207 | |
|
208 | 208 | return size, firstnode, mapping |
|
209 | 209 | |
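As a side note, the ancestor records that parsefileblob() walks have a fixed layout: four 20-byte binary nodes (node, p1, p2, linknode) followed by the copy-from path and a NUL terminator. A minimal standalone sketch with made-up values, not part of the change above:

    node     = b'\x11' * 20
    p1       = b'\x22' * 20
    p2       = b'\x00' * 20            # nullid
    linknode = b'\x33' * 20
    copyfrom = b'old/path.py'          # empty when the file was not copied

    record = node + p1 + p2 + linknode + copyfrom + b'\x00'

    # mirror the parsing loop: the NUL search starts at offset 80, so zero
    # bytes inside the fixed-width node fields are never mistaken for the
    # record terminator
    divider = record.index(b'\x00', 80)
    assert record[0:20] == node
    assert record[60:80] == linknode
    assert record[80:divider] == copyfrom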
|
210 | 210 | def debugdatapack(ui, *paths, **opts): |
|
211 | 211 | for path in paths: |
|
212 | 212 | if '.data' in path: |
|
213 | 213 | path = path[:path.index('.data')] |
|
214 | 214 | ui.write("%s:\n" % path) |
|
215 | 215 | dpack = datapack.datapack(path) |
|
216 | 216 | node = opts.get('node') |
|
217 | 217 | if node: |
|
218 | 218 | deltachain = dpack.getdeltachain('', bin(node)) |
|
219 | 219 | dumpdeltachain(ui, deltachain, **opts) |
|
220 | 220 | return |
|
221 | 221 | |
|
222 | 222 | if opts.get('long'): |
|
223 | 223 | hashformatter = hex |
|
224 | 224 | hashlen = 42 |
|
225 | 225 | else: |
|
226 | 226 | hashformatter = short |
|
227 | 227 | hashlen = 14 |
|
228 | 228 | |
|
229 | 229 | lastfilename = None |
|
230 | 230 | totaldeltasize = 0 |
|
231 | 231 | totalblobsize = 0 |
|
232 | 232 | def printtotals(): |
|
233 | 233 | if lastfilename is not None: |
|
234 | 234 | ui.write("\n") |
|
235 | 235 | if not totaldeltasize or not totalblobsize: |
|
236 | 236 | return |
|
237 | 237 | difference = totalblobsize - totaldeltasize |
|
238 | 238 | deltastr = "%0.1f%% %s" % ( |
|
239 | 239 | (100.0 * abs(difference) / totalblobsize), |
|
240 | 240 | ("smaller" if difference > 0 else "bigger")) |
|
241 | 241 | |
|
242 | 242 | ui.write(("Total:%s%s %s (%s)\n") % ( |
|
243 | 243 | "".ljust(2 * hashlen - len("Total:")), |
|
244 | 244 | str(totaldeltasize).ljust(12), |
|
245 | 245 | str(totalblobsize).ljust(9), |
|
246 | 246 | deltastr |
|
247 | 247 | )) |
|
248 | 248 | |
|
249 | 249 | bases = {} |
|
250 | 250 | nodes = set() |
|
251 | 251 | failures = 0 |
|
252 | 252 | for filename, node, deltabase, deltalen in dpack.iterentries(): |
|
253 | 253 | bases[node] = deltabase |
|
254 | 254 | if node in nodes: |
|
255 | 255 | ui.write(("Bad entry: %s appears twice\n" % short(node))) |
|
256 | 256 | failures += 1 |
|
257 | 257 | nodes.add(node) |
|
258 | 258 | if filename != lastfilename: |
|
259 | 259 | printtotals() |
|
260 | 260 | name = '(empty name)' if filename == '' else filename |
|
261 | 261 | ui.write("%s:\n" % name) |
|
262 | 262 | ui.write("%s%s%s%s\n" % ( |
|
263 | 263 | "Node".ljust(hashlen), |
|
264 | 264 | "Delta Base".ljust(hashlen), |
|
265 | 265 | "Delta Length".ljust(14), |
|
266 | 266 | "Blob Size".ljust(9))) |
|
267 | 267 | lastfilename = filename |
|
268 | 268 | totalblobsize = 0 |
|
269 | 269 | totaldeltasize = 0 |
|
270 | 270 | |
|
271 | 271 | # Metadata could be missing, in which case it will be an empty dict. |
|
272 | 272 | meta = dpack.getmeta(filename, node) |
|
273 | 273 | if constants.METAKEYSIZE in meta: |
|
274 | 274 | blobsize = meta[constants.METAKEYSIZE] |
|
275 | 275 | totaldeltasize += deltalen |
|
276 | 276 | totalblobsize += blobsize |
|
277 | 277 | else: |
|
278 | 278 | blobsize = "(missing)" |
|
279 | 279 | ui.write("%s %s %s%s\n" % ( |
|
280 | 280 | hashformatter(node), |
|
281 | 281 | hashformatter(deltabase), |
|
282 | 282 | str(deltalen).ljust(14), |
|
283 | 283 | blobsize)) |
|
284 | 284 | |
|
285 | 285 | if filename is not None: |
|
286 | 286 | printtotals() |
|
287 | 287 | |
|
288 | 288 | failures += _sanitycheck(ui, set(nodes), bases) |
|
289 | 289 | if failures > 1: |
|
290 | 290 | ui.warn(("%d failures\n" % failures)) |
|
291 | 291 | return 1 |
|
292 | 292 | |
|
293 | 293 | def _sanitycheck(ui, nodes, bases): |
|
294 | 294 | """ |
|
295 | 295 | Does some basic sanity checking on a packfiles with ``nodes`` ``bases`` (a |
|
296 | 296 | mapping of node->base): |
|
297 | 297 | |
|
298 | 298 | - Each deltabase must itself be a node elsewhere in the pack |
|
299 | 299 | - There must be no cycles |
|
300 | 300 | """ |
|
301 | 301 | failures = 0 |
|
302 | 302 | for node in nodes: |
|
303 | 303 | seen = set() |
|
304 | 304 | current = node |
|
305 | 305 | deltabase = bases[current] |
|
306 | 306 | |
|
307 | 307 | while deltabase != nullid: |
|
308 | 308 | if deltabase not in nodes: |
|
309 | 309 | ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" % |
|
310 | 310 | (short(node), short(deltabase)))) |
|
311 | 311 | failures += 1 |
|
312 | 312 | break |
|
313 | 313 | |
|
314 | 314 | if deltabase in seen: |
|
315 | 315 | ui.warn(("Bad entry: %s has a cycle (at %s)\n" % |
|
316 | 316 | (short(node), short(deltabase)))) |
|
317 | 317 | failures += 1 |
|
318 | 318 | break |
|
319 | 319 | |
|
320 | 320 | current = deltabase |
|
321 | 321 | seen.add(current) |
|
322 | 322 | deltabase = bases[current] |
|
323 | 323 | # Since ``node`` begins a valid chain, reset/memoize its base to nullid |
|
324 | 324 | # so we don't traverse it again. |
|
325 | 325 | bases[node] = nullid |
|
326 | 326 | return failures |
|
327 | 327 | |
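To illustrate the two invariants, here is a hypothetical, self-contained sketch that mirrors the walk _sanitycheck() performs over a toy node-to-deltabase mapping (short byte strings stand in for real 20-byte nodes):

    nullid = b'\x00' * 20

    bases = {
        b'n1': nullid,   # ok: the chain ends at nullid
        b'n2': b'n1',    # ok: the base is another node in the pack
        b'n3': b'n4',    # cycle: n3 -> n4 -> n3
        b'n4': b'n3',
        b'n5': b'n9',    # bad: deltabase is not in the pack at all
    }

    def checkchains(bases):
        nodes, failures = set(bases), 0
        for node in nodes:
            seen, current = set(), node
            deltabase = bases[current]
            while deltabase != nullid:
                if deltabase not in nodes:
                    failures += 1              # unknown delta base
                    break
                if deltabase in seen:
                    failures += 1              # cycle in the delta chain
                    break
                current = deltabase
                seen.add(current)
                deltabase = bases[current]
            # memoize the walked chain so it is not reported twice
            bases[node] = nullid
        return failures

    assert checkchains(dict(bases)) == 2       # one cycle, one missing base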
|
328 | 328 | def dumpdeltachain(ui, deltachain, **opts): |
|
329 | 329 | hashformatter = hex |
|
330 | 330 | hashlen = 40 |
|
331 | 331 | |
|
332 | 332 | lastfilename = None |
|
333 | 333 | for filename, node, filename, deltabasenode, delta in deltachain: |
|
334 | 334 | if filename != lastfilename: |
|
335 | 335 | ui.write("\n%s\n" % filename) |
|
336 | 336 | lastfilename = filename |
|
337 | 337 | ui.write("%s %s %s %s\n" % ( |
|
338 | 338 | "Node".ljust(hashlen), |
|
339 | 339 | "Delta Base".ljust(hashlen), |
|
340 | 340 | "Delta SHA1".ljust(hashlen), |
|
341 | 341 | "Delta Length".ljust(6), |
|
342 | 342 | )) |
|
343 | 343 | |
|
344 | 344 | ui.write("%s %s %s %s\n" % ( |
|
345 | 345 | hashformatter(node), |
|
346 | 346 | hashformatter(deltabasenode), |
|
347 | 347 | hashlib.sha1(delta).hexdigest(), |
|
348 | 348 | len(delta))) |
|
349 | 349 | |
|
350 | 350 | def debughistorypack(ui, path): |
|
351 | 351 | if '.hist' in path: |
|
352 | 352 | path = path[:path.index('.hist')] |
|
353 | 353 | hpack = historypack.historypack(path) |
|
354 | 354 | |
|
355 | 355 | lastfilename = None |
|
356 | 356 | for entry in hpack.iterentries(): |
|
357 | 357 | filename, node, p1node, p2node, linknode, copyfrom = entry |
|
358 | 358 | if filename != lastfilename: |
|
359 | 359 | ui.write("\n%s\n" % filename) |
|
360 | 360 | ui.write("%s%s%s%s%s\n" % ( |
|
361 | 361 | "Node".ljust(14), |
|
362 | 362 | "P1 Node".ljust(14), |
|
363 | 363 | "P2 Node".ljust(14), |
|
364 | 364 | "Link Node".ljust(14), |
|
365 | 365 | "Copy From")) |
|
366 | 366 | lastfilename = filename |
|
367 | 367 | ui.write("%s %s %s %s %s\n" % (short(node), short(p1node), |
|
368 | 368 | short(p2node), short(linknode), copyfrom)) |
|
369 | 369 | |
|
370 | 370 | def debugwaitonrepack(repo): |
|
371 | 371 | with extutil.flock(repack.repacklockvfs(repo).join('repacklock'), ''): |
|
372 | 372 | return |
|
373 | 373 | |
|
374 | 374 | def debugwaitonprefetch(repo): |
|
375 | 375 | with repo._lock(repo.svfs, "prefetchlock", True, None, |
|
376 | 376 | None, _('prefetching in %s') % repo.origroot): |
|
377 | 377 | pass |
@@ -1,418 +1,418 b'' | |||
|
1 | 1 | # remotefilelogserver.py - server logic for a remotefilelog server |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import errno |
|
10 | 10 | import os |
|
11 | 11 | import stat |
|
12 | 12 | import time |
|
13 | 13 | import zlib |
|
14 | 14 | |
|
15 | 15 | from mercurial.i18n import _ |
|
16 | 16 | from mercurial.node import bin, hex, nullid |
|
17 | 17 | from mercurial import ( |
|
18 | 18 | changegroup, |
|
19 | 19 | changelog, |
|
20 | 20 | context, |
|
21 | 21 | error, |
|
22 | 22 | extensions, |
|
23 | 23 | match, |
|
24 | 24 | store, |
|
25 | 25 | streamclone, |
|
26 | 26 | util, |
|
27 | 27 | wireprotoserver, |
|
28 | 28 | wireprototypes, |
|
29 | 29 | wireprotov1server, |
|
30 | 30 | ) |
|
31 | 31 | from . import ( |
|
32 | 32 | constants, |
|
33 | 33 | shallowutil, |
|
34 | 34 | ) |
|
35 | 35 | |
|
36 | 36 | _sshv1server = wireprotoserver.sshv1protocolhandler |
|
37 | 37 | |
|
38 | 38 | def setupserver(ui, repo): |
|
39 | 39 | """Sets up a normal Mercurial repo so it can serve files to shallow repos. |
|
40 | 40 | """ |
|
41 | 41 | onetimesetup(ui) |
|
42 | 42 | |
|
43 | 43 | # don't send files to shallow clients during pulls |
|
44 | 44 | def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source, |
|
45 | 45 | *args, **kwargs): |
|
46 | 46 | caps = self._bundlecaps or [] |
|
47 | 47 | if constants.BUNDLE2_CAPABLITY in caps: |
|
48 | 48 | # only send files that don't match the specified patterns |
|
49 | 49 | includepattern = None |
|
50 | 50 | excludepattern = None |
|
51 | 51 | for cap in (self._bundlecaps or []): |
|
52 | 52 | if cap.startswith("includepattern="): |
|
53 | 53 | includepattern = cap[len("includepattern="):].split('\0') |
|
54 | 54 | elif cap.startswith("excludepattern="): |
|
55 | 55 | excludepattern = cap[len("excludepattern="):].split('\0') |
|
56 | 56 | |
|
57 | 57 | m = match.always(repo.root, '') |
|
58 | 58 | if includepattern or excludepattern: |
|
59 | 59 | m = match.match(repo.root, '', None, |
|
60 | 60 | includepattern, excludepattern) |
|
61 | 61 | |
|
62 | 62 | changedfiles = list([f for f in changedfiles if not m(f)]) |
|
63 | 63 | return orig(self, changedfiles, linknodes, commonrevs, source, |
|
64 | 64 | *args, **kwargs) |
|
65 | 65 | |
|
66 | 66 | extensions.wrapfunction( |
|
67 | 67 | changegroup.cgpacker, 'generatefiles', generatefiles) |
|
68 | 68 | |
|
69 | 69 | onetime = False |
|
70 | 70 | def onetimesetup(ui): |
|
71 | 71 | """Configures the wireprotocol for both clients and servers. |
|
72 | 72 | """ |
|
73 | 73 | global onetime |
|
74 | 74 | if onetime: |
|
75 | 75 | return |
|
76 | 76 | onetime = True |
|
77 | 77 | |
|
78 | 78 | # support file content requests |
|
79 | 79 | wireprotov1server.wireprotocommand( |
|
80 | 80 | 'x_rfl_getflogheads', 'path', permission='pull')(getflogheads) |
|
81 | 81 | wireprotov1server.wireprotocommand( |
|
82 | 82 | 'x_rfl_getfiles', '', permission='pull')(getfiles) |
|
83 | 83 | wireprotov1server.wireprotocommand( |
|
84 | 84 | 'x_rfl_getfile', 'file node', permission='pull')(getfile) |
|
85 | 85 | |
|
86 | 86 | class streamstate(object): |
|
87 | 87 | match = None |
|
88 | 88 | shallowremote = False |
|
89 | 89 | noflatmf = False |
|
90 | 90 | state = streamstate() |
|
91 | 91 | |
|
92 | 92 | def stream_out_shallow(repo, proto, other): |
|
93 | 93 | includepattern = None |
|
94 | 94 | excludepattern = None |
|
95 | 95 | raw = other.get('includepattern') |
|
96 | 96 | if raw: |
|
97 | 97 | includepattern = raw.split('\0') |
|
98 | 98 | raw = other.get('excludepattern') |
|
99 | 99 | if raw: |
|
100 | 100 | excludepattern = raw.split('\0') |
|
101 | 101 | |
|
102 | 102 | oldshallow = state.shallowremote |
|
103 | 103 | oldmatch = state.match |
|
104 | 104 | oldnoflatmf = state.noflatmf |
|
105 | 105 | try: |
|
106 | 106 | state.shallowremote = True |
|
107 | 107 | state.match = match.always(repo.root, '') |
|
108 | 108 | state.noflatmf = other.get('noflatmanifest') == 'True' |
|
109 | 109 | if includepattern or excludepattern: |
|
110 | 110 | state.match = match.match(repo.root, '', None, |
|
111 | 111 | includepattern, excludepattern) |
|
112 | 112 | streamres = wireprotov1server.stream(repo, proto) |
|
113 | 113 | |
|
114 | 114 | # Force the first value to execute, so the file list is computed |
|
115 | 115 | # within the try/finally scope |
|
116 | 116 | first = next(streamres.gen) |
|
117 | 117 | second = next(streamres.gen) |
|
118 | 118 | def gen(): |
|
119 | 119 | yield first |
|
120 | 120 | yield second |
|
121 | 121 | for value in streamres.gen: |
|
122 | 122 | yield value |
|
123 | 123 | return wireprototypes.streamres(gen()) |
|
124 | 124 | finally: |
|
125 | 125 | state.shallowremote = oldshallow |
|
126 | 126 | state.match = oldmatch |
|
127 | 127 | state.noflatmf = oldnoflatmf |
|
128 | 128 | |
|
129 | 129 | wireprotov1server.commands['stream_out_shallow'] = (stream_out_shallow, '*') |
|
130 | 130 | |
|
131 | 131 | # don't clone filelogs to shallow clients |
|
132 | 132 | def _walkstreamfiles(orig, repo): |
|
133 | 133 | if state.shallowremote: |
|
134 | 134 | # if we are shallow ourselves, stream our local commits |
|
135 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
135 | if shallowutil.isenabled(repo): | |
|
136 | 136 | striplen = len(repo.store.path) + 1 |
|
137 | 137 | readdir = repo.store.rawvfs.readdir |
|
138 | 138 | visit = [os.path.join(repo.store.path, 'data')] |
|
139 | 139 | while visit: |
|
140 | 140 | p = visit.pop() |
|
141 | 141 | for f, kind, st in readdir(p, stat=True): |
|
142 | 142 | fp = p + '/' + f |
|
143 | 143 | if kind == stat.S_IFREG: |
|
144 | 144 | if not fp.endswith('.i') and not fp.endswith('.d'): |
|
145 | 145 | n = util.pconvert(fp[striplen:]) |
|
146 | 146 | yield (store.decodedir(n), n, st.st_size) |
|
147 | 147 | if kind == stat.S_IFDIR: |
|
148 | 148 | visit.append(fp) |
|
149 | 149 | |
|
150 | 150 | if 'treemanifest' in repo.requirements: |
|
151 | 151 | for (u, e, s) in repo.store.datafiles(): |
|
152 | 152 | if (u.startswith('meta/') and |
|
153 | 153 | (u.endswith('.i') or u.endswith('.d'))): |
|
154 | 154 | yield (u, e, s) |
|
155 | 155 | |
|
156 | 156 | # Return .d and .i files that do not match the shallow pattern |
|
157 | 157 | match = state.match |
|
158 | 158 | if match and not match.always(): |
|
159 | 159 | for (u, e, s) in repo.store.datafiles(): |
|
160 | 160 | f = u[5:-2] # trim data/... and .i/.d |
|
161 | 161 | if not state.match(f): |
|
162 | 162 | yield (u, e, s) |
|
163 | 163 | |
|
164 | 164 | for x in repo.store.topfiles(): |
|
165 | 165 | if state.noflatmf and x[0][:11] == '00manifest.': |
|
166 | 166 | continue |
|
167 | 167 | yield x |
|
168 | 168 | |
|
169 | elif constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
169 | elif shallowutil.isenabled(repo): | |
|
170 | 170 | # don't allow cloning from a shallow repo to a full repo |
|
171 | 171 | # since it would require fetching every version of every |
|
172 | 172 | # file in order to create the revlogs. |
|
173 | 173 | raise error.Abort(_("Cannot clone from a shallow repo " |
|
174 | 174 | "to a full repo.")) |
|
175 | 175 | else: |
|
176 | 176 | for x in orig(repo): |
|
177 | 177 | yield x |
|
178 | 178 | |
|
179 | 179 | extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles) |
|
180 | 180 | |
|
181 | 181 | # We no longer use getbundle_shallow commands, but we must still |
|
182 | 182 | # support it for migration purposes |
|
183 | 183 | def getbundleshallow(repo, proto, others): |
|
184 | 184 | bundlecaps = others.get('bundlecaps', '') |
|
185 | 185 | bundlecaps = set(bundlecaps.split(',')) |
|
186 | 186 | bundlecaps.add(constants.BUNDLE2_CAPABLITY) |
|
187 | 187 | others['bundlecaps'] = ','.join(bundlecaps) |
|
188 | 188 | |
|
189 | 189 | return wireprotov1server.commands["getbundle"][0](repo, proto, others) |
|
190 | 190 | |
|
191 | 191 | wireprotov1server.commands["getbundle_shallow"] = (getbundleshallow, '*') |
|
192 | 192 | |
|
193 | 193 | # expose remotefilelog capabilities |
|
194 | 194 | def _capabilities(orig, repo, proto): |
|
195 | 195 | caps = orig(repo, proto) |
|
196 | if ((constants.SHALLOWREPO_REQUIREMENT in repo.requirements or | |
|
197 | ui.configbool('remotefilelog', 'server'))): | |
|
196 | if (shallowutil.isenabled(repo) or ui.configbool('remotefilelog', | |
|
197 | 'server')): | |
|
198 | 198 | if isinstance(proto, _sshv1server): |
|
199 | 199 | # legacy getfiles method which only works over ssh |
|
200 | 200 | caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES) |
|
201 | 201 | caps.append('x_rfl_getflogheads') |
|
202 | 202 | caps.append('x_rfl_getfile') |
|
203 | 203 | return caps |
|
204 | 204 | extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities) |
|
205 | 205 | |
|
206 | 206 | def _adjustlinkrev(orig, self, *args, **kwargs): |
|
207 | 207 | # When generating file blobs, taking the real path is too slow on large |
|
208 | 208 | # repos, so force it to just return the linkrev directly. |
|
209 | 209 | repo = self._repo |
|
210 | 210 | if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev: |
|
211 | 211 | return self._filelog.linkrev(self._filelog.rev(self._filenode)) |
|
212 | 212 | return orig(self, *args, **kwargs) |
|
213 | 213 | |
|
214 | 214 | extensions.wrapfunction( |
|
215 | 215 | context.basefilectx, '_adjustlinkrev', _adjustlinkrev) |
|
216 | 216 | |
|
217 | 217 | def _iscmd(orig, cmd): |
|
218 | 218 | if cmd == 'x_rfl_getfiles': |
|
219 | 219 | return False |
|
220 | 220 | return orig(cmd) |
|
221 | 221 | |
|
222 | 222 | extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd) |
|
223 | 223 | |
|
224 | 224 | def _loadfileblob(repo, cachepath, path, node): |
|
225 | 225 | filecachepath = os.path.join(cachepath, path, hex(node)) |
|
226 | 226 | if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0: |
|
227 | 227 | filectx = repo.filectx(path, fileid=node) |
|
228 | 228 | if filectx.node() == nullid: |
|
229 | 229 | repo.changelog = changelog.changelog(repo.svfs) |
|
230 | 230 | filectx = repo.filectx(path, fileid=node) |
|
231 | 231 | |
|
232 | 232 | text = createfileblob(filectx) |
|
233 | 233 | # TODO configurable compression engines |
|
234 | 234 | text = zlib.compress(text) |
|
235 | 235 | |
|
236 | 236 | # everything should be user & group read/writable |
|
237 | 237 | oldumask = os.umask(0o002) |
|
238 | 238 | try: |
|
239 | 239 | dirname = os.path.dirname(filecachepath) |
|
240 | 240 | if not os.path.exists(dirname): |
|
241 | 241 | try: |
|
242 | 242 | os.makedirs(dirname) |
|
243 | 243 | except OSError as ex: |
|
244 | 244 | if ex.errno != errno.EEXIST: |
|
245 | 245 | raise |
|
246 | 246 | |
|
247 | 247 | f = None |
|
248 | 248 | try: |
|
249 | 249 | f = util.atomictempfile(filecachepath, "w") |
|
250 | 250 | f.write(text) |
|
251 | 251 | except (IOError, OSError): |
|
252 | 252 | # Don't abort if the user only has permission to read, |
|
253 | 253 | # and not write. |
|
254 | 254 | pass |
|
255 | 255 | finally: |
|
256 | 256 | if f: |
|
257 | 257 | f.close() |
|
258 | 258 | finally: |
|
259 | 259 | os.umask(oldumask) |
|
260 | 260 | else: |
|
261 | 261 | with open(filecachepath, "r") as f: |
|
262 | 262 | text = f.read() |
|
263 | 263 | return text |
|
264 | 264 | |
|
265 | 265 | def getflogheads(repo, proto, path): |
|
266 | 266 | """A server api for requesting a filelog's heads |
|
267 | 267 | """ |
|
268 | 268 | flog = repo.file(path) |
|
269 | 269 | heads = flog.heads() |
|
270 | 270 | return '\n'.join((hex(head) for head in heads if head != nullid)) |
|
271 | 271 | |
|
272 | 272 | def getfile(repo, proto, file, node): |
|
273 | 273 | """A server api for requesting a particular version of a file. Can be used |
|
274 | 274 | in batches to request many files at once. The return protocol is: |
|
275 | 275 | <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or |
|
276 | 276 | non-zero for an error. |
|
277 | 277 | |
|
278 | 278 | data is a compressed blob with revlog flag and ancestors information. See |
|
279 | 279 | createfileblob for its content. |
|
280 | 280 | """ |
|
281 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
281 | if shallowutil.isenabled(repo): | |
|
282 | 282 | return '1\0' + _('cannot fetch remote files from shallow repo') |
|
283 | 283 | cachepath = repo.ui.config("remotefilelog", "servercachepath") |
|
284 | 284 | if not cachepath: |
|
285 | 285 | cachepath = os.path.join(repo.path, "remotefilelogcache") |
|
286 | 286 | node = bin(node.strip()) |
|
287 | 287 | if node == nullid: |
|
288 | 288 | return '0\0' |
|
289 | 289 | return '0\0' + _loadfileblob(repo, cachepath, file, node) |
|
290 | 290 | |
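A hypothetical client-side sketch of the reply framing described in the getfile() docstring above: everything before the first NUL byte is the error code, and the remainder is either the compressed file blob or an error message:

    def parsegetfilereply(reply):
        code, _sep, payload = reply.partition(b'\0')
        if code != b'0':
            raise RuntimeError('getfile failed: %r' % payload)
        return payload

    assert parsegetfilereply(b'0\0compressed-blob-bytes') == b'compressed-blob-bytes'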
|
291 | 291 | def getfiles(repo, proto): |
|
292 | 292 | """A server api for requesting particular versions of particular files. |
|
293 | 293 | """ |
|
294 | if constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
294 | if shallowutil.isenabled(repo): | |
|
295 | 295 | raise error.Abort(_('cannot fetch remote files from shallow repo')) |
|
296 | 296 | if not isinstance(proto, _sshv1server): |
|
297 | 297 | raise error.Abort(_('cannot fetch remote files over non-ssh protocol')) |
|
298 | 298 | |
|
299 | 299 | def streamer(): |
|
300 | 300 | fin = proto._fin |
|
301 | 301 | |
|
302 | 302 | cachepath = repo.ui.config("remotefilelog", "servercachepath") |
|
303 | 303 | if not cachepath: |
|
304 | 304 | cachepath = os.path.join(repo.path, "remotefilelogcache") |
|
305 | 305 | |
|
306 | 306 | while True: |
|
307 | 307 | request = fin.readline()[:-1] |
|
308 | 308 | if not request: |
|
309 | 309 | break |
|
310 | 310 | |
|
311 | 311 | node = bin(request[:40]) |
|
312 | 312 | if node == nullid: |
|
313 | 313 | yield '0\n' |
|
314 | 314 | continue |
|
315 | 315 | |
|
316 | 316 | path = request[40:] |
|
317 | 317 | |
|
318 | 318 | text = _loadfileblob(repo, cachepath, path, node) |
|
319 | 319 | |
|
320 | 320 | yield '%d\n%s' % (len(text), text) |
|
321 | 321 | |
|
322 | 322 | # it would be better to only flush after processing a whole batch |
|
323 | 323 | # but currently we don't know if there are more requests coming |
|
324 | 324 | proto._fout.flush() |
|
325 | 325 | return wireprototypes.streamres(streamer()) |
|
326 | 326 | |
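For the legacy ssh getfiles loop above, a rough sketch of the batch framing with made-up paths and nodes: each request line is 40 hex characters of the node followed by the path, an empty line ends the batch, and each reply is the blob length, a newline, then the blob:

    import binascii

    def encoderequest(path, node):
        # 40 hex characters of the node, then the path, newline-terminated
        return binascii.hexlify(node) + path + b'\n'

    batch = (
        encoderequest(b'foo/bar.txt', b'\x11' * 20)
        + encoderequest(b'baz.txt', b'\x22' * 20)
        + b'\n'                                # empty line: end of batch
    )

    def encodereply(blob):
        # matches the "%d\n%s" framing yielded by the streamer above
        return b'%d\n%s' % (len(blob), blob)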
|
327 | 327 | def createfileblob(filectx): |
|
328 | 328 | """ |
|
329 | 329 | format: |
|
330 | 330 | v0: |
|
331 | 331 | str(len(rawtext)) + '\0' + rawtext + ancestortext |
|
332 | 332 | v1: |
|
333 | 333 | 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext |
|
334 | 334 | metalist := metalist + '\n' + meta | meta |
|
335 | 335 | meta := sizemeta | flagmeta |
|
336 | 336 | sizemeta := METAKEYSIZE + str(len(rawtext)) |
|
337 | 337 | flagmeta := METAKEYFLAG + str(flag) |
|
338 | 338 | |
|
339 | 339 | note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a |
|
340 | 340 | length of 1. |
|
341 | 341 | """ |
|
342 | 342 | flog = filectx.filelog() |
|
343 | 343 | frev = filectx.filerev() |
|
344 | 344 | revlogflags = flog._revlog.flags(frev) |
|
345 | 345 | if revlogflags == 0: |
|
346 | 346 | # normal files |
|
347 | 347 | text = filectx.data() |
|
348 | 348 | else: |
|
349 | 349 | # lfs, read raw revision data |
|
350 | 350 | text = flog.revision(frev, raw=True) |
|
351 | 351 | |
|
352 | 352 | repo = filectx._repo |
|
353 | 353 | |
|
354 | 354 | ancestors = [filectx] |
|
355 | 355 | |
|
356 | 356 | try: |
|
357 | 357 | repo.forcelinkrev = True |
|
358 | 358 | ancestors.extend([f for f in filectx.ancestors()]) |
|
359 | 359 | |
|
360 | 360 | ancestortext = "" |
|
361 | 361 | for ancestorctx in ancestors: |
|
362 | 362 | parents = ancestorctx.parents() |
|
363 | 363 | p1 = nullid |
|
364 | 364 | p2 = nullid |
|
365 | 365 | if len(parents) > 0: |
|
366 | 366 | p1 = parents[0].filenode() |
|
367 | 367 | if len(parents) > 1: |
|
368 | 368 | p2 = parents[1].filenode() |
|
369 | 369 | |
|
370 | 370 | copyname = "" |
|
371 | 371 | rename = ancestorctx.renamed() |
|
372 | 372 | if rename: |
|
373 | 373 | copyname = rename[0] |
|
374 | 374 | linknode = ancestorctx.node() |
|
375 | 375 | ancestortext += "%s%s%s%s%s\0" % ( |
|
376 | 376 | ancestorctx.filenode(), p1, p2, linknode, |
|
377 | 377 | copyname) |
|
378 | 378 | finally: |
|
379 | 379 | repo.forcelinkrev = False |
|
380 | 380 | |
|
381 | 381 | header = shallowutil.buildfileblobheader(len(text), revlogflags) |
|
382 | 382 | |
|
383 | 383 | return "%s\0%s%s" % (header, text, ancestortext) |
|
384 | 384 | |
|
385 | 385 | def gcserver(ui, repo): |
|
386 | 386 | if not repo.ui.configbool("remotefilelog", "server"): |
|
387 | 387 | return |
|
388 | 388 | |
|
389 | 389 | neededfiles = set() |
|
390 | 390 | heads = repo.revs("heads(tip~25000:) - null") |
|
391 | 391 | |
|
392 | 392 | cachepath = repo.vfs.join("remotefilelogcache") |
|
393 | 393 | for head in heads: |
|
394 | 394 | mf = repo[head].manifest() |
|
395 | 395 | for filename, filenode in mf.iteritems(): |
|
396 | 396 | filecachepath = os.path.join(cachepath, filename, hex(filenode)) |
|
397 | 397 | neededfiles.add(filecachepath) |
|
398 | 398 | |
|
399 | 399 | # delete unneeded older files |
|
400 | 400 | days = repo.ui.configint("remotefilelog", "serverexpiration") |
|
401 | 401 | expiration = time.time() - (days * 24 * 60 * 60) |
|
402 | 402 | |
|
403 | 403 | _removing = _("removing old server cache") |
|
404 | 404 | count = 0 |
|
405 | 405 | ui.progress(_removing, count, unit="files") |
|
406 | 406 | for root, dirs, files in os.walk(cachepath): |
|
407 | 407 | for file in files: |
|
408 | 408 | filepath = os.path.join(root, file) |
|
409 | 409 | count += 1 |
|
410 | 410 | ui.progress(_removing, count, unit="files") |
|
411 | 411 | if filepath in neededfiles: |
|
412 | 412 | continue |
|
413 | 413 | |
|
414 | 414 | stat = os.stat(filepath) |
|
415 | 415 | if stat.st_mtime < expiration: |
|
416 | 416 | os.remove(filepath) |
|
417 | 417 | |
|
418 | 418 | ui.progress(_removing, None) |
@@ -1,294 +1,294 b'' | |||
|
1 | 1 | # shallowbundle.py - bundle10 implementation for use with shallow repositories |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | from mercurial.i18n import _ |
|
10 | 10 | from mercurial.node import bin, hex, nullid |
|
11 | 11 | from mercurial import ( |
|
12 | 12 | bundlerepo, |
|
13 | 13 | changegroup, |
|
14 | 14 | error, |
|
15 | 15 | match, |
|
16 | 16 | mdiff, |
|
17 | 17 | pycompat, |
|
18 | 18 | ) |
|
19 | 19 | from . import ( |
|
20 | 20 | constants, |
|
21 | 21 | remotefilelog, |
|
22 | 22 | shallowutil, |
|
23 | 23 | ) |
|
24 | 24 | |
|
25 | 25 | NoFiles = 0 |
|
26 | 26 | LocalFiles = 1 |
|
27 | 27 | AllFiles = 2 |
|
28 | 28 | |
|
29 | 29 | def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None): |
|
30 | 30 | if not isinstance(rlog, remotefilelog.remotefilelog): |
|
31 | 31 | for c in super(cls, self).group(nodelist, rlog, lookup, |
|
32 | 32 | units=units): |
|
33 | 33 | yield c |
|
34 | 34 | return |
|
35 | 35 | |
|
36 | 36 | if len(nodelist) == 0: |
|
37 | 37 | yield self.close() |
|
38 | 38 | return |
|
39 | 39 | |
|
40 | 40 | nodelist = shallowutil.sortnodes(nodelist, rlog.parents) |
|
41 | 41 | |
|
42 | 42 | # add the parent of the first rev |
|
43 | 43 | p = rlog.parents(nodelist[0])[0] |
|
44 | 44 | nodelist.insert(0, p) |
|
45 | 45 | |
|
46 | 46 | # build deltas |
|
47 | 47 | for i in pycompat.xrange(len(nodelist) - 1): |
|
48 | 48 | prev, curr = nodelist[i], nodelist[i + 1] |
|
49 | 49 | linknode = lookup(curr) |
|
50 | 50 | for c in self.nodechunk(rlog, curr, prev, linknode): |
|
51 | 51 | yield c |
|
52 | 52 | |
|
53 | 53 | yield self.close() |
|
54 | 54 | |
|
55 | 55 | class shallowcg1packer(changegroup.cgpacker): |
|
56 | 56 | def generate(self, commonrevs, clnodes, fastpathlinkrev, source): |
|
57 | if constants.SHALLOWREPO_REQUIREMENT in self._repo.requirements: | |
|
57 | if shallowutil.isenabled(self._repo): | |
|
58 | 58 | fastpathlinkrev = False |
|
59 | 59 | |
|
60 | 60 | return super(shallowcg1packer, self).generate(commonrevs, clnodes, |
|
61 | 61 | fastpathlinkrev, source) |
|
62 | 62 | |
|
63 | 63 | def group(self, nodelist, rlog, lookup, units=None, reorder=None): |
|
64 | 64 | return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup, |
|
65 | 65 | units=units) |
|
66 | 66 | |
|
67 | 67 | def generatefiles(self, changedfiles, *args): |
|
68 | 68 | try: |
|
69 | 69 | linknodes, commonrevs, source = args |
|
70 | 70 | except ValueError: |
|
71 | 71 | commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args |
|
72 | if constants.SHALLOWREPO_REQUIREMENT in self._repo.requirements: | |
|
72 | if shallowutil.isenabled(self._repo): | |
|
73 | 73 | repo = self._repo |
|
74 | 74 | if isinstance(repo, bundlerepo.bundlerepository): |
|
75 | 75 | # If the bundle contains filelogs, we can't pull from it, since |
|
76 | 76 | # bundlerepo is heavily tied to revlogs. Require that
|
77 | 77 | # the user use unbundle instead. |
|
78 | 78 | # Force load the filelog data. |
|
79 | 79 | bundlerepo.bundlerepository.file(repo, 'foo') |
|
80 | 80 | if repo._cgfilespos: |
|
81 | 81 | raise error.Abort("cannot pull from full bundles", |
|
82 | 82 | hint="use `hg unbundle` instead") |
|
83 | 83 | return [] |
|
84 | 84 | filestosend = self.shouldaddfilegroups(source) |
|
85 | 85 | if filestosend == NoFiles: |
|
86 | 86 | changedfiles = list([f for f in changedfiles |
|
87 | 87 | if not repo.shallowmatch(f)]) |
|
88 | 88 | |
|
89 | 89 | return super(shallowcg1packer, self).generatefiles( |
|
90 | 90 | changedfiles, *args) |
|
91 | 91 | |
|
92 | 92 | def shouldaddfilegroups(self, source): |
|
93 | 93 | repo = self._repo |
|
94 | if not constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
94 | if not shallowutil.isenabled(repo): | |
|
95 | 95 | return AllFiles |
|
96 | 96 | |
|
97 | 97 | if source == "push" or source == "bundle": |
|
98 | 98 | return AllFiles |
|
99 | 99 | |
|
100 | 100 | caps = self._bundlecaps or [] |
|
101 | 101 | if source == "serve" or source == "pull": |
|
102 | 102 | if constants.BUNDLE2_CAPABLITY in caps: |
|
103 | 103 | return LocalFiles |
|
104 | 104 | else: |
|
105 | 105 | # Serving to a full repo requires us to serve everything |
|
106 | 106 | repo.ui.warn(_("pulling from a shallow repo\n")) |
|
107 | 107 | return AllFiles |
|
108 | 108 | |
|
109 | 109 | return NoFiles |
|
110 | 110 | |
|
111 | 111 | def prune(self, rlog, missing, commonrevs): |
|
112 | 112 | if not isinstance(rlog, remotefilelog.remotefilelog): |
|
113 | 113 | return super(shallowcg1packer, self).prune(rlog, missing, |
|
114 | 114 | commonrevs) |
|
115 | 115 | |
|
116 | 116 | repo = self._repo |
|
117 | 117 | results = [] |
|
118 | 118 | for fnode in missing: |
|
119 | 119 | fctx = repo.filectx(rlog.filename, fileid=fnode) |
|
120 | 120 | if fctx.linkrev() not in commonrevs: |
|
121 | 121 | results.append(fnode) |
|
122 | 122 | return results |
|
123 | 123 | |
|
124 | 124 | def nodechunk(self, revlog, node, prevnode, linknode): |
|
125 | 125 | prefix = '' |
|
126 | 126 | if prevnode == nullid: |
|
127 | 127 | delta = revlog.revision(node, raw=True) |
|
128 | 128 | prefix = mdiff.trivialdiffheader(len(delta)) |
|
129 | 129 | else: |
|
130 | 130 | # Actually uses remotefilelog.revdiff which works on nodes, not revs |
|
131 | 131 | delta = revlog.revdiff(prevnode, node) |
|
132 | 132 | p1, p2 = revlog.parents(node) |
|
133 | 133 | flags = revlog.flags(node) |
|
134 | 134 | meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags) |
|
135 | 135 | meta += prefix |
|
136 | 136 | l = len(meta) + len(delta) |
|
137 | 137 | yield changegroup.chunkheader(l) |
|
138 | 138 | yield meta |
|
139 | 139 | yield delta |
|
140 | 140 | |
|
141 | 141 | def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs): |
|
142 | if not constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
142 | if not shallowutil.isenabled(repo): | |
|
143 | 143 | return orig(repo, outgoing, version, source, *args, **kwargs) |
|
144 | 144 | |
|
145 | 145 | original = repo.shallowmatch |
|
146 | 146 | try: |
|
147 | 147 | # if serving, only send files the client has patterns for
|
148 | 148 | if source == 'serve': |
|
149 | 149 | bundlecaps = kwargs.get('bundlecaps') |
|
150 | 150 | includepattern = None |
|
151 | 151 | excludepattern = None |
|
152 | 152 | for cap in (bundlecaps or []): |
|
153 | 153 | if cap.startswith("includepattern="): |
|
154 | 154 | raw = cap[len("includepattern="):] |
|
155 | 155 | if raw: |
|
156 | 156 | includepattern = raw.split('\0') |
|
157 | 157 | elif cap.startswith("excludepattern="): |
|
158 | 158 | raw = cap[len("excludepattern="):] |
|
159 | 159 | if raw: |
|
160 | 160 | excludepattern = raw.split('\0') |
|
161 | 161 | if includepattern or excludepattern: |
|
162 | 162 | repo.shallowmatch = match.match(repo.root, '', None, |
|
163 | 163 | includepattern, excludepattern) |
|
164 | 164 | else: |
|
165 | 165 | repo.shallowmatch = match.always(repo.root, '') |
|
166 | 166 | return orig(repo, outgoing, version, source, *args, **kwargs) |
|
167 | 167 | finally: |
|
168 | 168 | repo.shallowmatch = original |
|
169 | 169 | |
|
170 | 170 | def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args): |
|
171 | if not constants.SHALLOWREPO_REQUIREMENT in repo.requirements: | |
|
171 | if not shallowutil.isenabled(repo): | |
|
172 | 172 | return orig(repo, source, revmap, trp, expectedfiles, *args) |
|
173 | 173 | |
|
174 | 174 | files = 0 |
|
175 | 175 | newfiles = 0 |
|
176 | 176 | visited = set() |
|
177 | 177 | revisiondatas = {} |
|
178 | 178 | queue = [] |
|
179 | 179 | |
|
180 | 180 | # Normal Mercurial processes each file one at a time, adding all |
|
181 | 181 | # the new revisions for that file at once. In remotefilelog a file |
|
182 | 182 | # revision may depend on a different file's revision (in the case |
|
183 | 183 | # of a rename/copy), so we must lay all revisions down across all |
|
184 | 184 | # files in topological order. |
|
185 | 185 | |
|
186 | 186 | # read all the file chunks but don't add them |
|
187 | 187 | while True: |
|
188 | 188 | chunkdata = source.filelogheader() |
|
189 | 189 | if not chunkdata: |
|
190 | 190 | break |
|
191 | 191 | files += 1 |
|
192 | 192 | f = chunkdata["filename"] |
|
193 | 193 | repo.ui.debug("adding %s revisions\n" % f) |
|
194 | 194 | repo.ui.progress(_('files'), files, total=expectedfiles) |
|
195 | 195 | |
|
196 | 196 | if not repo.shallowmatch(f): |
|
197 | 197 | fl = repo.file(f) |
|
198 | 198 | deltas = source.deltaiter() |
|
199 | 199 | fl.addgroup(deltas, revmap, trp) |
|
200 | 200 | continue |
|
201 | 201 | |
|
202 | 202 | chain = None |
|
203 | 203 | while True: |
|
204 | 204 | # returns: (node, p1, p2, cs, deltabase, delta, flags) or None |
|
205 | 205 | revisiondata = source.deltachunk(chain) |
|
206 | 206 | if not revisiondata: |
|
207 | 207 | break |
|
208 | 208 | |
|
209 | 209 | chain = revisiondata[0] |
|
210 | 210 | |
|
211 | 211 | revisiondatas[(f, chain)] = revisiondata |
|
212 | 212 | queue.append((f, chain)) |
|
213 | 213 | |
|
214 | 214 | if f not in visited: |
|
215 | 215 | newfiles += 1 |
|
216 | 216 | visited.add(f) |
|
217 | 217 | |
|
218 | 218 | if chain is None: |
|
219 | 219 | raise error.Abort(_("received file revlog group is empty")) |
|
220 | 220 | |
|
221 | 221 | processed = set() |
|
222 | 222 | def available(f, node, depf, depnode): |
|
223 | 223 | if depnode != nullid and (depf, depnode) not in processed: |
|
224 | 224 | if not (depf, depnode) in revisiondatas: |
|
225 | 225 | # It's not in the changegroup, assume it's already |
|
226 | 226 | # in the repo |
|
227 | 227 | return True |
|
228 | 228 | # re-add self to queue |
|
229 | 229 | queue.insert(0, (f, node)) |
|
230 | 230 | # add dependency in front |
|
231 | 231 | queue.insert(0, (depf, depnode)) |
|
232 | 232 | return False |
|
233 | 233 | return True |
|
234 | 234 | |
|
235 | 235 | skipcount = 0 |
|
236 | 236 | |
|
237 | 237 | # Prefetch the non-bundled revisions that we will need |
|
238 | 238 | prefetchfiles = [] |
|
239 | 239 | for f, node in queue: |
|
240 | 240 | revisiondata = revisiondatas[(f, node)] |
|
241 | 241 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) |
|
242 | 242 | dependents = [revisiondata[1], revisiondata[2], revisiondata[4]] |
|
243 | 243 | |
|
244 | 244 | for dependent in dependents: |
|
245 | 245 | if dependent == nullid or (f, dependent) in revisiondatas: |
|
246 | 246 | continue |
|
247 | 247 | prefetchfiles.append((f, hex(dependent))) |
|
248 | 248 | |
|
249 | 249 | repo.fileservice.prefetch(prefetchfiles) |
|
250 | 250 | |
|
251 | 251 | # Apply the revisions in topological order such that a revision |
|
252 | 252 | # is only written once its deltabase and parents have been written.
|
253 | 253 | while queue: |
|
254 | 254 | f, node = queue.pop(0) |
|
255 | 255 | if (f, node) in processed: |
|
256 | 256 | continue |
|
257 | 257 | |
|
258 | 258 | skipcount += 1 |
|
259 | 259 | if skipcount > len(queue) + 1: |
|
260 | 260 | raise error.Abort(_("circular node dependency")) |
|
261 | 261 | |
|
262 | 262 | fl = repo.file(f) |
|
263 | 263 | |
|
264 | 264 | revisiondata = revisiondatas[(f, node)] |
|
265 | 265 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) |
|
266 | 266 | node, p1, p2, linknode, deltabase, delta, flags = revisiondata |
|
267 | 267 | |
|
268 | 268 | if not available(f, node, f, deltabase): |
|
269 | 269 | continue |
|
270 | 270 | |
|
271 | 271 | base = fl.revision(deltabase, raw=True) |
|
272 | 272 | text = mdiff.patch(base, delta) |
|
273 | 273 | if isinstance(text, buffer): |
|
274 | 274 | text = str(text) |
|
275 | 275 | |
|
276 | 276 | meta, text = shallowutil.parsemeta(text) |
|
277 | 277 | if 'copy' in meta: |
|
278 | 278 | copyfrom = meta['copy'] |
|
279 | 279 | copynode = bin(meta['copyrev']) |
|
280 | 280 | if not available(f, node, copyfrom, copynode): |
|
281 | 281 | continue |
|
282 | 282 | |
|
283 | 283 | for p in [p1, p2]: |
|
284 | 284 | if p != nullid: |
|
285 | 285 | if not available(f, node, f, p): |
|
286 | 286 | continue |
|
287 | 287 | |
|
288 | 288 | fl.add(text, meta, trp, linknode, p1, p2) |
|
289 | 289 | processed.add((f, node)) |
|
290 | 290 | skipcount = 0 |
|
291 | 291 | |
|
292 | 292 | repo.ui.progress(_('files'), None) |
|
293 | 293 | |
|
294 | 294 | return len(revisiondatas), newfiles |
@@ -1,487 +1,491 b'' | |||
|
1 | 1 | # shallowutil.py -- remotefilelog utilities |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2014 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import collections |
|
10 | 10 | import errno |
|
11 | 11 | import hashlib |
|
12 | 12 | import os |
|
13 | 13 | import stat |
|
14 | 14 | import struct |
|
15 | 15 | import tempfile |
|
16 | 16 | |
|
17 | 17 | from mercurial.i18n import _ |
|
18 | 18 | from mercurial import ( |
|
19 | 19 | error, |
|
20 | 20 | pycompat, |
|
21 | 21 | revlog, |
|
22 | 22 | util, |
|
23 | 23 | ) |
|
24 | 24 | from mercurial.utils import ( |
|
25 | 25 | storageutil, |
|
26 | 26 | stringutil, |
|
27 | 27 | ) |
|
28 | 28 | from . import constants |
|
29 | 29 | |
|
30 | 30 | if not pycompat.iswindows: |
|
31 | 31 | import grp |
|
32 | 32 | |
|
33 | def isenabled(repo): | |
|
34 | """returns whether the repository is remotefilelog enabled or not""" | |
|
35 | return constants.SHALLOWREPO_REQUIREMENT in repo.requirements | |
|
36 | ||
|
33 | 37 | def getcachekey(reponame, file, id): |
|
34 | 38 | pathhash = hashlib.sha1(file).hexdigest() |
|
35 | 39 | return os.path.join(reponame, pathhash[:2], pathhash[2:], id) |
|
36 | 40 | |
|
37 | 41 | def getlocalkey(file, id): |
|
38 | 42 | pathhash = hashlib.sha1(file).hexdigest() |
|
39 | 43 | return os.path.join(pathhash, id) |
|
40 | 44 | |
|
41 | 45 | def getcachepath(ui, allowempty=False): |
|
42 | 46 | cachepath = ui.config("remotefilelog", "cachepath") |
|
43 | 47 | if not cachepath: |
|
44 | 48 | if allowempty: |
|
45 | 49 | return None |
|
46 | 50 | else: |
|
47 | 51 | raise error.Abort(_("could not find config option " |
|
48 | 52 | "remotefilelog.cachepath")) |
|
49 | 53 | return util.expandpath(cachepath) |
|
50 | 54 | |
|
51 | 55 | def getcachepackpath(repo, category): |
|
52 | 56 | cachepath = getcachepath(repo.ui) |
|
53 | 57 | if category != constants.FILEPACK_CATEGORY: |
|
54 | 58 | return os.path.join(cachepath, repo.name, 'packs', category) |
|
55 | 59 | else: |
|
56 | 60 | return os.path.join(cachepath, repo.name, 'packs') |
|
57 | 61 | |
|
58 | 62 | def getlocalpackpath(base, category): |
|
59 | 63 | return os.path.join(base, 'packs', category) |
|
60 | 64 | |
|
61 | 65 | def createrevlogtext(text, copyfrom=None, copyrev=None): |
|
62 | 66 | """returns a string that matches the revlog contents in a |
|
63 | 67 | traditional revlog |
|
64 | 68 | """ |
|
65 | 69 | meta = {} |
|
66 | 70 | if copyfrom or text.startswith('\1\n'): |
|
67 | 71 | if copyfrom: |
|
68 | 72 | meta['copy'] = copyfrom |
|
69 | 73 | meta['copyrev'] = copyrev |
|
70 | 74 | text = storageutil.packmeta(meta, text) |
|
71 | 75 | |
|
72 | 76 | return text |
|
73 | 77 | |
|
74 | 78 | def parsemeta(text): |
|
75 | 79 | """parse mercurial filelog metadata""" |
|
76 | 80 | meta, size = storageutil.parsemeta(text) |
|
77 | 81 | if text.startswith('\1\n'): |
|
78 | 82 | s = text.index('\1\n', 2) |
|
79 | 83 | text = text[s + 2:] |
|
80 | 84 | return meta or {}, text |
|
81 | 85 | |
|
82 | 86 | def sumdicts(*dicts): |
|
83 | 87 | """Adds all the values of *dicts together into one dictionary. This assumes |
|
84 | 88 | the values in *dicts are all summable. |
|
85 | 89 | |
|
86 | 90 | e.g. [{'a': 4, 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1}
|
87 | 91 | """ |
|
88 | 92 | result = collections.defaultdict(lambda: 0) |
|
89 | 93 | for dict in dicts: |
|
90 | 94 | for k, v in dict.iteritems(): |
|
91 | 95 | result[k] += v |
|
92 | 96 | return result |
|
93 | 97 | |
|
94 | 98 | def prefixkeys(dict, prefix): |
|
95 | 99 | """Returns ``dict`` with ``prefix`` prepended to all its keys.""" |
|
96 | 100 | result = {} |
|
97 | 101 | for k, v in dict.iteritems(): |
|
98 | 102 | result[prefix + k] = v |
|
99 | 103 | return result |
|
100 | 104 | |
|
101 | 105 | def reportpackmetrics(ui, prefix, *stores): |
|
102 | 106 | dicts = [s.getmetrics() for s in stores] |
|
103 | 107 | dict = prefixkeys(sumdicts(*dicts), prefix + '_') |
|
104 | 108 | ui.log(prefix + "_packsizes", "", **dict) |
|
105 | 109 | |
|
106 | 110 | def _parsepackmeta(metabuf): |
|
107 | 111 | """parse datapack meta, bytes (<metadata-list>) -> dict |
|
108 | 112 | |
|
109 | 113 | The dict contains raw content - both keys and values are strings. |
|
110 | 114 | Upper-level business may want to convert some of them to other types like |
|
111 | 115 | integers, on their own. |
|
112 | 116 | |
|
113 | 117 | raise ValueError if the data is corrupted |
|
114 | 118 | """ |
|
115 | 119 | metadict = {} |
|
116 | 120 | offset = 0 |
|
117 | 121 | buflen = len(metabuf) |
|
118 | 122 | while buflen - offset >= 3: |
|
119 | 123 | key = metabuf[offset] |
|
120 | 124 | offset += 1 |
|
121 | 125 | metalen = struct.unpack_from('!H', metabuf, offset)[0] |
|
122 | 126 | offset += 2 |
|
123 | 127 | if offset + metalen > buflen: |
|
124 | 128 | raise ValueError('corrupted metadata: incomplete buffer') |
|
125 | 129 | value = metabuf[offset:offset + metalen] |
|
126 | 130 | metadict[key] = value |
|
127 | 131 | offset += metalen |
|
128 | 132 | if offset != buflen: |
|
129 | 133 | raise ValueError('corrupted metadata: redundant data') |
|
130 | 134 | return metadict |
|
131 | 135 | |
|
132 | 136 | def _buildpackmeta(metadict): |
|
133 | 137 | """reverse of _parsepackmeta, dict -> bytes (<metadata-list>) |
|
134 | 138 | |
|
135 | 139 | The dict contains raw content - both keys and values are strings. |
|
136 | 140 | Upper-level business may want to serialize some of other types (like |
|
137 | 141 | integers) to strings before calling this function. |
|
138 | 142 | |
|
139 | 143 | raise ProgrammingError when metadata key is illegal, or ValueError if |
|
140 | 144 | length limit is exceeded |
|
141 | 145 | """ |
|
142 | 146 | metabuf = '' |
|
143 | 147 | for k, v in sorted((metadict or {}).iteritems()): |
|
144 | 148 | if len(k) != 1: |
|
145 | 149 | raise error.ProgrammingError('packmeta: illegal key: %s' % k) |
|
146 | 150 | if len(v) > 0xfffe: |
|
147 | 151 | raise ValueError('metadata value is too long: 0x%x > 0xfffe' |
|
148 | 152 | % len(v)) |
|
149 | 153 | metabuf += k |
|
150 | 154 | metabuf += struct.pack('!H', len(v)) |
|
151 | 155 | metabuf += v |
|
152 | 156 | # len(metabuf) is guaranteed representable in 4 bytes, because there are |
|
153 | 157 | # only 256 keys, and for each value, len(value) <= 0xfffe. |
|
154 | 158 | return metabuf |
|
155 | 159 | |
|
156 | 160 | _metaitemtypes = { |
|
157 | 161 | constants.METAKEYFLAG: (int, long), |
|
158 | 162 | constants.METAKEYSIZE: (int, long), |
|
159 | 163 | } |
|
160 | 164 | |
|
161 | 165 | def buildpackmeta(metadict): |
|
162 | 166 | """like _buildpackmeta, but typechecks metadict and normalize it. |
|
163 | 167 | |
|
164 | 168 | This means, METAKEYSIZE and METAKEYSIZE should have integers as values, |
|
165 | 169 | and METAKEYFLAG will be dropped if its value is 0. |
|
166 | 170 | """ |
|
167 | 171 | newmeta = {} |
|
168 | 172 | for k, v in (metadict or {}).iteritems(): |
|
169 | 173 | expectedtype = _metaitemtypes.get(k, (bytes,)) |
|
170 | 174 | if not isinstance(v, expectedtype): |
|
171 | 175 | raise error.ProgrammingError('packmeta: wrong type of key %s' % k) |
|
172 | 176 | # normalize int to binary buffer |
|
173 | 177 | if int in expectedtype: |
|
174 | 178 | # optimization: remove flag if it's 0 to save space |
|
175 | 179 | if k == constants.METAKEYFLAG and v == 0: |
|
176 | 180 | continue |
|
177 | 181 | v = int2bin(v) |
|
178 | 182 | newmeta[k] = v |
|
179 | 183 | return _buildpackmeta(newmeta) |
|
180 | 184 | |
|
181 | 185 | def parsepackmeta(metabuf): |
|
182 | 186 | """like _parsepackmeta, but convert fields to desired types automatically. |
|
183 | 187 | |
|
184 | 188 | This means, METAKEYFLAG and METAKEYSIZE fields will be converted to |
|
185 | 189 | integers. |
|
186 | 190 | """ |
|
187 | 191 | metadict = _parsepackmeta(metabuf) |
|
188 | 192 | for k, v in metadict.iteritems(): |
|
189 | 193 | if k in _metaitemtypes and int in _metaitemtypes[k]: |
|
190 | 194 | metadict[k] = bin2int(v) |
|
191 | 195 | return metadict |
|
192 | 196 | |
|
193 | 197 | def int2bin(n): |
|
194 | 198 | """convert a non-negative integer to raw binary buffer""" |
|
195 | 199 | buf = bytearray() |
|
196 | 200 | while n > 0: |
|
197 | 201 | buf.insert(0, n & 0xff) |
|
198 | 202 | n >>= 8 |
|
199 | 203 | return bytes(buf) |
|
200 | 204 | |
|
201 | 205 | def bin2int(buf): |
|
202 | 206 | """the reverse of int2bin, convert a binary buffer to an integer""" |
|
203 | 207 | x = 0 |
|
204 | 208 | for b in bytearray(buf): |
|
205 | 209 | x <<= 8 |
|
206 | 210 | x |= b |
|
207 | 211 | return x |
|
208 | 212 | |
|
209 | 213 | def parsesizeflags(raw): |
|
210 | 214 | """given a remotefilelog blob, return (headersize, rawtextsize, flags) |
|
211 | 215 | |
|
212 | 216 | see remotefilelogserver.createfileblob for the format. |
|
213 | 217 | raise RuntimeError if the content is illformed. |
|
214 | 218 | """ |
|
215 | 219 | flags = revlog.REVIDX_DEFAULT_FLAGS |
|
216 | 220 | size = None |
|
217 | 221 | try: |
|
218 | 222 | index = raw.index('\0') |
|
219 | 223 | header = raw[:index] |
|
220 | 224 | if header.startswith('v'): |
|
221 | 225 | # v1 and above, header starts with 'v' |
|
222 | 226 | if header.startswith('v1\n'): |
|
223 | 227 | for s in header.split('\n'): |
|
224 | 228 | if s.startswith(constants.METAKEYSIZE): |
|
225 | 229 | size = int(s[len(constants.METAKEYSIZE):]) |
|
226 | 230 | elif s.startswith(constants.METAKEYFLAG): |
|
227 | 231 | flags = int(s[len(constants.METAKEYFLAG):]) |
|
228 | 232 | else: |
|
229 | 233 | raise RuntimeError('unsupported remotefilelog header: %s' |
|
230 | 234 | % header) |
|
231 | 235 | else: |
|
232 | 236 | # v0, str(int(size)) is the header |
|
233 | 237 | size = int(header) |
|
234 | 238 | except ValueError: |
|
235 | 239 | raise RuntimeError("unexpected remotefilelog header: illegal format") |
|
236 | 240 | if size is None: |
|
237 | 241 | raise RuntimeError("unexpected remotefilelog header: no size found") |
|
238 | 242 | return index + 1, size, flags |
|
239 | 243 | |
|
240 | 244 | def buildfileblobheader(size, flags, version=None): |
|
241 | 245 | """return the header of a remotefilelog blob. |
|
242 | 246 | |
|
243 | 247 | see remotefilelogserver.createfileblob for the format. |
|
244 | 248 | approximately the reverse of parsesizeflags. |
|
245 | 249 | |
|
246 | 250 | version could be 0 or 1, or None (auto decide). |
|
247 | 251 | """ |
|
248 | 252 | # choose v0 if flags is empty, otherwise v1 |
|
249 | 253 | if version is None: |
|
250 | 254 | version = int(bool(flags)) |
|
251 | 255 | if version == 1: |
|
252 | 256 | header = ('v1\n%s%d\n%s%d' |
|
253 | 257 | % (constants.METAKEYSIZE, size, |
|
254 | 258 | constants.METAKEYFLAG, flags)) |
|
255 | 259 | elif version == 0: |
|
256 | 260 | if flags: |
|
257 | 261 | raise error.ProgrammingError('fileblob v0 does not support flag') |
|
258 | 262 | header = '%d' % size |
|
259 | 263 | else: |
|
260 | 264 | raise error.ProgrammingError('unknown fileblob version %d' % version) |
|
261 | 265 | return header |
|
262 | 266 | |
|
263 | 267 | def ancestormap(raw): |
|
264 | 268 | offset, size, flags = parsesizeflags(raw) |
|
265 | 269 | start = offset + size |
|
266 | 270 | |
|
267 | 271 | mapping = {} |
|
268 | 272 | while start < len(raw): |
|
269 | 273 | divider = raw.index('\0', start + 80) |
|
270 | 274 | |
|
271 | 275 | currentnode = raw[start:(start + 20)] |
|
272 | 276 | p1 = raw[(start + 20):(start + 40)] |
|
273 | 277 | p2 = raw[(start + 40):(start + 60)] |
|
274 | 278 | linknode = raw[(start + 60):(start + 80)] |
|
275 | 279 | copyfrom = raw[(start + 80):divider] |
|
276 | 280 | |
|
277 | 281 | mapping[currentnode] = (p1, p2, linknode, copyfrom) |
|
278 | 282 | start = divider + 1 |
|
279 | 283 | |
|
280 | 284 | return mapping |
|
281 | 285 | |
|
282 | 286 | def readfile(path): |
|
283 | 287 | f = open(path, 'rb') |
|
284 | 288 | try: |
|
285 | 289 | result = f.read() |
|
286 | 290 | |
|
287 | 291 | # we should never have empty files |
|
288 | 292 | if not result: |
|
289 | 293 | os.remove(path) |
|
290 | 294 | raise IOError("empty file: %s" % path) |
|
291 | 295 | |
|
292 | 296 | return result |
|
293 | 297 | finally: |
|
294 | 298 | f.close() |
|
295 | 299 | |
|
296 | 300 | def unlinkfile(filepath): |
|
297 | 301 | if pycompat.iswindows: |
|
298 | 302 | # On Windows, os.unlink cannot delete readonly files
|
299 | 303 | os.chmod(filepath, stat.S_IWUSR) |
|
300 | 304 | os.unlink(filepath) |
|
301 | 305 | |
|
302 | 306 | def renamefile(source, destination): |
|
303 | 307 | if pycompat.iswindows: |
|
304 | 308 | # On Windows, os.rename cannot rename readonly files |
|
305 | 309 | # and cannot overwrite destination if it exists |
|
306 | 310 | os.chmod(source, stat.S_IWUSR) |
|
307 | 311 | if os.path.isfile(destination): |
|
308 | 312 | os.chmod(destination, stat.S_IWUSR) |
|
309 | 313 | os.unlink(destination) |
|
310 | 314 | |
|
311 | 315 | os.rename(source, destination) |
|
312 | 316 | |
|
313 | 317 | def writefile(path, content, readonly=False): |
|
314 | 318 | dirname, filename = os.path.split(path) |
|
315 | 319 | if not os.path.exists(dirname): |
|
316 | 320 | try: |
|
317 | 321 | os.makedirs(dirname) |
|
318 | 322 | except OSError as ex: |
|
319 | 323 | if ex.errno != errno.EEXIST: |
|
320 | 324 | raise |
|
321 | 325 | |
|
322 | 326 | fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname) |
|
323 | 327 | os.close(fd) |
|
324 | 328 | |
|
325 | 329 | try: |
|
326 | 330 | f = util.posixfile(temp, 'wb') |
|
327 | 331 | f.write(content) |
|
328 | 332 | f.close() |
|
329 | 333 | |
|
330 | 334 | if readonly: |
|
331 | 335 | mode = 0o444 |
|
332 | 336 | else: |
|
333 | 337 | # tempfiles are created with 0o600, so we need to manually set the |
|
334 | 338 | # mode. |
|
335 | 339 | oldumask = os.umask(0) |
|
336 | 340 | # there's no way to get the umask without modifying it, so set it |
|
337 | 341 | # back |
|
338 | 342 | os.umask(oldumask) |
|
339 | 343 | mode = ~oldumask |
|
340 | 344 | |
|
341 | 345 | renamefile(temp, path) |
|
342 | 346 | os.chmod(path, mode) |
|
343 | 347 | except Exception: |
|
344 | 348 | try: |
|
345 | 349 | unlinkfile(temp) |
|
346 | 350 | except OSError: |
|
347 | 351 | pass |
|
348 | 352 | raise |
|
349 | 353 | |
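The write-to-temp-then-rename pattern used by writefile above can be sketched in isolation; this simplified version assumes a POSIX rename and omits the readonly/umask handling and the Windows chmod workarounds:

    import os
    import tempfile

    def atomicwrite(path, content):
        # Write to a temp file in the destination directory, then rename it
        # into place so readers never observe a partially written file.
        dirname, filename = os.path.split(path)
        fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
        os.close(fd)
        try:
            with open(temp, 'wb') as f:
                f.write(content)
            os.rename(temp, path)
        except Exception:
            os.unlink(temp)
            raise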
|
350 | 354 | def sortnodes(nodes, parentfunc): |
|
351 | 355 | """Topologically sorts the nodes, using the parentfunc to find |
|
352 | 356 | the parents of nodes.""" |
|
353 | 357 | nodes = set(nodes) |
|
354 | 358 | childmap = {} |
|
355 | 359 | parentmap = {} |
|
356 | 360 | roots = [] |
|
357 | 361 | |
|
358 | 362 | # Build a child and parent map |
|
359 | 363 | for n in nodes: |
|
360 | 364 | parents = [p for p in parentfunc(n) if p in nodes] |
|
361 | 365 | parentmap[n] = set(parents) |
|
362 | 366 | for p in parents: |
|
363 | 367 | childmap.setdefault(p, set()).add(n) |
|
364 | 368 | if not parents: |
|
365 | 369 | roots.append(n) |
|
366 | 370 | |
|
367 | 371 | roots.sort() |
|
368 | 372 | # Process roots, adding children to the queue as they become roots |
|
369 | 373 | results = [] |
|
370 | 374 | while roots: |
|
371 | 375 | n = roots.pop(0) |
|
372 | 376 | results.append(n) |
|
373 | 377 | if n in childmap: |
|
374 | 378 | children = childmap[n] |
|
375 | 379 | for c in children: |
|
376 | 380 | childparents = parentmap[c] |
|
377 | 381 | childparents.remove(n) |
|
378 | 382 | if len(childparents) == 0: |
|
379 | 383 | # insert at the beginning, that way child nodes |
|
380 | 384 | # are likely to be output immediately after their |
|
381 | 385 | # parents. This gives better compression results. |
|
382 | 386 | roots.insert(0, c) |
|
383 | 387 | |
|
384 | 388 | return results |
|
385 | 389 | |
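A small usage sketch of sortnodes (hypothetical nodes, not from the diff): parentfunc returns each node's parents, and the result always lists parents before their children:

    # 'a' is the root, 'b' and 'c' are children of 'a', and 'd' merges 'b' and 'c'.
    parents = {'a': [], 'b': ['a'], 'c': ['a'], 'd': ['b', 'c']}
    order = sortnodes(parents.keys(), parents.get)
    assert order.index('a') < order.index('b') < order.index('d')
    assert order.index('a') < order.index('c') < order.index('d')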
|
386 | 390 | def readexactly(stream, n): |
|
387 | 391 | '''read n bytes from stream.read and abort if less was available''' |
|
388 | 392 | s = stream.read(n) |
|
389 | 393 | if len(s) < n: |
|
390 | 394 | raise error.Abort(_("stream ended unexpectedly" |
|
391 | 395 | " (got %d bytes, expected %d)") |
|
392 | 396 | % (len(s), n)) |
|
393 | 397 | return s |
|
394 | 398 | |
|
395 | 399 | def readunpack(stream, fmt): |
|
396 | 400 | data = readexactly(stream, struct.calcsize(fmt)) |
|
397 | 401 | return struct.unpack(fmt, data) |
|
398 | 402 | |
|
399 | 403 | def readpath(stream): |
|
400 | 404 | rawlen = readexactly(stream, constants.FILENAMESIZE) |
|
401 | 405 | pathlen = struct.unpack(constants.FILENAMESTRUCT, rawlen)[0] |
|
402 | 406 | return readexactly(stream, pathlen) |
|
403 | 407 | |
|
404 | 408 | def readnodelist(stream): |
|
405 | 409 | rawlen = readexactly(stream, constants.NODECOUNTSIZE) |
|
406 | 410 | nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0] |
|
407 | 411 | for i in pycompat.xrange(nodecount): |
|
408 | 412 | yield readexactly(stream, constants.NODESIZE) |
|
409 | 413 | |
|
410 | 414 | def readpathlist(stream): |
|
411 | 415 | rawlen = readexactly(stream, constants.PATHCOUNTSIZE) |
|
412 | 416 | pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0] |
|
413 | 417 | for i in pycompat.xrange(pathcount): |
|
414 | 418 | yield readpath(stream) |
|
415 | 419 | |
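For illustration, a self-contained round trip of the path-list wire format read by readpathlist/readpath above; the '!I' count and '!H' length formats are assumed stand-ins for constants.PATHCOUNTSTRUCT and constants.FILENAMESTRUCT, which this diff does not show:

    import io
    import struct

    PATHCOUNTSTRUCT = '!I'  # assumed: 4-byte big-endian path count
    FILENAMESTRUCT = '!H'   # assumed: 2-byte big-endian path length

    paths = [b'foo/bar.txt', b'baz.c']
    buf = struct.pack(PATHCOUNTSTRUCT, len(paths))
    for p in paths:
        buf += struct.pack(FILENAMESTRUCT, len(p)) + p

    # Reading it back mirrors readpathlist/readpath.
    stream = io.BytesIO(buf)
    count = struct.unpack(PATHCOUNTSTRUCT,
                          stream.read(struct.calcsize(PATHCOUNTSTRUCT)))[0]
    decoded = []
    for _ in range(count):
        pathlen = struct.unpack(FILENAMESTRUCT,
                                stream.read(struct.calcsize(FILENAMESTRUCT)))[0]
        decoded.append(stream.read(pathlen))
    assert decoded == paths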
|
416 | 420 | def getgid(groupname): |
|
417 | 421 | try: |
|
418 | 422 | gid = grp.getgrnam(groupname).gr_gid |
|
419 | 423 | return gid |
|
420 | 424 | except KeyError: |
|
421 | 425 | return None |
|
422 | 426 | |
|
423 | 427 | def setstickygroupdir(path, gid, warn=None): |
|
424 | 428 | if gid is None: |
|
425 | 429 | return |
|
426 | 430 | try: |
|
427 | 431 | os.chown(path, -1, gid) |
|
428 | 432 | os.chmod(path, 0o2775) |
|
429 | 433 | except (IOError, OSError) as ex: |
|
430 | 434 | if warn: |
|
431 | 435 | warn(_('unable to chown/chmod on %s: %s\n') % (path, ex)) |
|
432 | 436 | |
|
433 | 437 | def mkstickygroupdir(ui, path): |
|
434 | 438 | """Creates the given directory (if it doesn't exist) and gives it a
|
435 | 439 | particular group with setgid enabled.""" |
|
436 | 440 | gid = None |
|
437 | 441 | groupname = ui.config("remotefilelog", "cachegroup") |
|
438 | 442 | if groupname: |
|
439 | 443 | gid = getgid(groupname) |
|
440 | 444 | if gid is None: |
|
441 | 445 | ui.warn(_('unable to resolve group name: %s\n') % groupname) |
|
442 | 446 | |
|
443 | 447 | # we use a single stat syscall to test the existence and mode / group bit |
|
444 | 448 | st = None |
|
445 | 449 | try: |
|
446 | 450 | st = os.stat(path) |
|
447 | 451 | except OSError: |
|
448 | 452 | pass |
|
449 | 453 | |
|
450 | 454 | if st: |
|
451 | 455 | # exists |
|
452 | 456 | if (st.st_mode & 0o2775) != 0o2775 or st.st_gid != gid: |
|
453 | 457 | # permission needs to be fixed |
|
454 | 458 | setstickygroupdir(path, gid, ui.warn) |
|
455 | 459 | return |
|
456 | 460 | |
|
457 | 461 | oldumask = os.umask(0o002) |
|
458 | 462 | try: |
|
459 | 463 | missingdirs = [path] |
|
460 | 464 | path = os.path.dirname(path) |
|
461 | 465 | while path and not os.path.exists(path): |
|
462 | 466 | missingdirs.append(path) |
|
463 | 467 | path = os.path.dirname(path) |
|
464 | 468 | |
|
465 | 469 | for path in reversed(missingdirs): |
|
466 | 470 | try: |
|
467 | 471 | os.mkdir(path) |
|
468 | 472 | except OSError as ex: |
|
469 | 473 | if ex.errno != errno.EEXIST: |
|
470 | 474 | raise |
|
471 | 475 | |
|
472 | 476 | for path in missingdirs: |
|
473 | 477 | setstickygroupdir(path, gid, ui.warn) |
|
474 | 478 | finally: |
|
475 | 479 | os.umask(oldumask) |
|
476 | 480 | |
|
477 | 481 | def getusername(ui): |
|
478 | 482 | try: |
|
479 | 483 | return stringutil.shortuser(ui.username()) |
|
480 | 484 | except Exception: |
|
481 | 485 | return 'unknown' |
|
482 | 486 | |
|
483 | 487 | def getreponame(ui): |
|
484 | 488 | reponame = ui.config('paths', 'default') |
|
485 | 489 | if reponame: |
|
486 | 490 | return os.path.basename(reponame) |
|
487 | 491 | return "unknown" |