The requested changes are too big and content was truncated.
@@ -1,828 +1,828 @@
|
1 | 1 | # __init__.py - fsmonitor initialization and overrides |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013-2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | Integrates the file-watching program Watchman with Mercurial to produce faster |
|
11 | 11 | status results. |
|
12 | 12 | |
|
13 | 13 | On a particular Linux system, for a real-world repository with over 400,000 |
|
14 | 14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same |
|
15 | 15 | system, with fsmonitor it takes about 0.3 seconds. |
|
16 | 16 | |
|
17 | 17 | fsmonitor requires no configuration -- it will tell Watchman about your |
|
18 | 18 | repository as necessary. You'll need to install Watchman from |
|
19 | 19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. |
|
20 | 20 | |
|
21 | 21 | fsmonitor is incompatible with the largefiles and eol extensions, and |
|
22 | 22 | will disable itself if any of those are active. |
|
23 | 23 | |
|
24 | 24 | The following configuration options exist: |
|
25 | 25 | |
|
26 | 26 | :: |
|
27 | 27 | |
|
28 | 28 | [fsmonitor] |
|
29 | 29 | mode = {off, on, paranoid} |
|
30 | 30 | |
|
31 | 31 | When `mode = off`, fsmonitor will disable itself (similar to not loading the |
|
32 | 32 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). |
|
33 | 33 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, |
|
34 | 34 | and ensure that the results are consistent. |
|
35 | 35 | |
|
36 | 36 | :: |
|
37 | 37 | |
|
38 | 38 | [fsmonitor] |
|
39 | 39 | timeout = (float) |
|
40 | 40 | |
|
41 | 41 | A value, in seconds, that determines how long fsmonitor will wait for Watchman |
|
42 | 42 | to return results. Defaults to `2.0`. |
|
43 | 43 | |
|
44 | 44 | :: |
|
45 | 45 | |
|
46 | 46 | [fsmonitor] |
|
47 | 47 | blacklistusers = (list of userids) |
|
48 | 48 | |
|
49 | 49 | A list of usernames for which fsmonitor will disable itself altogether. |
|
50 | 50 | |
|
51 | 51 | :: |
|
52 | 52 | |
|
53 | 53 | [fsmonitor] |
|
54 | 54 | walk_on_invalidate = (boolean) |
|
55 | 55 | |
|
56 | 56 | Whether or not to walk the whole repo ourselves when our cached state has been |
|
57 | 57 | invalidated, for example when Watchman has been restarted or .hgignore rules |
|
58 | 58 | have been changed. Walking the repo in that case can result in competing for |
|
59 | 59 | I/O with Watchman. For large repos it is recommended to set this value to |
|
60 | 60 | false. You may wish to set this to true if you have a very fast filesystem |
|
61 | 61 | that can outpace the IPC overhead of getting the result data for the full repo |
|
62 | 62 | from Watchman. Defaults to false. |
|
63 | 63 | |
|
64 | 64 | :: |
|
65 | 65 | |
|
66 | 66 | [fsmonitor] |
|
67 | 67 | warn_when_unused = (boolean) |
|
68 | 68 | |
|
69 | 69 | Whether to print a warning during certain operations when fsmonitor would be |
|
70 | 70 | beneficial to performance but isn't enabled. |
|
71 | 71 | |
|
72 | 72 | :: |
|
73 | 73 | |
|
74 | 74 | [fsmonitor] |
|
75 | 75 | warn_update_file_count = (integer) |
|
76 | 76 | |
|
77 | 77 | If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will |
|
78 | 78 | be printed during working directory updates if this many files will be |
|
79 | 79 | created. |
|
80 | 80 | ''' |
|
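For reference, a complete ``[fsmonitor]`` section combining the options documented above might look like the following (the user names and the file count are illustrative; only settings that differ from the defaults need to appear):

    [fsmonitor]
    mode = on
    timeout = 2.0
    blacklistusers = buildbot, releng
    walk_on_invalidate = false
    warn_when_unused = true
    warn_update_file_count = 50000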
81 | 81 | |
|
82 | 82 | # Platforms Supported |
|
83 | 83 | # =================== |
|
84 | 84 | # |
|
85 | 85 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, |
|
86 | 86 | # even under severe loads. |
|
87 | 87 | # |
|
88 | 88 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor |
|
89 | 89 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of |
|
90 | 90 | # user testing under normal loads. |
|
91 | 91 | # |
|
92 | 92 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but |
|
93 | 93 | # very little testing has been done. |
|
94 | 94 | # |
|
95 | 95 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. |
|
96 | 96 | # |
|
97 | 97 | # Known Issues |
|
98 | 98 | # ============ |
|
99 | 99 | # |
|
100 | 100 | # * fsmonitor will disable itself if any of the following extensions are |
|
101 | 101 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. |
|
102 | 102 | # * fsmonitor will produce incorrect results if nested repos that are not |
|
103 | 103 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. |
|
104 | 104 | # |
|
105 | 105 | # The issues related to nested repos and subrepos are probably not fundamental |
|
106 | 106 | # ones. Patches to fix them are welcome. |
|
107 | 107 | |
|
108 | 108 | from __future__ import absolute_import |
|
109 | 109 | |
|
110 | 110 | import codecs |
|
111 | 111 | import hashlib |
|
112 | 112 | import os |
|
113 | 113 | import stat |
|
114 | 114 | import sys |
|
115 | 115 | import weakref |
|
116 | 116 | |
|
117 | 117 | from mercurial.i18n import _ |
|
118 | 118 | from mercurial.node import ( |
|
119 | 119 | hex, |
|
120 | 120 | ) |
|
121 | 121 | |
|
122 | 122 | from mercurial import ( |
|
123 | 123 | context, |
|
124 | 124 | encoding, |
|
125 | 125 | error, |
|
126 | 126 | extensions, |
|
127 | 127 | localrepo, |
|
128 | 128 | merge, |
|
129 | 129 | pathutil, |
|
130 | 130 | pycompat, |
|
131 | 131 | registrar, |
|
132 | 132 | scmutil, |
|
133 | 133 | util, |
|
134 | 134 | ) |
|
135 | 135 | from mercurial import match as matchmod |
|
136 | 136 | |
|
137 | 137 | from . import ( |
|
138 | 138 | pywatchman, |
|
139 | 139 | state, |
|
140 | 140 | watchmanclient, |
|
141 | 141 | ) |
|
142 | 142 | |
|
143 | 143 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
144 | 144 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
145 | 145 | # be specifying the version(s) of Mercurial they are tested with, or |
|
146 | 146 | # leave the attribute unspecified. |
|
147 | 147 | testedwith = 'ships-with-hg-core' |
|
148 | 148 | |
|
149 | 149 | configtable = {} |
|
150 | 150 | configitem = registrar.configitem(configtable) |
|
151 | 151 | |
|
152 | 152 | configitem('fsmonitor', 'mode', |
|
153 | 153 | default='on', |
|
154 | 154 | ) |
|
155 | 155 | configitem('fsmonitor', 'walk_on_invalidate', |
|
156 | 156 | default=False, |
|
157 | 157 | ) |
|
158 | 158 | configitem('fsmonitor', 'timeout', |
|
159 | 159 | default='2', |
|
160 | 160 | ) |
|
161 | 161 | configitem('fsmonitor', 'blacklistusers', |
|
162 | 162 | default=list, |
|
163 | 163 | ) |
|
164 | 164 | configitem('fsmonitor', 'verbose', |
|
165 | 165 | default=True, |
|
166 | 166 | ) |
|
167 | 167 | configitem('experimental', 'fsmonitor.transaction_notify', |
|
168 | 168 | default=False, |
|
169 | 169 | ) |
|
170 | 170 | |
|
171 | 171 | # This extension is incompatible with the following blacklisted extensions |
|
172 | 172 | # and will disable itself when encountering one of these: |
|
173 | 173 | _blacklist = ['largefiles', 'eol'] |
|
174 | 174 | |
|
175 | 175 | def _handleunavailable(ui, state, ex): |
|
176 | 176 | """Exception handler for Watchman interaction exceptions""" |
|
177 | 177 | if isinstance(ex, watchmanclient.Unavailable): |
|
178 | 178 | # experimental config: fsmonitor.verbose |
|
179 | 179 | if ex.warn and ui.configbool('fsmonitor', 'verbose'): |
|
180 | 180 | ui.warn(str(ex) + '\n') |
|
181 | 181 | if ex.invalidate: |
|
182 | 182 | state.invalidate() |
|
183 | 183 | # experimental config: fsmonitor.verbose |
|
184 | 184 | if ui.configbool('fsmonitor', 'verbose'): |
|
185 | 185 | ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg) |
|
186 | 186 | else: |
|
187 | 187 | ui.log('fsmonitor', 'Watchman exception: %s\n', ex) |
|
188 | 188 | |
|
189 | 189 | def _hashignore(ignore): |
|
190 | 190 | """Calculate hash for ignore patterns and filenames |
|
191 | 191 | |
|
192 | 192 | If this information changes between Mercurial invocations, we can't |
|
193 | 193 | rely on Watchman information anymore and have to re-scan the working |
|
194 | 194 | copy. |
|
195 | 195 | |
|
196 | 196 | """ |
|
197 | 197 | sha1 = hashlib.sha1() |
|
198 | 198 | sha1.update(repr(ignore)) |
|
199 | 199 | return sha1.hexdigest() |
|
200 | 200 | |
|
201 | 201 | _watchmanencoding = pywatchman.encoding.get_local_encoding() |
|
202 | 202 | _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding() |
|
203 | 203 | _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding) |
|
204 | 204 | |
|
205 | 205 | def _watchmantofsencoding(path): |
|
206 | 206 | """Fix path to match watchman and local filesystem encoding |
|
207 | 207 | |
|
208 | 208 | watchman's paths encoding can differ from filesystem encoding. For example, |
|
209 | 209 | on Windows, it's always utf-8. |
|
210 | 210 | """ |
|
211 | 211 | try: |
|
212 | 212 | decoded = path.decode(_watchmanencoding) |
|
213 | 213 | except UnicodeDecodeError as e: |
|
214 | 214 | raise error.Abort(str(e), hint='watchman encoding error') |
|
215 | 215 | |
|
216 | 216 | try: |
|
217 | 217 | encoded = decoded.encode(_fsencoding, 'strict') |
|
218 | 218 | except UnicodeEncodeError as e: |
|
219 | 219 | raise error.Abort(str(e)) |
|
220 | 220 | |
|
221 | 221 | return encoded |
|
222 | 222 | |
|
223 | 223 | def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
224 | 224 | '''Replacement for dirstate.walk, hooking into Watchman. |
|
225 | 225 | |
|
226 | 226 | Whenever full is False, ignored is False, and the Watchman client is |
|
227 | 227 | available, use Watchman combined with saved state to possibly return only a |
|
228 | 228 | subset of files.''' |
|
229 | 229 | def bail(reason): |
|
230 | 230 | self._ui.debug('fsmonitor: fallback to core status, %s\n' % reason) |
|
231 | 231 | return orig(match, subrepos, unknown, ignored, full=True) |
|
232 | 232 | |
|
233 | 233 | if full: |
|
234 | 234 | return bail('full rewalk requested') |
|
235 | 235 | if ignored: |
|
236 | 236 | return bail('listing ignored files') |
|
237 | 237 | if not self._watchmanclient.available(): |
|
238 | 238 | return bail('client unavailable') |
|
239 | 239 | state = self._fsmonitorstate |
|
240 | 240 | clock, ignorehash, notefiles = state.get() |
|
241 | 241 | if not clock: |
|
242 | 242 | if state.walk_on_invalidate: |
|
243 | 243 | return bail('no clock') |
|
244 | 244 | # Initial NULL clock value, see |
|
245 | 245 | # https://facebook.github.io/watchman/docs/clockspec.html |
|
246 | 246 | clock = 'c:0:0' |
|
247 | 247 | notefiles = [] |
|
248 | 248 | |
|
249 | 249 | ignore = self._ignore |
|
250 | 250 | dirignore = self._dirignore |
|
251 | 251 | if unknown: |
|
252 | 252 | if _hashignore(ignore) != ignorehash and clock != 'c:0:0': |
|
253 | 253 | # ignore list changed -- can't rely on Watchman state any more |
|
254 | 254 | if state.walk_on_invalidate: |
|
255 | 255 | return bail('ignore rules changed') |
|
256 | 256 | notefiles = [] |
|
257 | 257 | clock = 'c:0:0' |
|
258 | 258 | else: |
|
259 | 259 | # always ignore |
|
260 | 260 | ignore = util.always |
|
261 | 261 | dirignore = util.always |
|
262 | 262 | |
|
263 | 263 | matchfn = match.matchfn |
|
264 | 264 | matchalways = match.always() |
|
265 | 265 | dmap = self._map |
|
266 | 266 | if util.safehasattr(dmap, '_map'): |
|
267 | 267 | # for better performance, directly access the inner dirstate map if the |
|
268 | 268 | # standard dirstate implementation is in use. |
|
269 | 269 | dmap = dmap._map |
|
270 | 270 | nonnormalset = self._map.nonnormalset |
|
271 | 271 | |
|
272 | 272 | copymap = self._map.copymap |
|
273 | 273 | getkind = stat.S_IFMT |
|
274 | 274 | dirkind = stat.S_IFDIR |
|
275 | 275 | regkind = stat.S_IFREG |
|
276 | 276 | lnkkind = stat.S_IFLNK |
|
277 | 277 | join = self._join |
|
278 | 278 | normcase = util.normcase |
|
279 | 279 | fresh_instance = False |
|
280 | 280 | |
|
281 | 281 | exact = skipstep3 = False |
|
282 | 282 | if match.isexact(): # match.exact |
|
283 | 283 | exact = True |
|
284 | 284 | dirignore = util.always # skip step 2 |
|
285 | 285 | elif match.prefix(): # match.match, no patterns |
|
286 | 286 | skipstep3 = True |
|
287 | 287 | |
|
288 | 288 | if not exact and self._checkcase: |
|
289 | 289 | # note that even though we could receive directory entries, we're only |
|
290 | 290 | # interested in checking if a file with the same name exists. So only |
|
291 | 291 | # normalize files if possible. |
|
292 | 292 | normalize = self._normalizefile |
|
293 | 293 | skipstep3 = False |
|
294 | 294 | else: |
|
295 | 295 | normalize = None |
|
296 | 296 | |
|
297 | 297 | # step 1: find all explicit files |
|
298 | 298 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
299 | 299 | |
|
300 | 300 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
301 | 301 | work = [d for d in work if not dirignore(d[0])] |
|
302 | 302 | |
|
303 | 303 | if not work and (exact or skipstep3): |
|
304 | 304 | for s in subrepos: |
|
305 | 305 | del results[s] |
|
306 | 306 | del results['.hg'] |
|
307 | 307 | return results |
|
308 | 308 | |
|
309 | 309 | # step 2: query Watchman |
|
310 | 310 | try: |
|
311 | 311 | # Use the user-configured timeout for the query. |
|
312 | 312 | # Add a little slack over the top of the user query to allow for |
|
313 | 313 | # overheads while transferring the data |
|
314 | 314 | self._watchmanclient.settimeout(state.timeout + 0.1) |
|
315 | 315 | result = self._watchmanclient.command('query', { |
|
316 | 316 | 'fields': ['mode', 'mtime', 'size', 'exists', 'name'], |
|
317 | 317 | 'since': clock, |
|
318 | 318 | 'expression': [ |
|
319 | 319 | 'not', [ |
|
320 | 320 | 'anyof', ['dirname', '.hg'], |
|
321 | 321 | ['name', '.hg', 'wholename'] |
|
322 | 322 | ] |
|
323 | 323 | ], |
|
324 | 324 | 'sync_timeout': int(state.timeout * 1000), |
|
325 | 325 | 'empty_on_fresh_instance': state.walk_on_invalidate, |
|
326 | 326 | }) |
|
327 | 327 | except Exception as ex: |
|
328 | 328 | _handleunavailable(self._ui, state, ex) |
|
329 | 329 | self._watchmanclient.clearconnection() |
|
330 | 330 | return bail('exception during run') |
|
331 | 331 | else: |
|
332 | 332 | # We need to propagate the last observed clock up so that we |
|
333 | 333 | # can use it for our next query |
|
334 | 334 | state.setlastclock(result['clock']) |
|
335 | 335 | if result['is_fresh_instance']: |
|
336 | 336 | if state.walk_on_invalidate: |
|
337 | 337 | state.invalidate() |
|
338 | 338 | return bail('fresh instance') |
|
339 | 339 | fresh_instance = True |
|
340 | 340 | # Ignore any prior notable files from the state info |
|
341 | 341 | notefiles = [] |
|
342 | 342 | |
|
343 | 343 | # for file paths which require normalization and we encounter a case |
|
344 | 344 | # collision, we store our own foldmap |
|
345 | 345 | if normalize: |
|
346 | 346 | foldmap = dict((normcase(k), k) for k in results) |
|
347 | 347 | |
|
348 | 348 | switch_slashes = pycompat.ossep == '\\' |
|
349 | 349 | # The order of the results is, strictly speaking, undefined. |
|
350 | 350 | # For case changes on a case insensitive filesystem we may receive |
|
351 | 351 | # two entries, one with exists=True and another with exists=False. |
|
352 | 352 | # The exists=True entries in the same response should be interpreted |
|
353 | 353 | # as being happens-after the exists=False entries due to the way that |
|
354 | 354 | # Watchman tracks files. We use this property to reconcile deletes |
|
355 | 355 | # for name case changes. |
|
356 | 356 | for entry in result['files']: |
|
357 | 357 | fname = entry['name'] |
|
358 | 358 | if _fixencoding: |
|
359 | 359 | fname = _watchmantofsencoding(fname) |
|
360 | 360 | if switch_slashes: |
|
361 | 361 | fname = fname.replace('\\', '/') |
|
362 | 362 | if normalize: |
|
363 | 363 | normed = normcase(fname) |
|
364 | 364 | fname = normalize(fname, True, True) |
|
365 | 365 | foldmap[normed] = fname |
|
366 | 366 | fmode = entry['mode'] |
|
367 | 367 | fexists = entry['exists'] |
|
368 | 368 | kind = getkind(fmode) |
|
369 | 369 | |
|
370 | 370 | if '/.hg/' in fname or fname.endswith('/.hg'): |
|
371 | 371 | return bail('nested-repo-detected') |
|
372 | 372 | |
|
373 | 373 | if not fexists: |
|
374 | 374 | # if marked as deleted and we don't already have a change |
|
375 | 375 | # record, mark it as deleted. If we already have an entry |
|
376 | 376 | # for fname then it was either part of walkexplicit or was |
|
377 | 377 | # an earlier result that was a case change |
|
378 | 378 | if fname not in results and fname in dmap and ( |
|
379 | 379 | matchalways or matchfn(fname)): |
|
380 | 380 | results[fname] = None |
|
381 | 381 | elif kind == dirkind: |
|
382 | 382 | if fname in dmap and (matchalways or matchfn(fname)): |
|
383 | 383 | results[fname] = None |
|
384 | 384 | elif kind == regkind or kind == lnkkind: |
|
385 | 385 | if fname in dmap: |
|
386 | 386 | if matchalways or matchfn(fname): |
|
387 | 387 | results[fname] = entry |
|
388 | 388 | elif (matchalways or matchfn(fname)) and not ignore(fname): |
|
389 | 389 | results[fname] = entry |
|
390 | 390 | elif fname in dmap and (matchalways or matchfn(fname)): |
|
391 | 391 | results[fname] = None |
|
392 | 392 | |
|
393 | 393 | # step 3: query notable files we don't already know about |
|
394 | 394 | # XXX try not to iterate over the entire dmap |
|
395 | 395 | if normalize: |
|
396 | 396 | # any notable files that have changed case will already be handled |
|
397 | 397 | # above, so just check membership in the foldmap |
|
398 | 398 | notefiles = set((normalize(f, True, True) for f in notefiles |
|
399 | 399 | if normcase(f) not in foldmap)) |
|
400 | 400 | visit = set((f for f in notefiles if (f not in results and matchfn(f) |
|
401 | 401 | and (f in dmap or not ignore(f))))) |
|
402 | 402 | |
|
403 | 403 | if not fresh_instance: |
|
404 | 404 | if matchalways: |
|
405 | 405 | visit.update(f for f in nonnormalset if f not in results) |
|
406 | 406 | visit.update(f for f in copymap if f not in results) |
|
407 | 407 | else: |
|
408 | 408 | visit.update(f for f in nonnormalset |
|
409 | 409 | if f not in results and matchfn(f)) |
|
410 | 410 | visit.update(f for f in copymap |
|
411 | 411 | if f not in results and matchfn(f)) |
|
412 | 412 | else: |
|
413 | 413 | if matchalways: |
|
414 | 414 | visit.update(f for f, st in dmap.iteritems() if f not in results) |
|
415 | 415 | visit.update(f for f in copymap if f not in results) |
|
416 | 416 | else: |
|
417 | 417 | visit.update(f for f, st in dmap.iteritems() |
|
418 | 418 | if f not in results and matchfn(f)) |
|
419 | 419 | visit.update(f for f in copymap |
|
420 | 420 | if f not in results and matchfn(f)) |
|
421 | 421 | |
|
422 | 422 | audit = pathutil.pathauditor(self._root, cached=True).check |
|
423 | 423 | auditpass = [f for f in visit if audit(f)] |
|
424 | 424 | auditpass.sort() |
|
425 | 425 | auditfail = visit.difference(auditpass) |
|
426 | 426 | for f in auditfail: |
|
427 | 427 | results[f] = None |
|
428 | 428 | |
|
429 | 429 | nf = iter(auditpass).next |
|
430 | 430 | for st in util.statfiles([join(f) for f in auditpass]): |
|
431 | 431 | f = nf() |
|
432 | 432 | if st or f in dmap: |
|
433 | 433 | results[f] = st |
|
434 | 434 | |
|
435 | 435 | for s in subrepos: |
|
436 | 436 | del results[s] |
|
437 | 437 | del results['.hg'] |
|
438 | 438 | return results |
|
439 | 439 | |
|
440 | 440 | def overridestatus( |
|
441 | 441 | orig, self, node1='.', node2=None, match=None, ignored=False, |
|
442 | 442 | clean=False, unknown=False, listsubrepos=False): |
|
443 | 443 | listignored = ignored |
|
444 | 444 | listclean = clean |
|
445 | 445 | listunknown = unknown |
|
446 | 446 | |
|
447 | 447 | def _cmpsets(l1, l2): |
|
448 | 448 | try: |
|
449 | 449 | if 'FSMONITOR_LOG_FILE' in encoding.environ: |
|
450 | 450 | fn = encoding.environ['FSMONITOR_LOG_FILE'] |
|
451 | 451 | f = open(fn, 'wb') |
|
452 | 452 | else: |
|
453 | 453 | fn = 'fsmonitorfail.log' |
|
454 | 454 | f = self.vfs.open(fn, 'wb') |
|
455 | 455 | except (IOError, OSError): |
|
456 | 456 | self.ui.warn(_('warning: unable to write to %s\n') % fn) |
|
457 | 457 | return |
|
458 | 458 | |
|
459 | 459 | try: |
|
460 | 460 | for i, (s1, s2) in enumerate(zip(l1, l2)): |
|
461 | 461 | if set(s1) != set(s2): |
|
462 | 462 | f.write('sets at position %d are unequal\n' % i) |
|
463 | 463 | f.write('watchman returned: %s\n' % s1) |
|
464 | 464 | f.write('stat returned: %s\n' % s2) |
|
465 | 465 | finally: |
|
466 | 466 | f.close() |
|
467 | 467 | |
|
468 | 468 | if isinstance(node1, context.changectx): |
|
469 | 469 | ctx1 = node1 |
|
470 | 470 | else: |
|
471 | 471 | ctx1 = self[node1] |
|
472 | 472 | if isinstance(node2, context.changectx): |
|
473 | 473 | ctx2 = node2 |
|
474 | 474 | else: |
|
475 | 475 | ctx2 = self[node2] |
|
476 | 476 | |
|
477 | 477 | working = ctx2.rev() is None |
|
478 | 478 | parentworking = working and ctx1 == self['.'] |
|
479 |     | match = match or matchmod.always( |
    | 479 | match = match or matchmod.always() |
|
480 | 480 | |
|
481 | 481 | # Maybe we can use this opportunity to update Watchman's state. |
|
482 | 482 | # Mercurial uses workingcommitctx and/or memctx to represent the part of |
|
483 | 483 | # the workingctx that is to be committed. So don't update the state in |
|
484 | 484 | # that case. |
|
485 | 485 | # HG_PENDING is set in the environment when the dirstate is being updated |
|
486 | 486 | # in the middle of a transaction; we must not update our state in that |
|
487 | 487 | # case, or we risk forgetting about changes in the working copy. |
|
488 | 488 | updatestate = (parentworking and match.always() and |
|
489 | 489 | not isinstance(ctx2, (context.workingcommitctx, |
|
490 | 490 | context.memctx)) and |
|
491 | 491 | 'HG_PENDING' not in encoding.environ) |
|
492 | 492 | |
|
493 | 493 | try: |
|
494 | 494 | if self._fsmonitorstate.walk_on_invalidate: |
|
495 | 495 | # Use a short timeout to query the current clock. If that |
|
496 | 496 | # takes too long then we assume that the service will be slow |
|
497 | 497 | # to answer our query. |
|
498 | 498 | # walk_on_invalidate indicates that we prefer to walk the |
|
499 | 499 | # tree ourselves because we can ignore portions that Watchman |
|
500 | 500 | # cannot and we tend to be faster in the warmer buffer cache |
|
501 | 501 | # cases. |
|
502 | 502 | self._watchmanclient.settimeout(0.1) |
|
503 | 503 | else: |
|
504 | 504 | # Give Watchman more time to potentially complete its walk |
|
505 | 505 | # and return the initial clock. In this mode we assume that |
|
506 | 506 | # the filesystem will be slower than parsing a potentially |
|
507 | 507 | # very large Watchman result set. |
|
508 | 508 | self._watchmanclient.settimeout( |
|
509 | 509 | self._fsmonitorstate.timeout + 0.1) |
|
510 | 510 | startclock = self._watchmanclient.getcurrentclock() |
|
511 | 511 | except Exception as ex: |
|
512 | 512 | self._watchmanclient.clearconnection() |
|
513 | 513 | _handleunavailable(self.ui, self._fsmonitorstate, ex) |
|
514 | 514 | # boo, Watchman failed. bail |
|
515 | 515 | return orig(node1, node2, match, listignored, listclean, |
|
516 | 516 | listunknown, listsubrepos) |
|
517 | 517 | |
|
518 | 518 | if updatestate: |
|
519 | 519 | # We need info about unknown files. This may make things slower the |
|
520 | 520 | # first time, but whatever. |
|
521 | 521 | stateunknown = True |
|
522 | 522 | else: |
|
523 | 523 | stateunknown = listunknown |
|
524 | 524 | |
|
525 | 525 | if updatestate: |
|
526 | 526 | ps = poststatus(startclock) |
|
527 | 527 | self.addpostdsstatus(ps) |
|
528 | 528 | |
|
529 | 529 | r = orig(node1, node2, match, listignored, listclean, stateunknown, |
|
530 | 530 | listsubrepos) |
|
531 | 531 | modified, added, removed, deleted, unknown, ignored, clean = r |
|
532 | 532 | |
|
533 | 533 | if not listunknown: |
|
534 | 534 | unknown = [] |
|
535 | 535 | |
|
536 | 536 | # don't do paranoid checks if we're not going to query Watchman anyway |
|
537 | 537 | full = listclean or match.traversedir is not None |
|
538 | 538 | if self._fsmonitorstate.mode == 'paranoid' and not full: |
|
539 | 539 | # run status again and fall back to the old walk this time |
|
540 | 540 | self.dirstate._fsmonitordisable = True |
|
541 | 541 | |
|
542 | 542 | # shut the UI up |
|
543 | 543 | quiet = self.ui.quiet |
|
544 | 544 | self.ui.quiet = True |
|
545 | 545 | fout, ferr = self.ui.fout, self.ui.ferr |
|
546 | 546 | self.ui.fout = self.ui.ferr = open(os.devnull, 'wb') |
|
547 | 547 | |
|
548 | 548 | try: |
|
549 | 549 | rv2 = orig( |
|
550 | 550 | node1, node2, match, listignored, listclean, listunknown, |
|
551 | 551 | listsubrepos) |
|
552 | 552 | finally: |
|
553 | 553 | self.dirstate._fsmonitordisable = False |
|
554 | 554 | self.ui.quiet = quiet |
|
555 | 555 | self.ui.fout, self.ui.ferr = fout, ferr |
|
556 | 556 | |
|
557 | 557 | # clean isn't tested since it's set to True above |
|
558 | 558 | with self.wlock(): |
|
559 | 559 | _cmpsets( |
|
560 | 560 | [modified, added, removed, deleted, unknown, ignored, clean], |
|
561 | 561 | rv2) |
|
562 | 562 | modified, added, removed, deleted, unknown, ignored, clean = rv2 |
|
563 | 563 | |
|
564 | 564 | return scmutil.status( |
|
565 | 565 | modified, added, removed, deleted, unknown, ignored, clean) |
|
566 | 566 | |
|
567 | 567 | class poststatus(object): |
|
568 | 568 | def __init__(self, startclock): |
|
569 | 569 | self._startclock = startclock |
|
570 | 570 | |
|
571 | 571 | def __call__(self, wctx, status): |
|
572 | 572 | clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock |
|
573 | 573 | hashignore = _hashignore(wctx.repo().dirstate._ignore) |
|
574 | 574 | notefiles = (status.modified + status.added + status.removed + |
|
575 | 575 | status.deleted + status.unknown) |
|
576 | 576 | wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles) |
|
577 | 577 | |
|
578 | 578 | def makedirstate(repo, dirstate): |
|
579 | 579 | class fsmonitordirstate(dirstate.__class__): |
|
580 | 580 | def _fsmonitorinit(self, repo): |
|
581 | 581 | # _fsmonitordisable is used in paranoid mode |
|
582 | 582 | self._fsmonitordisable = False |
|
583 | 583 | self._fsmonitorstate = repo._fsmonitorstate |
|
584 | 584 | self._watchmanclient = repo._watchmanclient |
|
585 | 585 | self._repo = weakref.proxy(repo) |
|
586 | 586 | |
|
587 | 587 | def walk(self, *args, **kwargs): |
|
588 | 588 | orig = super(fsmonitordirstate, self).walk |
|
589 | 589 | if self._fsmonitordisable: |
|
590 | 590 | return orig(*args, **kwargs) |
|
591 | 591 | return overridewalk(orig, self, *args, **kwargs) |
|
592 | 592 | |
|
593 | 593 | def rebuild(self, *args, **kwargs): |
|
594 | 594 | self._fsmonitorstate.invalidate() |
|
595 | 595 | return super(fsmonitordirstate, self).rebuild(*args, **kwargs) |
|
596 | 596 | |
|
597 | 597 | def invalidate(self, *args, **kwargs): |
|
598 | 598 | self._fsmonitorstate.invalidate() |
|
599 | 599 | return super(fsmonitordirstate, self).invalidate(*args, **kwargs) |
|
600 | 600 | |
|
601 | 601 | dirstate.__class__ = fsmonitordirstate |
|
602 | 602 | dirstate._fsmonitorinit(repo) |
|
603 | 603 | |
|
604 | 604 | def wrapdirstate(orig, self): |
|
605 | 605 | ds = orig(self) |
|
606 | 606 | # only override the dirstate when Watchman is available for the repo |
|
607 | 607 | if util.safehasattr(self, '_fsmonitorstate'): |
|
608 | 608 | makedirstate(self, ds) |
|
609 | 609 | return ds |
|
610 | 610 | |
|
611 | 611 | def extsetup(ui): |
|
612 | 612 | extensions.wrapfilecache( |
|
613 | 613 | localrepo.localrepository, 'dirstate', wrapdirstate) |
|
614 | 614 | if pycompat.isdarwin: |
|
615 | 615 | # An assist for avoiding the dangling-symlink fsevents bug |
|
616 | 616 | extensions.wrapfunction(os, 'symlink', wrapsymlink) |
|
617 | 617 | |
|
618 | 618 | extensions.wrapfunction(merge, 'update', wrapupdate) |
|
619 | 619 | |
|
620 | 620 | def wrapsymlink(orig, source, link_name): |
|
621 | 621 | ''' if we create a dangling symlink, also touch the parent dir |
|
622 | 622 | to encourage fsevents notifications to work more correctly ''' |
|
623 | 623 | try: |
|
624 | 624 | return orig(source, link_name) |
|
625 | 625 | finally: |
|
626 | 626 | try: |
|
627 | 627 | os.utime(os.path.dirname(link_name), None) |
|
628 | 628 | except OSError: |
|
629 | 629 | pass |
|
630 | 630 | |
|
631 | 631 | class state_update(object): |
|
632 | 632 | ''' This context manager is responsible for dispatching the state-enter |
|
633 | 633 | and state-leave signals to the watchman service. The enter and leave |
|
634 | 634 | methods can be invoked manually (for scenarios where context manager |
|
635 | 635 | semantics are not possible). If parameters oldnode and newnode are None, |
|
636 | 636 | they will be populated based on the current working copy in enter and |
|
637 | 637 | leave, respectively. Similarly, if the distance is None, it will be |
|
638 | 638 | calculated based on the oldnode and newnode in the leave method.''' |
|
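A minimal usage sketch (this mirrors how wrapupdate below drives the context manager; oldnode, newnode and distance are whatever the caller has computed):

    with state_update(repo, name='hg.update', oldnode=oldnode,
                      newnode=newnode, distance=distance, partial=False):
        pass  # perform the working directory update inside the bracket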
639 | 639 | |
|
640 | 640 | def __init__(self, repo, name, oldnode=None, newnode=None, distance=None, |
|
641 | 641 | partial=False): |
|
642 | 642 | self.repo = repo.unfiltered() |
|
643 | 643 | self.name = name |
|
644 | 644 | self.oldnode = oldnode |
|
645 | 645 | self.newnode = newnode |
|
646 | 646 | self.distance = distance |
|
647 | 647 | self.partial = partial |
|
648 | 648 | self._lock = None |
|
649 | 649 | self.need_leave = False |
|
650 | 650 | |
|
651 | 651 | def __enter__(self): |
|
652 | 652 | self.enter() |
|
653 | 653 | |
|
654 | 654 | def enter(self): |
|
655 | 655 | # Make sure we have a wlock prior to sending notifications to watchman. |
|
656 | 656 | # We don't want to race with other actors. In the update case, |
|
657 | 657 | # merge.update is going to take the wlock almost immediately. We are |
|
658 | 658 | # effectively extending the lock around several short sanity checks. |
|
659 | 659 | if self.oldnode is None: |
|
660 | 660 | self.oldnode = self.repo['.'].node() |
|
661 | 661 | |
|
662 | 662 | if self.repo.currentwlock() is None: |
|
663 | 663 | if util.safehasattr(self.repo, 'wlocknostateupdate'): |
|
664 | 664 | self._lock = self.repo.wlocknostateupdate() |
|
665 | 665 | else: |
|
666 | 666 | self._lock = self.repo.wlock() |
|
667 | 667 | self.need_leave = self._state( |
|
668 | 668 | 'state-enter', |
|
669 | 669 | hex(self.oldnode)) |
|
670 | 670 | return self |
|
671 | 671 | |
|
672 | 672 | def __exit__(self, type_, value, tb): |
|
673 | 673 | abort = True if type_ else False |
|
674 | 674 | self.exit(abort=abort) |
|
675 | 675 | |
|
676 | 676 | def exit(self, abort=False): |
|
677 | 677 | try: |
|
678 | 678 | if self.need_leave: |
|
679 | 679 | status = 'failed' if abort else 'ok' |
|
680 | 680 | if self.newnode is None: |
|
681 | 681 | self.newnode = self.repo['.'].node() |
|
682 | 682 | if self.distance is None: |
|
683 | 683 | self.distance = calcdistance( |
|
684 | 684 | self.repo, self.oldnode, self.newnode) |
|
685 | 685 | self._state( |
|
686 | 686 | 'state-leave', |
|
687 | 687 | hex(self.newnode), |
|
688 | 688 | status=status) |
|
689 | 689 | finally: |
|
690 | 690 | self.need_leave = False |
|
691 | 691 | if self._lock: |
|
692 | 692 | self._lock.release() |
|
693 | 693 | |
|
694 | 694 | def _state(self, cmd, commithash, status='ok'): |
|
695 | 695 | if not util.safehasattr(self.repo, '_watchmanclient'): |
|
696 | 696 | return False |
|
697 | 697 | try: |
|
698 | 698 | self.repo._watchmanclient.command(cmd, { |
|
699 | 699 | 'name': self.name, |
|
700 | 700 | 'metadata': { |
|
701 | 701 | # the target revision |
|
702 | 702 | 'rev': commithash, |
|
703 | 703 | # approximate number of commits between current and target |
|
704 | 704 | 'distance': self.distance if self.distance else 0, |
|
705 | 705 | # success/failure (only really meaningful for state-leave) |
|
706 | 706 | 'status': status, |
|
707 | 707 | # whether the working copy parent is changing |
|
708 | 708 | 'partial': self.partial, |
|
709 | 709 | }}) |
|
710 | 710 | return True |
|
711 | 711 | except Exception as e: |
|
712 | 712 | # Swallow any errors; fire and forget |
|
713 | 713 | self.repo.ui.log( |
|
714 | 714 | 'watchman', 'Exception %s while running %s\n', e, cmd) |
|
715 | 715 | return False |
|
716 | 716 | |
|
717 | 717 | # Estimate the distance between two nodes |
|
718 | 718 | def calcdistance(repo, oldnode, newnode): |
|
719 | 719 | anc = repo.changelog.ancestor(oldnode, newnode) |
|
720 | 720 | ancrev = repo[anc].rev() |
|
721 | 721 | distance = (abs(repo[oldnode].rev() - ancrev) |
|
722 | 722 | + abs(repo[newnode].rev() - ancrev)) |
|
723 | 723 | return distance |
|
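As a worked example of the estimate above (hypothetical revision numbers): if oldnode sits at revision 10, newnode at revision 14, and their common ancestor at revision 8, the distance is abs(10 - 8) + abs(14 - 8) = 8.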
724 | 724 | |
|
725 | 725 | # Bracket working copy updates with calls to the watchman state-enter |
|
726 | 726 | # and state-leave commands. This allows clients to perform more intelligent |
|
727 | 727 | # settling during bulk file change scenarios |
|
728 | 728 | # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling |
|
729 | 729 | def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None, |
|
730 | 730 | mergeancestor=False, labels=None, matcher=None, **kwargs): |
|
731 | 731 | |
|
732 | 732 | distance = 0 |
|
733 | 733 | partial = True |
|
734 | 734 | oldnode = repo['.'].node() |
|
735 | 735 | newnode = repo[node].node() |
|
736 | 736 | if matcher is None or matcher.always(): |
|
737 | 737 | partial = False |
|
738 | 738 | distance = calcdistance(repo.unfiltered(), oldnode, newnode) |
|
739 | 739 | |
|
740 | 740 | with state_update(repo, name="hg.update", oldnode=oldnode, newnode=newnode, |
|
741 | 741 | distance=distance, partial=partial): |
|
742 | 742 | return orig( |
|
743 | 743 | repo, node, branchmerge, force, ancestor, mergeancestor, |
|
744 | 744 | labels, matcher, **kwargs) |
|
745 | 745 | |
|
746 | 746 | def repo_has_depth_one_nested_repo(repo): |
|
747 | 747 | for f in repo.wvfs.listdir(): |
|
748 | 748 | if os.path.isdir(os.path.join(repo.root, f, '.hg')): |
|
749 | 749 | msg = 'fsmonitor: sub-repository %r detected, fsmonitor disabled\n' |
|
750 | 750 | repo.ui.debug(msg % f) |
|
751 | 751 | return True |
|
752 | 752 | return False |
|
753 | 753 | |
|
754 | 754 | def reposetup(ui, repo): |
|
755 | 755 | # We don't work with largefiles or eol |
|
756 | 756 | exts = extensions.enabled() |
|
757 | 757 | for ext in _blacklist: |
|
758 | 758 | if ext in exts: |
|
759 | 759 | ui.warn(_('The fsmonitor extension is incompatible with the %s ' |
|
760 | 760 | 'extension and has been disabled.\n') % ext) |
|
761 | 761 | return |
|
762 | 762 | |
|
763 | 763 | if repo.local(): |
|
764 | 764 | # We don't work with subrepos either. |
|
765 | 765 | # |
|
766 | 766 | # Checking repo[None].substate can cause a dirstate parse, which is too |
|
767 | 767 | # slow. Instead, look for a file called .hgsubstate. |
|
768 | 768 | if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'): |
|
769 | 769 | return |
|
770 | 770 | |
|
771 | 771 | if repo_has_depth_one_nested_repo(repo): |
|
772 | 772 | return |
|
773 | 773 | |
|
774 | 774 | fsmonitorstate = state.state(repo) |
|
775 | 775 | if fsmonitorstate.mode == 'off': |
|
776 | 776 | return |
|
777 | 777 | |
|
778 | 778 | try: |
|
779 | 779 | client = watchmanclient.client(repo) |
|
780 | 780 | except Exception as ex: |
|
781 | 781 | _handleunavailable(ui, fsmonitorstate, ex) |
|
782 | 782 | return |
|
783 | 783 | |
|
784 | 784 | repo._fsmonitorstate = fsmonitorstate |
|
785 | 785 | repo._watchmanclient = client |
|
786 | 786 | |
|
787 | 787 | dirstate, cached = localrepo.isfilecached(repo, 'dirstate') |
|
788 | 788 | if cached: |
|
789 | 789 | # at this point since fsmonitorstate wasn't present, |
|
790 | 790 | # repo.dirstate is not a fsmonitordirstate |
|
791 | 791 | makedirstate(repo, dirstate) |
|
792 | 792 | |
|
793 | 793 | class fsmonitorrepo(repo.__class__): |
|
794 | 794 | def status(self, *args, **kwargs): |
|
795 | 795 | orig = super(fsmonitorrepo, self).status |
|
796 | 796 | return overridestatus(orig, self, *args, **kwargs) |
|
797 | 797 | |
|
798 | 798 | def wlocknostateupdate(self, *args, **kwargs): |
|
799 | 799 | return super(fsmonitorrepo, self).wlock(*args, **kwargs) |
|
800 | 800 | |
|
801 | 801 | def wlock(self, *args, **kwargs): |
|
802 | 802 | l = super(fsmonitorrepo, self).wlock(*args, **kwargs) |
|
803 | 803 | if not ui.configbool( |
|
804 | 804 | "experimental", "fsmonitor.transaction_notify"): |
|
805 | 805 | return l |
|
806 | 806 | if l.held != 1: |
|
807 | 807 | return l |
|
808 | 808 | origrelease = l.releasefn |
|
809 | 809 | |
|
810 | 810 | def staterelease(): |
|
811 | 811 | if origrelease: |
|
812 | 812 | origrelease() |
|
813 | 813 | if l.stateupdate: |
|
814 | 814 | l.stateupdate.exit() |
|
815 | 815 | l.stateupdate = None |
|
816 | 816 | |
|
817 | 817 | try: |
|
818 | 818 | l.stateupdate = None |
|
819 | 819 | l.stateupdate = state_update(self, name="hg.transaction") |
|
820 | 820 | l.stateupdate.enter() |
|
821 | 821 | l.releasefn = staterelease |
|
822 | 822 | except Exception as e: |
|
823 | 823 | # Swallow any errors; fire and forget |
|
824 | 824 | self.ui.log( |
|
825 | 825 | 'watchman', 'Exception in state update %s\n', e) |
|
826 | 826 | return l |
|
827 | 827 | |
|
828 | 828 | repo.__class__ = fsmonitorrepo |
@@ -1,341 +1,341 @@
|
1 | 1 | # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org> |
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | '''commands to sign and verify changesets''' |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import binascii |
|
11 | 11 | import os |
|
12 | 12 | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | 14 | from mercurial import ( |
|
15 | 15 | cmdutil, |
|
16 | 16 | error, |
|
17 | 17 | help, |
|
18 | 18 | match, |
|
19 | 19 | node as hgnode, |
|
20 | 20 | pycompat, |
|
21 | 21 | registrar, |
|
22 | 22 | ) |
|
23 | 23 | from mercurial.utils import ( |
|
24 | 24 | dateutil, |
|
25 | 25 | procutil, |
|
26 | 26 | ) |
|
27 | 27 | |
|
28 | 28 | cmdtable = {} |
|
29 | 29 | command = registrar.command(cmdtable) |
|
30 | 30 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
31 | 31 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
32 | 32 | # be specifying the version(s) of Mercurial they are tested with, or |
|
33 | 33 | # leave the attribute unspecified. |
|
34 | 34 | testedwith = 'ships-with-hg-core' |
|
35 | 35 | |
|
36 | 36 | configtable = {} |
|
37 | 37 | configitem = registrar.configitem(configtable) |
|
38 | 38 | |
|
39 | 39 | configitem('gpg', 'cmd', |
|
40 | 40 | default='gpg', |
|
41 | 41 | ) |
|
42 | 42 | configitem('gpg', 'key', |
|
43 | 43 | default=None, |
|
44 | 44 | ) |
|
45 | 45 | configitem('gpg', '.*', |
|
46 | 46 | default=None, |
|
47 | 47 | generic=True, |
|
48 | 48 | ) |
|
49 | 49 | |
|
50 | 50 | # Custom help category |
|
51 | 51 | _HELP_CATEGORY = 'gpg' |
|
52 | 52 | |
|
53 | 53 | class gpg(object): |
|
54 | 54 | def __init__(self, path, key=None): |
|
55 | 55 | self.path = path |
|
56 | 56 | self.key = (key and " --local-user \"%s\"" % key) or "" |
|
57 | 57 | |
|
58 | 58 | def sign(self, data): |
|
59 | 59 | gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key) |
|
60 | 60 | return procutil.filter(data, gpgcmd) |
|
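For example (hypothetical key id), with path ``gpg`` and key ``ABCD1234`` the command built above is ``gpg --sign --detach-sign --local-user "ABCD1234"``, and the changeset text is piped through it to produce the detached signature.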
61 | 61 | |
|
62 | 62 | def verify(self, data, sig): |
|
63 | 63 | """ returns of the good and bad signatures""" |
|
64 | 64 | sigfile = datafile = None |
|
65 | 65 | try: |
|
66 | 66 | # create temporary files |
|
67 | 67 | fd, sigfile = pycompat.mkstemp(prefix="hg-gpg-", suffix=".sig") |
|
68 | 68 | fp = os.fdopen(fd, r'wb') |
|
69 | 69 | fp.write(sig) |
|
70 | 70 | fp.close() |
|
71 | 71 | fd, datafile = pycompat.mkstemp(prefix="hg-gpg-", suffix=".txt") |
|
72 | 72 | fp = os.fdopen(fd, r'wb') |
|
73 | 73 | fp.write(data) |
|
74 | 74 | fp.close() |
|
75 | 75 | gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify " |
|
76 | 76 | "\"%s\" \"%s\"" % (self.path, sigfile, datafile)) |
|
77 | 77 | ret = procutil.filter("", gpgcmd) |
|
78 | 78 | finally: |
|
79 | 79 | for f in (sigfile, datafile): |
|
80 | 80 | try: |
|
81 | 81 | if f: |
|
82 | 82 | os.unlink(f) |
|
83 | 83 | except OSError: |
|
84 | 84 | pass |
|
85 | 85 | keys = [] |
|
86 | 86 | key, fingerprint = None, None |
|
87 | 87 | for l in ret.splitlines(): |
|
88 | 88 | # see DETAILS in the gnupg documentation |
|
89 | 89 | # filter the logger output |
|
90 | 90 | if not l.startswith("[GNUPG:]"): |
|
91 | 91 | continue |
|
92 | 92 | l = l[9:] |
|
93 | 93 | if l.startswith("VALIDSIG"): |
|
94 | 94 | # fingerprint of the primary key |
|
95 | 95 | fingerprint = l.split()[10] |
|
96 | 96 | elif l.startswith("ERRSIG"): |
|
97 | 97 | key = l.split(" ", 3)[:2] |
|
98 | 98 | key.append("") |
|
99 | 99 | fingerprint = None |
|
100 | 100 | elif (l.startswith("GOODSIG") or |
|
101 | 101 | l.startswith("EXPSIG") or |
|
102 | 102 | l.startswith("EXPKEYSIG") or |
|
103 | 103 | l.startswith("BADSIG")): |
|
104 | 104 | if key is not None: |
|
105 | 105 | keys.append(key + [fingerprint]) |
|
106 | 106 | key = l.split(" ", 2) |
|
107 | 107 | fingerprint = None |
|
108 | 108 | if key is not None: |
|
109 | 109 | keys.append(key + [fingerprint]) |
|
110 | 110 | return keys |
|
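For illustration, the ``[GNUPG:]`` status lines parsed above look roughly like the following (the key id, user id and fingerprints are made up; see DETAILS in the GnuPG documentation for the authoritative format):

    [GNUPG:] GOODSIG 0123456789ABCDEF Example User <user@example.com>
    [GNUPG:] VALIDSIG 0123456789ABCDEF0123456789ABCDEF01234567 ... ABCDEF0123456789ABCDEF0123456789ABCDEF01

The fingerprint recorded for each signature is taken from the last field of the VALIDSIG line (the primary key fingerprint).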
111 | 111 | |
|
112 | 112 | def newgpg(ui, **opts): |
|
113 | 113 | """create a new gpg instance""" |
|
114 | 114 | gpgpath = ui.config("gpg", "cmd") |
|
115 | 115 | gpgkey = opts.get(r'key') |
|
116 | 116 | if not gpgkey: |
|
117 | 117 | gpgkey = ui.config("gpg", "key") |
|
118 | 118 | return gpg(gpgpath, gpgkey) |
|
119 | 119 | |
|
120 | 120 | def sigwalk(repo): |
|
121 | 121 | """ |
|
122 | 122 | walk over every signature, yielding a pair |
|
123 | 123 | ((node, version, sig), (filename, linenumber)) |
|
124 | 124 | """ |
|
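Each yielded entry comes from one line of ``.hgsigs`` (or of the local ``localsigs`` file), in the format written out by ``hg sign`` below, roughly:

    <40-hex-digit changeset node> 0 <base64-encoded detached signature>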
125 | 125 | def parsefile(fileiter, context): |
|
126 | 126 | ln = 1 |
|
127 | 127 | for l in fileiter: |
|
128 | 128 | if not l: |
|
129 | 129 | continue |
|
130 | 130 | yield (l.split(" ", 2), (context, ln)) |
|
131 | 131 | ln += 1 |
|
132 | 132 | |
|
133 | 133 | # read the heads |
|
134 | 134 | fl = repo.file(".hgsigs") |
|
135 | 135 | for r in reversed(fl.heads()): |
|
136 | 136 | fn = ".hgsigs|%s" % hgnode.short(r) |
|
137 | 137 | for item in parsefile(fl.read(r).splitlines(), fn): |
|
138 | 138 | yield item |
|
139 | 139 | try: |
|
140 | 140 | # read local signatures |
|
141 | 141 | fn = "localsigs" |
|
142 | 142 | for item in parsefile(repo.vfs(fn), fn): |
|
143 | 143 | yield item |
|
144 | 144 | except IOError: |
|
145 | 145 | pass |
|
146 | 146 | |
|
147 | 147 | def getkeys(ui, repo, mygpg, sigdata, context): |
|
148 | 148 | """get the keys who signed a data""" |
|
149 | 149 | fn, ln = context |
|
150 | 150 | node, version, sig = sigdata |
|
151 | 151 | prefix = "%s:%d" % (fn, ln) |
|
152 | 152 | node = hgnode.bin(node) |
|
153 | 153 | |
|
154 | 154 | data = node2txt(repo, node, version) |
|
155 | 155 | sig = binascii.a2b_base64(sig) |
|
156 | 156 | keys = mygpg.verify(data, sig) |
|
157 | 157 | |
|
158 | 158 | validkeys = [] |
|
159 | 159 | # warn for expired key and/or sigs |
|
160 | 160 | for key in keys: |
|
161 | 161 | if key[0] == "ERRSIG": |
|
162 | 162 | ui.write(_("%s Unknown key ID \"%s\"\n") % (prefix, key[1])) |
|
163 | 163 | continue |
|
164 | 164 | if key[0] == "BADSIG": |
|
165 | 165 | ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2])) |
|
166 | 166 | continue |
|
167 | 167 | if key[0] == "EXPSIG": |
|
168 | 168 | ui.write(_("%s Note: Signature has expired" |
|
169 | 169 | " (signed by: \"%s\")\n") % (prefix, key[2])) |
|
170 | 170 | elif key[0] == "EXPKEYSIG": |
|
171 | 171 | ui.write(_("%s Note: This key has expired" |
|
172 | 172 | " (signed by: \"%s\")\n") % (prefix, key[2])) |
|
173 | 173 | validkeys.append((key[1], key[2], key[3])) |
|
174 | 174 | return validkeys |
|
175 | 175 | |
|
176 | 176 | @command("sigs", [], _('hg sigs'), helpcategory=_HELP_CATEGORY) |
|
177 | 177 | def sigs(ui, repo): |
|
178 | 178 | """list signed changesets""" |
|
179 | 179 | mygpg = newgpg(ui) |
|
180 | 180 | revs = {} |
|
181 | 181 | |
|
182 | 182 | for data, context in sigwalk(repo): |
|
183 | 183 | node, version, sig = data |
|
184 | 184 | fn, ln = context |
|
185 | 185 | try: |
|
186 | 186 | n = repo.lookup(node) |
|
187 | 187 | except KeyError: |
|
188 | 188 | ui.warn(_("%s:%d node does not exist\n") % (fn, ln)) |
|
189 | 189 | continue |
|
190 | 190 | r = repo.changelog.rev(n) |
|
191 | 191 | keys = getkeys(ui, repo, mygpg, data, context) |
|
192 | 192 | if not keys: |
|
193 | 193 | continue |
|
194 | 194 | revs.setdefault(r, []) |
|
195 | 195 | revs[r].extend(keys) |
|
196 | 196 | for rev in sorted(revs, reverse=True): |
|
197 | 197 | for k in revs[rev]: |
|
198 | 198 | r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev))) |
|
199 | 199 | ui.write("%-30s %s\n" % (keystr(ui, k), r)) |
|
200 | 200 | |
|
201 | 201 | @command("sigcheck", [], _('hg sigcheck REV'), helpcategory=_HELP_CATEGORY) |
|
202 | 202 | def sigcheck(ui, repo, rev): |
|
203 | 203 | """verify all the signatures there may be for a particular revision""" |
|
204 | 204 | mygpg = newgpg(ui) |
|
205 | 205 | rev = repo.lookup(rev) |
|
206 | 206 | hexrev = hgnode.hex(rev) |
|
207 | 207 | keys = [] |
|
208 | 208 | |
|
209 | 209 | for data, context in sigwalk(repo): |
|
210 | 210 | node, version, sig = data |
|
211 | 211 | if node == hexrev: |
|
212 | 212 | k = getkeys(ui, repo, mygpg, data, context) |
|
213 | 213 | if k: |
|
214 | 214 | keys.extend(k) |
|
215 | 215 | |
|
216 | 216 | if not keys: |
|
217 | 217 | ui.write(_("no valid signature for %s\n") % hgnode.short(rev)) |
|
218 | 218 | return |
|
219 | 219 | |
|
220 | 220 | # print summary |
|
221 | 221 | ui.write(_("%s is signed by:\n") % hgnode.short(rev)) |
|
222 | 222 | for key in keys: |
|
223 | 223 | ui.write(" %s\n" % keystr(ui, key)) |
|
224 | 224 | |
|
225 | 225 | def keystr(ui, key): |
|
226 | 226 | """associate a string to a key (username, comment)""" |
|
227 | 227 | keyid, user, fingerprint = key |
|
228 | 228 | comment = ui.config("gpg", fingerprint) |
|
229 | 229 | if comment: |
|
230 | 230 | return "%s (%s)" % (user, comment) |
|
231 | 231 | else: |
|
232 | 232 | return user |
|
233 | 233 | |
|
234 | 234 | @command("sign", |
|
235 | 235 | [('l', 'local', None, _('make the signature local')), |
|
236 | 236 | ('f', 'force', None, _('sign even if the sigfile is modified')), |
|
237 | 237 | ('', 'no-commit', None, _('do not commit the sigfile after signing')), |
|
238 | 238 | ('k', 'key', '', |
|
239 | 239 | _('the key id to sign with'), _('ID')), |
|
240 | 240 | ('m', 'message', '', |
|
241 | 241 | _('use text as commit message'), _('TEXT')), |
|
242 | 242 | ('e', 'edit', False, _('invoke editor on commit messages')), |
|
243 | 243 | ] + cmdutil.commitopts2, |
|
244 | 244 | _('hg sign [OPTION]... [REV]...'), |
|
245 | 245 | helpcategory=_HELP_CATEGORY) |
|
246 | 246 | def sign(ui, repo, *revs, **opts): |
|
247 | 247 | """add a signature for the current or given revision |
|
248 | 248 | |
|
249 | 249 | If no revision is given, the parent of the working directory is used, |
|
250 | 250 | or tip if no revision is checked out. |
|
251 | 251 | |
|
252 | 252 | The ``gpg.cmd`` config setting can be used to specify the command |
|
253 | 253 | to run. A default key can be specified with ``gpg.key``. |
|
254 | 254 | |
|
255 | 255 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
256 | 256 | """ |
|
257 | 257 | with repo.wlock(): |
|
258 | 258 | return _dosign(ui, repo, *revs, **opts) |
|
259 | 259 | |
|
260 | 260 | def _dosign(ui, repo, *revs, **opts): |
|
261 | 261 | mygpg = newgpg(ui, **opts) |
|
262 | 262 | opts = pycompat.byteskwargs(opts) |
|
263 | 263 | sigver = "0" |
|
264 | 264 | sigmessage = "" |
|
265 | 265 | |
|
266 | 266 | date = opts.get('date') |
|
267 | 267 | if date: |
|
268 | 268 | opts['date'] = dateutil.parsedate(date) |
|
269 | 269 | |
|
270 | 270 | if revs: |
|
271 | 271 | nodes = [repo.lookup(n) for n in revs] |
|
272 | 272 | else: |
|
273 | 273 | nodes = [node for node in repo.dirstate.parents() |
|
274 | 274 | if node != hgnode.nullid] |
|
275 | 275 | if len(nodes) > 1: |
|
276 | 276 | raise error.Abort(_('uncommitted merge - please provide a ' |
|
277 | 277 | 'specific revision')) |
|
278 | 278 | if not nodes: |
|
279 | 279 | nodes = [repo.changelog.tip()] |
|
280 | 280 | |
|
281 | 281 | for n in nodes: |
|
282 | 282 | hexnode = hgnode.hex(n) |
|
283 | 283 | ui.write(_("signing %d:%s\n") % (repo.changelog.rev(n), |
|
284 | 284 | hgnode.short(n))) |
|
285 | 285 | # build data |
|
286 | 286 | data = node2txt(repo, n, sigver) |
|
287 | 287 | sig = mygpg.sign(data) |
|
288 | 288 | if not sig: |
|
289 | 289 | raise error.Abort(_("error while signing")) |
|
290 | 290 | sig = binascii.b2a_base64(sig) |
|
291 | 291 | sig = sig.replace("\n", "") |
|
292 | 292 | sigmessage += "%s %s %s\n" % (hexnode, sigver, sig) |
|
293 | 293 | |
|
294 | 294 | # write it |
|
295 | 295 | if opts['local']: |
|
296 | 296 | repo.vfs.append("localsigs", sigmessage) |
|
297 | 297 | return |
|
298 | 298 | |
|
299 | 299 | if not opts["force"]: |
|
300 |     | msigs = match.exact( |
    | 300 | msigs = match.exact(['.hgsigs']) |
|
301 | 301 | if any(repo.status(match=msigs, unknown=True, ignored=True)): |
|
302 | 302 | raise error.Abort(_("working copy of .hgsigs is changed "), |
|
303 | 303 | hint=_("please commit .hgsigs manually")) |
|
304 | 304 | |
|
305 | 305 | sigsfile = repo.wvfs(".hgsigs", "ab") |
|
306 | 306 | sigsfile.write(sigmessage) |
|
307 | 307 | sigsfile.close() |
|
308 | 308 | |
|
309 | 309 | if '.hgsigs' not in repo.dirstate: |
|
310 | 310 | repo[None].add([".hgsigs"]) |
|
311 | 311 | |
|
312 | 312 | if opts["no_commit"]: |
|
313 | 313 | return |
|
314 | 314 | |
|
315 | 315 | message = opts['message'] |
|
316 | 316 | if not message: |
|
317 | 317 | # we don't translate commit messages |
|
318 | 318 | message = "\n".join(["Added signature for changeset %s" |
|
319 | 319 | % hgnode.short(n) |
|
320 | 320 | for n in nodes]) |
|
321 | 321 | try: |
|
322 | 322 | editor = cmdutil.getcommiteditor(editform='gpg.sign', |
|
323 | 323 | **pycompat.strkwargs(opts)) |
|
324 | 324 | repo.commit(message, opts['user'], opts['date'], match=msigs, |
|
325 | 325 | editor=editor) |
|
326 | 326 | except ValueError as inst: |
|
327 | 327 | raise error.Abort(pycompat.bytestr(inst)) |
|
328 | 328 | |
|
329 | 329 | def node2txt(repo, node, ver): |
|
330 | 330 | """map a manifest into some text""" |
|
331 | 331 | if ver == "0": |
|
332 | 332 | return "%s\n" % hgnode.hex(node) |
|
333 | 333 | else: |
|
334 | 334 | raise error.Abort(_("unknown signature version")) |
|
335 | 335 | |
|
336 | 336 | def extsetup(ui): |
|
337 | 337 | # Add our category before "Repository maintenance". |
|
338 | 338 | help.CATEGORY_ORDER.insert( |
|
339 | 339 | help.CATEGORY_ORDER.index(command.CATEGORY_MAINTENANCE), |
|
340 | 340 | _HELP_CATEGORY) |
|
341 | 341 | help.CATEGORY_NAMES[_HELP_CATEGORY] = 'GPG signing' |
@@ -1,675 +1,675 @@
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''largefiles utility code: must not import other modules in this package.''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import copy |
|
13 | 13 | import hashlib |
|
14 | 14 | import os |
|
15 | 15 | import stat |
|
16 | 16 | |
|
17 | 17 | from mercurial.i18n import _ |
|
18 | 18 | from mercurial.node import hex |
|
19 | 19 | |
|
20 | 20 | from mercurial import ( |
|
21 | 21 | dirstate, |
|
22 | 22 | encoding, |
|
23 | 23 | error, |
|
24 | 24 | httpconnection, |
|
25 | 25 | match as matchmod, |
|
26 | 26 | node, |
|
27 | 27 | pycompat, |
|
28 | 28 | scmutil, |
|
29 | 29 | sparse, |
|
30 | 30 | util, |
|
31 | 31 | vfs as vfsmod, |
|
32 | 32 | ) |
|
33 | 33 | |
|
34 | 34 | shortname = '.hglf' |
|
35 | 35 | shortnameslash = shortname + '/' |
|
36 | 36 | longname = 'largefiles' |
|
37 | 37 | |
|
38 | 38 | # -- Private worker functions ------------------------------------------ |
|
39 | 39 | |
|
40 | 40 | def getminsize(ui, assumelfiles, opt, default=10): |
|
41 | 41 | lfsize = opt |
|
42 | 42 | if not lfsize and assumelfiles: |
|
43 | 43 | lfsize = ui.config(longname, 'minsize', default=default) |
|
44 | 44 | if lfsize: |
|
45 | 45 | try: |
|
46 | 46 | lfsize = float(lfsize) |
|
47 | 47 | except ValueError: |
|
48 | 48 | raise error.Abort(_('largefiles: size must be number (not %s)\n') |
|
49 | 49 | % lfsize) |
|
50 | 50 | if lfsize is None: |
|
51 | 51 | raise error.Abort(_('minimum size for largefiles must be specified')) |
|
52 | 52 | return lfsize |
|
53 | 53 | |
|
54 | 54 | def link(src, dest): |
|
55 | 55 | """Try to create hardlink - if that fails, efficiently make a copy.""" |
|
56 | 56 | util.makedirs(os.path.dirname(dest)) |
|
57 | 57 | try: |
|
58 | 58 | util.oslink(src, dest) |
|
59 | 59 | except OSError: |
|
60 | 60 | # if hardlinks fail, fallback on atomic copy |
|
61 | 61 | with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf: |
|
62 | 62 | for chunk in util.filechunkiter(srcf): |
|
63 | 63 | dstf.write(chunk) |
|
64 | 64 | os.chmod(dest, os.stat(src).st_mode) |
|
65 | 65 | |
|
66 | 66 | def usercachepath(ui, hash): |
|
67 | 67 | '''Return the correct location in the "global" largefiles cache for a file |
|
68 | 68 | with the given hash. |
|
69 | 69 | This cache is used for sharing of largefiles across repositories - both |
|
70 | 70 | to preserve download bandwidth and storage space.''' |
|
71 | 71 | return os.path.join(_usercachedir(ui), hash) |
|
72 | 72 | |
|
73 | 73 | def _usercachedir(ui, name=longname): |
|
74 | 74 | '''Return the location of the "global" largefiles cache.''' |
|
75 | 75 | path = ui.configpath(name, 'usercache') |
|
76 | 76 | if path: |
|
77 | 77 | return path |
|
78 | 78 | if pycompat.iswindows: |
|
79 | 79 | appdata = encoding.environ.get('LOCALAPPDATA',\ |
|
80 | 80 | encoding.environ.get('APPDATA')) |
|
81 | 81 | if appdata: |
|
82 | 82 | return os.path.join(appdata, name) |
|
83 | 83 | elif pycompat.isdarwin: |
|
84 | 84 | home = encoding.environ.get('HOME') |
|
85 | 85 | if home: |
|
86 | 86 | return os.path.join(home, 'Library', 'Caches', name) |
|
87 | 87 | elif pycompat.isposix: |
|
88 | 88 | path = encoding.environ.get('XDG_CACHE_HOME') |
|
89 | 89 | if path: |
|
90 | 90 | return os.path.join(path, name) |
|
91 | 91 | home = encoding.environ.get('HOME') |
|
92 | 92 | if home: |
|
93 | 93 | return os.path.join(home, '.cache', name) |
|
94 | 94 | else: |
|
95 | 95 | raise error.Abort(_('unknown operating system: %s\n') |
|
96 | 96 | % pycompat.osname) |
|
97 | 97 | raise error.Abort(_('unknown %s usercache location') % name) |
|
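To summarize the lookup above: with the default name of 'largefiles' and no ``largefiles.usercache`` setting, the user cache ends up at roughly the following locations (exact paths depend on the environment variables consulted above):

    Windows:  %LOCALAPPDATA%\largefiles  (falling back to %APPDATA%\largefiles)
    macOS:    $HOME/Library/Caches/largefiles
    POSIX:    $XDG_CACHE_HOME/largefiles  (falling back to $HOME/.cache/largefiles)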
98 | 98 | |
|
99 | 99 | def inusercache(ui, hash): |
|
100 | 100 | path = usercachepath(ui, hash) |
|
101 | 101 | return os.path.exists(path) |
|
102 | 102 | |
|
103 | 103 | def findfile(repo, hash): |
|
104 | 104 | '''Return store path of the largefile with the specified hash. |
|
105 | 105 | As a side effect, the file might be linked from user cache. |
|
106 | 106 | Return None if the file can't be found locally.''' |
|
107 | 107 | path, exists = findstorepath(repo, hash) |
|
108 | 108 | if exists: |
|
109 | 109 | repo.ui.note(_('found %s in store\n') % hash) |
|
110 | 110 | return path |
|
111 | 111 | elif inusercache(repo.ui, hash): |
|
112 | 112 | repo.ui.note(_('found %s in system cache\n') % hash) |
|
113 | 113 | path = storepath(repo, hash) |
|
114 | 114 | link(usercachepath(repo.ui, hash), path) |
|
115 | 115 | return path |
|
116 | 116 | return None |
|
117 | 117 | |
|
118 | 118 | class largefilesdirstate(dirstate.dirstate): |
|
119 | 119 | def __getitem__(self, key): |
|
120 | 120 | return super(largefilesdirstate, self).__getitem__(unixpath(key)) |
|
121 | 121 | def normal(self, f): |
|
122 | 122 | return super(largefilesdirstate, self).normal(unixpath(f)) |
|
123 | 123 | def remove(self, f): |
|
124 | 124 | return super(largefilesdirstate, self).remove(unixpath(f)) |
|
125 | 125 | def add(self, f): |
|
126 | 126 | return super(largefilesdirstate, self).add(unixpath(f)) |
|
127 | 127 | def drop(self, f): |
|
128 | 128 | return super(largefilesdirstate, self).drop(unixpath(f)) |
|
129 | 129 | def forget(self, f): |
|
130 | 130 | return super(largefilesdirstate, self).forget(unixpath(f)) |
|
131 | 131 | def normallookup(self, f): |
|
132 | 132 | return super(largefilesdirstate, self).normallookup(unixpath(f)) |
|
133 | 133 | def _ignore(self, f): |
|
134 | 134 | return False |
|
135 | 135 | def write(self, tr=False): |
|
136 | 136 | # (1) disable PENDING mode always |
|
137 | 137 | # (lfdirstate isn't yet managed as a part of the transaction) |
|
138 | 138 | # (2) avoid develwarn 'use dirstate.write with ....' |
|
139 | 139 | super(largefilesdirstate, self).write(None) |
|
140 | 140 | |
|
141 | 141 | def openlfdirstate(ui, repo, create=True): |
|
142 | 142 | ''' |
|
143 | 143 | Return a dirstate object that tracks largefiles: i.e. its root is |
|
144 | 144 | the repo root, but it is saved in .hg/largefiles/dirstate. |
|
145 | 145 | ''' |
|
146 | 146 | vfs = repo.vfs |
|
147 | 147 | lfstoredir = longname |
|
148 | 148 | opener = vfsmod.vfs(vfs.join(lfstoredir)) |
|
149 | 149 | lfdirstate = largefilesdirstate(opener, ui, repo.root, |
|
150 | 150 | repo.dirstate._validate, |
|
151 | 151 | lambda: sparse.matcher(repo)) |
|
152 | 152 | |
|
153 | 153 | # If the largefiles dirstate does not exist, populate and create |
|
154 | 154 | # it. This ensures that we create it on the first meaningful |
|
155 | 155 | # largefiles operation in a new clone. |
|
156 | 156 | if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')): |
|
157 | 157 | matcher = getstandinmatcher(repo) |
|
158 | 158 | standins = repo.dirstate.walk(matcher, subrepos=[], unknown=False, |
|
159 | 159 | ignored=False) |
|
160 | 160 | |
|
161 | 161 | if len(standins) > 0: |
|
162 | 162 | vfs.makedirs(lfstoredir) |
|
163 | 163 | |
|
164 | 164 | for standin in standins: |
|
165 | 165 | lfile = splitstandin(standin) |
|
166 | 166 | lfdirstate.normallookup(lfile) |
|
167 | 167 | return lfdirstate |
|
168 | 168 | |
|
169 | 169 | def lfdirstatestatus(lfdirstate, repo): |
|
170 | 170 | pctx = repo['.'] |
|
171 |     | match = matchmod.always(

    | 171 | match = matchmod.always()
|
172 | 172 | unsure, s = lfdirstate.status(match, subrepos=[], ignored=False, |
|
173 | 173 | clean=False, unknown=False) |
|
174 | 174 | modified, clean = s.modified, s.clean |
|
175 | 175 | for lfile in unsure: |
|
176 | 176 | try: |
|
177 | 177 | fctx = pctx[standin(lfile)] |
|
178 | 178 | except LookupError: |
|
179 | 179 | fctx = None |
|
180 | 180 | if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)): |
|
181 | 181 | modified.append(lfile) |
|
182 | 182 | else: |
|
183 | 183 | clean.append(lfile) |
|
184 | 184 | lfdirstate.normal(lfile) |
|
185 | 185 | return s |
|
186 | 186 | |
|
187 | 187 | def listlfiles(repo, rev=None, matcher=None): |
|
188 | 188 | '''return a list of largefiles in the working copy or the |
|
189 | 189 | specified changeset''' |
|
190 | 190 | |
|
191 | 191 | if matcher is None: |
|
192 | 192 | matcher = getstandinmatcher(repo) |
|
193 | 193 | |
|
194 | 194 | # ignore unknown files in working directory |
|
195 | 195 | return [splitstandin(f) |
|
196 | 196 | for f in repo[rev].walk(matcher) |
|
197 | 197 | if rev is not None or repo.dirstate[f] != '?'] |
|
198 | 198 | |
|
199 | 199 | def instore(repo, hash, forcelocal=False): |
|
200 | 200 | '''Return true if a largefile with the given hash exists in the store''' |
|
201 | 201 | return os.path.exists(storepath(repo, hash, forcelocal)) |
|
202 | 202 | |
|
203 | 203 | def storepath(repo, hash, forcelocal=False): |
|
204 | 204 | '''Return the correct location in the repository largefiles store for a |
|
205 | 205 | file with the given hash.''' |
|
206 | 206 | if not forcelocal and repo.shared(): |
|
207 | 207 | return repo.vfs.reljoin(repo.sharedpath, longname, hash) |
|
208 | 208 | return repo.vfs.join(longname, hash) |
|
209 | 209 | |
|
210 | 210 | def findstorepath(repo, hash): |
|
211 | 211 | '''Search through the local store path(s) to find the file for the given |
|
212 | 212 | hash. If the file is not found, its path in the primary store is returned. |
|
213 | 213 | The return value is a tuple of (path, exists(path)). |
|
214 | 214 | ''' |
|
215 | 215 | # For shared repos, the primary store is in the share source. But for |
|
216 | 216 | # backward compatibility, force a lookup in the local store if it wasn't |
|
217 | 217 | # found in the share source. |
|
218 | 218 | path = storepath(repo, hash, False) |
|
219 | 219 | |
|
220 | 220 | if instore(repo, hash): |
|
221 | 221 | return (path, True) |
|
222 | 222 | elif repo.shared() and instore(repo, hash, True): |
|
223 | 223 | return storepath(repo, hash, True), True |
|
224 | 224 | |
|
225 | 225 | return (path, False) |
|
226 | 226 | |
|
227 | 227 | def copyfromcache(repo, hash, filename): |
|
228 | 228 | '''Copy the specified largefile from the repo or system cache to |
|
229 | 229 | filename in the repository. Return true on success or false if the |
|
230 | 230 | file was not found in either cache (which should not happen:
|
231 | 231 | this is meant to be called only after ensuring that the needed |
|
232 | 232 | largefile exists in the cache).''' |
|
233 | 233 | wvfs = repo.wvfs |
|
234 | 234 | path = findfile(repo, hash) |
|
235 | 235 | if path is None: |
|
236 | 236 | return False |
|
237 | 237 | wvfs.makedirs(wvfs.dirname(wvfs.join(filename))) |
|
238 | 238 | # The write may fail before the file is fully written, but we |
|
239 | 239 | # don't use atomic writes in the working copy. |
|
240 | 240 | with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd: |
|
241 | 241 | gothash = copyandhash( |
|
242 | 242 | util.filechunkiter(srcfd), destfd) |
|
243 | 243 | if gothash != hash: |
|
244 | 244 | repo.ui.warn(_('%s: data corruption in %s with hash %s\n') |
|
245 | 245 | % (filename, path, gothash)) |
|
246 | 246 | wvfs.unlink(filename) |
|
247 | 247 | return False |
|
248 | 248 | return True |
|
249 | 249 | |
|
250 | 250 | def copytostore(repo, ctx, file, fstandin): |
|
251 | 251 | wvfs = repo.wvfs |
|
252 | 252 | hash = readasstandin(ctx[fstandin]) |
|
253 | 253 | if instore(repo, hash): |
|
254 | 254 | return |
|
255 | 255 | if wvfs.exists(file): |
|
256 | 256 | copytostoreabsolute(repo, wvfs.join(file), hash) |
|
257 | 257 | else: |
|
258 | 258 | repo.ui.warn(_("%s: largefile %s not available from local store\n") % |
|
259 | 259 | (file, hash)) |
|
260 | 260 | |
|
261 | 261 | def copyalltostore(repo, node): |
|
262 | 262 | '''Copy all largefiles in a given revision to the store''' |
|
263 | 263 | |
|
264 | 264 | ctx = repo[node] |
|
265 | 265 | for filename in ctx.files(): |
|
266 | 266 | realfile = splitstandin(filename) |
|
267 | 267 | if realfile is not None and filename in ctx.manifest(): |
|
268 | 268 | copytostore(repo, ctx, realfile, filename) |
|
269 | 269 | |
|
270 | 270 | def copytostoreabsolute(repo, file, hash): |
|
271 | 271 | if inusercache(repo.ui, hash): |
|
272 | 272 | link(usercachepath(repo.ui, hash), storepath(repo, hash)) |
|
273 | 273 | else: |
|
274 | 274 | util.makedirs(os.path.dirname(storepath(repo, hash))) |
|
275 | 275 | with open(file, 'rb') as srcf: |
|
276 | 276 | with util.atomictempfile(storepath(repo, hash), |
|
277 | 277 | createmode=repo.store.createmode) as dstf: |
|
278 | 278 | for chunk in util.filechunkiter(srcf): |
|
279 | 279 | dstf.write(chunk) |
|
280 | 280 | linktousercache(repo, hash) |
|
281 | 281 | |
|
282 | 282 | def linktousercache(repo, hash): |
|
283 | 283 | '''Link / copy the largefile with the specified hash from the store |
|
284 | 284 | to the cache.''' |
|
285 | 285 | path = usercachepath(repo.ui, hash) |
|
286 | 286 | link(storepath(repo, hash), path) |
|
287 | 287 | |
|
288 | 288 | def getstandinmatcher(repo, rmatcher=None): |
|
289 | 289 | '''Return a match object that applies rmatcher to the standin directory''' |
|
290 | 290 | wvfs = repo.wvfs |
|
291 | 291 | standindir = shortname |
|
292 | 292 | |
|
293 | 293 | # no warnings about missing files or directories |
|
294 | 294 | badfn = lambda f, msg: None |
|
295 | 295 | |
|
296 | 296 | if rmatcher and not rmatcher.always(): |
|
297 | 297 | pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()] |
|
298 | 298 | if not pats: |
|
299 | 299 | pats = [wvfs.join(standindir)] |
|
300 | 300 | match = scmutil.match(repo[None], pats, badfn=badfn) |
|
301 | 301 | else: |
|
302 | 302 | # no patterns: relative to repo root |
|
303 | 303 | match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn) |
|
304 | 304 | return match |
|
305 | 305 | |
|
306 | 306 | def composestandinmatcher(repo, rmatcher): |
|
307 | 307 | '''Return a matcher that accepts standins corresponding to the |
|
308 | 308 | files accepted by rmatcher. Pass the list of files in the matcher |
|
309 | 309 | as the paths specified by the user.''' |
|
310 | 310 | smatcher = getstandinmatcher(repo, rmatcher) |
|
311 | 311 | isstandin = smatcher.matchfn |
|
312 | 312 | def composedmatchfn(f): |
|
313 | 313 | return isstandin(f) and rmatcher.matchfn(splitstandin(f)) |
|
314 | 314 | smatcher.matchfn = composedmatchfn |
|
315 | 315 | |
|
316 | 316 | return smatcher |
|
317 | 317 | |
|
318 | 318 | def standin(filename): |
|
319 | 319 | '''Return the repo-relative path to the standin for the specified big |
|
320 | 320 | file.''' |
|
321 | 321 | # Notes: |
|
322 | 322 | # 1) Some callers want an absolute path, but for instance addlargefiles |
|
323 | 323 | # needs it repo-relative so it can be passed to repo[None].add(). So |
|
324 | 324 | # leave it up to the caller to use repo.wjoin() to get an absolute path. |
|
325 | 325 | # 2) Join with '/' because that's what dirstate always uses, even on |
|
326 | 326 | # Windows. Change existing separator to '/' first in case we are |
|
327 | 327 | # passed filenames from an external source (like the command line). |
|
328 | 328 | return shortnameslash + util.pconvert(filename) |
|
329 | 329 | |
|
330 | 330 | def isstandin(filename): |
|
331 | 331 | '''Return true if filename is a big file standin. filename must be |
|
332 | 332 | in Mercurial's internal form (slash-separated).''' |
|
333 | 333 | return filename.startswith(shortnameslash) |
|
334 | 334 | |
|
335 | 335 | def splitstandin(filename): |
|
336 | 336 | # Split on / because that's what dirstate always uses, even on Windows. |
|
337 | 337 | # Change local separator to / first just in case we are passed filenames |
|
338 | 338 | # from an external source (like the command line). |
|
339 | 339 | bits = util.pconvert(filename).split('/', 1) |
|
340 | 340 | if len(bits) == 2 and bits[0] == shortname: |
|
341 | 341 | return bits[1] |
|
342 | 342 | else: |
|
343 | 343 | return None |
|
344 | 344 | |
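
The standin helpers above (standin, isstandin, splitstandin) are a pure path mapping between a largefile and its placeholder under the standin directory. A minimal sketch of that round trip, assuming the extension's shortname is '.hglf' and ignoring the Windows separator conversion done by util.pconvert::

    SHORTNAME = '.hglf'                 # assumed value of lfutil.shortname
    SHORTNAMESLASH = SHORTNAME + '/'

    def standin(filename):
        # repo-relative path of the placeholder for a big file
        return SHORTNAMESLASH + filename

    def isstandin(filename):
        return filename.startswith(SHORTNAMESLASH)

    def splitstandin(filename):
        bits = filename.split('/', 1)
        if len(bits) == 2 and bits[0] == SHORTNAME:
            return bits[1]
        return None

    assert standin('sub/big.bin') == '.hglf/sub/big.bin'
    assert isstandin(standin('sub/big.bin'))
    assert splitstandin(standin('sub/big.bin')) == 'sub/big.bin'
    assert splitstandin('sub/big.bin') is None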
|
345 | 345 | def updatestandin(repo, lfile, standin): |
|
346 | 346 | """Re-calculate hash value of lfile and write it into standin |
|
347 | 347 | |
|
348 | 348 | This assumes that "lfutil.standin(lfile) == standin", for efficiency. |
|
349 | 349 | """ |
|
350 | 350 | file = repo.wjoin(lfile) |
|
351 | 351 | if repo.wvfs.exists(lfile): |
|
352 | 352 | hash = hashfile(file) |
|
353 | 353 | executable = getexecutable(file) |
|
354 | 354 | writestandin(repo, standin, hash, executable) |
|
355 | 355 | else: |
|
356 | 356 | raise error.Abort(_('%s: file not found!') % lfile) |
|
357 | 357 | |
|
358 | 358 | def readasstandin(fctx): |
|
359 | 359 | '''read hex hash from given filectx of standin file |
|
360 | 360 | |
|
361 | 361 | This encapsulates how "standin" data is stored into storage layer.''' |
|
362 | 362 | return fctx.data().strip() |
|
363 | 363 | |
|
364 | 364 | def writestandin(repo, standin, hash, executable): |
|
365 | 365 | '''write hash to <repo.root>/<standin>''' |
|
366 | 366 | repo.wwrite(standin, hash + '\n', executable and 'x' or '') |
|
367 | 367 | |
|
368 | 368 | def copyandhash(instream, outfile): |
|
369 | 369 | '''Read bytes from instream (iterable) and write them to outfile, |
|
370 | 370 | computing the SHA-1 hash of the data along the way. Return the hash.''' |
|
371 | 371 | hasher = hashlib.sha1('') |
|
372 | 372 | for data in instream: |
|
373 | 373 | hasher.update(data) |
|
374 | 374 | outfile.write(data) |
|
375 | 375 | return hex(hasher.digest()) |
|
376 | 376 | |
|
377 | 377 | def hashfile(file): |
|
378 | 378 | if not os.path.exists(file): |
|
379 | 379 | return '' |
|
380 | 380 | with open(file, 'rb') as fd: |
|
381 | 381 | return hexsha1(fd) |
|
382 | 382 | |
|
383 | 383 | def getexecutable(filename): |
|
384 | 384 | mode = os.stat(filename).st_mode |
|
385 | 385 | return ((mode & stat.S_IXUSR) and |
|
386 | 386 | (mode & stat.S_IXGRP) and |
|
387 | 387 | (mode & stat.S_IXOTH)) |
|
388 | 388 | |
|
389 | 389 | def urljoin(first, second, *arg): |
|
390 | 390 | def join(left, right): |
|
391 | 391 | if not left.endswith('/'): |
|
392 | 392 | left += '/' |
|
393 | 393 | if right.startswith('/'): |
|
394 | 394 | right = right[1:] |
|
395 | 395 | return left + right |
|
396 | 396 | |
|
397 | 397 | url = join(first, second) |
|
398 | 398 | for a in arg: |
|
399 | 399 | url = join(url, a) |
|
400 | 400 | return url |
|
401 | 401 | |
|
402 | 402 | def hexsha1(fileobj): |
|
403 | 403 | """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like |
|
404 | 404 | object data""" |
|
405 | 405 | h = hashlib.sha1() |
|
406 | 406 | for chunk in util.filechunkiter(fileobj): |
|
407 | 407 | h.update(chunk) |
|
408 | 408 | return hex(h.digest()) |
|
409 | 409 | |
|
410 | 410 | def httpsendfile(ui, filename): |
|
411 | 411 | return httpconnection.httpsendfile(ui, filename, 'rb') |
|
412 | 412 | |
|
413 | 413 | def unixpath(path): |
|
414 | 414 | '''Return a version of path normalized for use with the lfdirstate.''' |
|
415 | 415 | return util.pconvert(os.path.normpath(path)) |
|
416 | 416 | |
|
417 | 417 | def islfilesrepo(repo): |
|
418 | 418 | '''Return true if the repo is a largefile repo.''' |
|
419 | 419 | if ('largefiles' in repo.requirements and |
|
420 | 420 | any(shortnameslash in f[0] for f in repo.store.datafiles())): |
|
421 | 421 | return True |
|
422 | 422 | |
|
423 | 423 | return any(openlfdirstate(repo.ui, repo, False)) |
|
424 | 424 | |
|
425 | 425 | class storeprotonotcapable(Exception): |
|
426 | 426 | def __init__(self, storetypes): |
|
427 | 427 | self.storetypes = storetypes |
|
428 | 428 | |
|
429 | 429 | def getstandinsstate(repo): |
|
430 | 430 | standins = [] |
|
431 | 431 | matcher = getstandinmatcher(repo) |
|
432 | 432 | wctx = repo[None] |
|
433 | 433 | for standin in repo.dirstate.walk(matcher, subrepos=[], unknown=False, |
|
434 | 434 | ignored=False): |
|
435 | 435 | lfile = splitstandin(standin) |
|
436 | 436 | try: |
|
437 | 437 | hash = readasstandin(wctx[standin]) |
|
438 | 438 | except IOError: |
|
439 | 439 | hash = None |
|
440 | 440 | standins.append((lfile, hash)) |
|
441 | 441 | return standins |
|
442 | 442 | |
|
443 | 443 | def synclfdirstate(repo, lfdirstate, lfile, normallookup): |
|
444 | 444 | lfstandin = standin(lfile) |
|
445 | 445 | if lfstandin in repo.dirstate: |
|
446 | 446 | stat = repo.dirstate._map[lfstandin] |
|
447 | 447 | state, mtime = stat[0], stat[3] |
|
448 | 448 | else: |
|
449 | 449 | state, mtime = '?', -1 |
|
450 | 450 | if state == 'n': |
|
451 | 451 | if (normallookup or mtime < 0 or |
|
452 | 452 | not repo.wvfs.exists(lfile)): |
|
453 | 453 | # state 'n' doesn't ensure 'clean' in this case |
|
454 | 454 | lfdirstate.normallookup(lfile) |
|
455 | 455 | else: |
|
456 | 456 | lfdirstate.normal(lfile) |
|
457 | 457 | elif state == 'm': |
|
458 | 458 | lfdirstate.normallookup(lfile) |
|
459 | 459 | elif state == 'r': |
|
460 | 460 | lfdirstate.remove(lfile) |
|
461 | 461 | elif state == 'a': |
|
462 | 462 | lfdirstate.add(lfile) |
|
463 | 463 | elif state == '?': |
|
464 | 464 | lfdirstate.drop(lfile) |
|
465 | 465 | |
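
synclfdirstate() above is effectively a dispatch on the standin's dirstate state character. The sketch below only restates that mapping for reference (the names are illustrative, not part of the extension's API); the returned string is the lfdirstate method the real function would call::

    def sync_action(state, normallookup, mtime, lfile_exists):
        """Which lfdirstate method synclfdirstate would invoke (sketch)."""
        if state == 'n':
            # state 'n' alone does not guarantee the file is clean
            if normallookup or mtime < 0 or not lfile_exists:
                return 'normallookup'
            return 'normal'
        return {'m': 'normallookup',   # merged standin
                'r': 'remove',         # marked for removal
                'a': 'add',            # freshly added
                '?': 'drop'}[state]    # standin not tracked at all

    assert sync_action('n', False, 1234567890, True) == 'normal'
    assert sync_action('n', True, 1234567890, True) == 'normallookup'
    assert sync_action('?', False, -1, False) == 'drop'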
|
466 | 466 | def markcommitted(orig, ctx, node): |
|
467 | 467 | repo = ctx.repo() |
|
468 | 468 | |
|
469 | 469 | orig(node) |
|
470 | 470 | |
|
471 | 471 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" |
|
472 | 472 | # because files coming from the 2nd parent are omitted in the latter. |
|
473 | 473 | # |
|
474 | 474 | # The former should be used to get targets of "synclfdirstate", |
|
475 | 475 | # because such files: |
|
476 | 476 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and |
|
477 | 477 | # - have to be marked as "n" after commit, but |
|
478 | 478 | # - aren't listed in "repo[node].files()" |
|
479 | 479 | |
|
480 | 480 | lfdirstate = openlfdirstate(repo.ui, repo) |
|
481 | 481 | for f in ctx.files(): |
|
482 | 482 | lfile = splitstandin(f) |
|
483 | 483 | if lfile is not None: |
|
484 | 484 | synclfdirstate(repo, lfdirstate, lfile, False) |
|
485 | 485 | lfdirstate.write() |
|
486 | 486 | |
|
487 | 487 | # As part of committing, copy all of the largefiles into the cache. |
|
488 | 488 | # |
|
489 | 489 | # Using "node" instead of "ctx" implies an additional "repo[node]"

490 | 490 | # lookup during copyalltostore(), but it can omit a redundant check for

491 | 491 | # files coming from the 2nd parent, which should exist in the store

492 | 492 | # at merge time.
|
493 | 493 | copyalltostore(repo, node) |
|
494 | 494 | |
|
495 | 495 | def getlfilestoupdate(oldstandins, newstandins): |
|
496 | 496 | changedstandins = set(oldstandins).symmetric_difference(set(newstandins)) |
|
497 | 497 | filelist = [] |
|
498 | 498 | for f in changedstandins: |
|
499 | 499 | if f[0] not in filelist: |
|
500 | 500 | filelist.append(f[0]) |
|
501 | 501 | return filelist |
|
502 | 502 | |
|
503 | 503 | def getlfilestoupload(repo, missing, addfunc): |
|
504 | 504 | makeprogress = repo.ui.makeprogress |
|
505 | 505 | with makeprogress(_('finding outgoing largefiles'), |
|
506 | 506 | unit=_('revisions'), total=len(missing)) as progress: |
|
507 | 507 | for i, n in enumerate(missing): |
|
508 | 508 | progress.update(i) |
|
509 | 509 | parents = [p for p in repo[n].parents() if p != node.nullid] |
|
510 | 510 | |
|
511 | 511 | oldlfstatus = repo.lfstatus |
|
512 | 512 | repo.lfstatus = False |
|
513 | 513 | try: |
|
514 | 514 | ctx = repo[n] |
|
515 | 515 | finally: |
|
516 | 516 | repo.lfstatus = oldlfstatus |
|
517 | 517 | |
|
518 | 518 | files = set(ctx.files()) |
|
519 | 519 | if len(parents) == 2: |
|
520 | 520 | mc = ctx.manifest() |
|
521 | 521 | mp1 = ctx.p1().manifest() |
|
522 | 522 | mp2 = ctx.p2().manifest() |
|
523 | 523 | for f in mp1: |
|
524 | 524 | if f not in mc: |
|
525 | 525 | files.add(f) |
|
526 | 526 | for f in mp2: |
|
527 | 527 | if f not in mc: |
|
528 | 528 | files.add(f) |
|
529 | 529 | for f in mc: |
|
530 | 530 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
531 | 531 | files.add(f) |
|
532 | 532 | for fn in files: |
|
533 | 533 | if isstandin(fn) and fn in ctx: |
|
534 | 534 | addfunc(fn, readasstandin(ctx[fn])) |
|
535 | 535 | |
|
536 | 536 | def updatestandinsbymatch(repo, match): |
|
537 | 537 | '''Update standins in the working directory according to specified match |
|
538 | 538 | |
|
539 | 539 | This returns (possibly modified) ``match`` object to be used for |
|
540 | 540 | subsequent commit process. |
|
541 | 541 | ''' |
|
542 | 542 | |
|
543 | 543 | ui = repo.ui |
|
544 | 544 | |
|
545 | 545 | # Case 1: user calls commit with no specific files or |
|
546 | 546 | # include/exclude patterns: refresh and commit all files that |
|
547 | 547 | # are "dirty". |
|
548 | 548 | if match is None or match.always(): |
|
549 | 549 | # Spend a bit of time here to get a list of files we know |
|
550 | 550 | # are modified so we can compare only against those. |
|
551 | 551 | # It can cost a lot of time (several seconds) |
|
552 | 552 | # otherwise to update all standins if the largefiles are |
|
553 | 553 | # large. |
|
554 | 554 | lfdirstate = openlfdirstate(ui, repo) |
|
555 |     | dirtymatch = matchmod.always(

    | 555 | dirtymatch = matchmod.always()
|
556 | 556 | unsure, s = lfdirstate.status(dirtymatch, subrepos=[], ignored=False, |
|
557 | 557 | clean=False, unknown=False) |
|
558 | 558 | modifiedfiles = unsure + s.modified + s.added + s.removed |
|
559 | 559 | lfiles = listlfiles(repo) |
|
560 | 560 | # this only loops through largefiles that exist (not |
|
561 | 561 | # removed/renamed) |
|
562 | 562 | for lfile in lfiles: |
|
563 | 563 | if lfile in modifiedfiles: |
|
564 | 564 | fstandin = standin(lfile) |
|
565 | 565 | if repo.wvfs.exists(fstandin): |
|
566 | 566 | # this handles the case where a rebase is being |
|
567 | 567 | # performed and the working copy is not updated |
|
568 | 568 | # yet. |
|
569 | 569 | if repo.wvfs.exists(lfile): |
|
570 | 570 | updatestandin(repo, lfile, fstandin) |
|
571 | 571 | |
|
572 | 572 | return match |
|
573 | 573 | |
|
574 | 574 | lfiles = listlfiles(repo) |
|
575 | 575 | match._files = repo._subdirlfs(match.files(), lfiles) |
|
576 | 576 | |
|
577 | 577 | # Case 2: user calls commit with specified patterns: refresh |
|
578 | 578 | # any matching big files. |
|
579 | 579 | smatcher = composestandinmatcher(repo, match) |
|
580 | 580 | standins = repo.dirstate.walk(smatcher, subrepos=[], unknown=False, |
|
581 | 581 | ignored=False) |
|
582 | 582 | |
|
583 | 583 | # No matching big files: get out of the way and pass control to |
|
584 | 584 | # the usual commit() method. |
|
585 | 585 | if not standins: |
|
586 | 586 | return match |
|
587 | 587 | |
|
588 | 588 | # Refresh all matching big files. It's possible that the |
|
589 | 589 | # commit will end up failing, in which case the big files will |
|
590 | 590 | # stay refreshed. No harm done: the user modified them and |
|
591 | 591 | # asked to commit them, so sooner or later we're going to |
|
592 | 592 | # refresh the standins. Might as well leave them refreshed. |
|
593 | 593 | lfdirstate = openlfdirstate(ui, repo) |
|
594 | 594 | for fstandin in standins: |
|
595 | 595 | lfile = splitstandin(fstandin) |
|
596 | 596 | if lfdirstate[lfile] != 'r': |
|
597 | 597 | updatestandin(repo, lfile, fstandin) |
|
598 | 598 | |
|
599 | 599 | # Cook up a new matcher that only matches regular files or |
|
600 | 600 | # standins corresponding to the big files requested by the |
|
601 | 601 | # user. Have to modify _files to prevent commit() from |
|
602 | 602 | # complaining "not tracked" for big files. |
|
603 | 603 | match = copy.copy(match) |
|
604 | 604 | origmatchfn = match.matchfn |
|
605 | 605 | |
|
606 | 606 | # Check both the list of largefiles and the list of |
|
607 | 607 | # standins because if a largefile was removed, it |
|
608 | 608 | # won't be in the list of largefiles at this point |
|
609 | 609 | match._files += sorted(standins) |
|
610 | 610 | |
|
611 | 611 | actualfiles = [] |
|
612 | 612 | for f in match._files: |
|
613 | 613 | fstandin = standin(f) |
|
614 | 614 | |
|
615 | 615 | # For largefiles, only one of the normal and standin should be |
|
616 | 616 | # committed (except if one of them is a remove). In the case of a |
|
617 | 617 | # standin removal, drop the normal file if it is unknown to dirstate. |
|
618 | 618 | # Thus, skip plain largefile names but keep the standin. |
|
619 | 619 | if f in lfiles or fstandin in standins: |
|
620 | 620 | if repo.dirstate[fstandin] != 'r': |
|
621 | 621 | if repo.dirstate[f] != 'r': |
|
622 | 622 | continue |
|
623 | 623 | elif repo.dirstate[f] == '?': |
|
624 | 624 | continue |
|
625 | 625 | |
|
626 | 626 | actualfiles.append(f) |
|
627 | 627 | match._files = actualfiles |
|
628 | 628 | |
|
629 | 629 | def matchfn(f): |
|
630 | 630 | if origmatchfn(f): |
|
631 | 631 | return f not in lfiles |
|
632 | 632 | else: |
|
633 | 633 | return f in standins |
|
634 | 634 | |
|
635 | 635 | match.matchfn = matchfn |
|
636 | 636 | |
|
637 | 637 | return match |
|
638 | 638 | |
|
639 | 639 | class automatedcommithook(object): |
|
640 | 640 | '''Stateful hook to update standins at the 1st commit of resuming |
|
641 | 641 | |
|
642 | 642 | For efficiency, updating standins in the working directory should |
|
643 | 643 | be avoided while automated committing (like rebase, transplant and |
|
644 | 644 | so on), because they should be updated before committing. |
|
645 | 645 | |
|
646 | 646 | But the 1st commit of resuming automated committing (e.g. ``rebase |
|
647 | 647 | --continue``) should update them, because largefiles may be |
|
648 | 648 | modified manually. |
|
649 | 649 | ''' |
|
650 | 650 | def __init__(self, resuming): |
|
651 | 651 | self.resuming = resuming |
|
652 | 652 | |
|
653 | 653 | def __call__(self, repo, match): |
|
654 | 654 | if self.resuming: |
|
655 | 655 | self.resuming = False # avoids updating at subsequent commits |
|
656 | 656 | return updatestandinsbymatch(repo, match) |
|
657 | 657 | else: |
|
658 | 658 | return match |
|
659 | 659 | |
|
660 | 660 | def getstatuswriter(ui, repo, forcibly=None): |
|
661 | 661 | '''Return the function to write largefiles specific status out |
|
662 | 662 | |
|
663 | 663 | If ``forcibly`` is ``None``, this returns the last element of |
|
664 | 664 | ``repo._lfstatuswriters`` as "default" writer function. |
|
665 | 665 | |
|
666 | 666 | Otherwise, this returns the function to always write out (or |
|
667 | 667 | ignore if ``not forcibly``) status. |
|
668 | 668 | ''' |
|
669 | 669 | if forcibly is None and util.safehasattr(repo, '_largefilesenabled'): |
|
670 | 670 | return repo._lfstatuswriters[-1] |
|
671 | 671 | else: |
|
672 | 672 | if forcibly: |
|
673 | 673 | return ui.status # forcibly WRITE OUT |
|
674 | 674 | else: |
|
675 | 675 | return lambda *msg, **opts: None # forcibly IGNORE |
@@ -1,1506 +1,1503 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''Overridden Mercurial commands and functions for the largefiles extension''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import copy |
|
13 | 13 | import os |
|
14 | 14 | |
|
15 | 15 | from mercurial.i18n import _ |
|
16 | 16 | |
|
17 | 17 | from mercurial.hgweb import ( |
|
18 | 18 | webcommands, |
|
19 | 19 | ) |
|
20 | 20 | |
|
21 | 21 | from mercurial import ( |
|
22 | 22 | archival, |
|
23 | 23 | cmdutil, |
|
24 | 24 | copies as copiesmod, |
|
25 | 25 | error, |
|
26 | 26 | exchange, |
|
27 | 27 | extensions, |
|
28 | 28 | exthelper, |
|
29 | 29 | filemerge, |
|
30 | 30 | hg, |
|
31 | 31 | logcmdutil, |
|
32 | 32 | match as matchmod, |
|
33 | 33 | merge, |
|
34 | 34 | pathutil, |
|
35 | 35 | pycompat, |
|
36 | 36 | scmutil, |
|
37 | 37 | smartset, |
|
38 | 38 | subrepo, |
|
39 | 39 | upgrade, |
|
40 | 40 | url as urlmod, |
|
41 | 41 | util, |
|
42 | 42 | ) |
|
43 | 43 | |
|
44 | 44 | from . import ( |
|
45 | 45 | lfcommands, |
|
46 | 46 | lfutil, |
|
47 | 47 | storefactory, |
|
48 | 48 | ) |
|
49 | 49 | |
|
50 | 50 | eh = exthelper.exthelper() |
|
51 | 51 | |
|
52 | 52 | # -- Utility functions: commonly/repeatedly needed functionality --------------- |
|
53 | 53 | |
|
54 | 54 | def composelargefilematcher(match, manifest): |
|
55 | 55 | '''create a matcher that matches only the largefiles in the original |
|
56 | 56 | matcher''' |
|
57 | 57 | m = copy.copy(match) |
|
58 | 58 | lfile = lambda f: lfutil.standin(f) in manifest |
|
59 | 59 | m._files = [lf for lf in m._files if lfile(lf)] |
|
60 | 60 | m._fileset = set(m._files) |
|
61 | 61 | m.always = lambda: False |
|
62 | 62 | origmatchfn = m.matchfn |
|
63 | 63 | m.matchfn = lambda f: lfile(f) and origmatchfn(f) |
|
64 | 64 | return m |
|
65 | 65 | |
|
66 | 66 | def composenormalfilematcher(match, manifest, exclude=None): |
|
67 | 67 | excluded = set() |
|
68 | 68 | if exclude is not None: |
|
69 | 69 | excluded.update(exclude) |
|
70 | 70 | |
|
71 | 71 | m = copy.copy(match) |
|
72 | 72 | notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in |
|
73 | 73 | manifest or f in excluded) |
|
74 | 74 | m._files = [lf for lf in m._files if notlfile(lf)] |
|
75 | 75 | m._fileset = set(m._files) |
|
76 | 76 | m.always = lambda: False |
|
77 | 77 | origmatchfn = m.matchfn |
|
78 | 78 | m.matchfn = lambda f: notlfile(f) and origmatchfn(f) |
|
79 | 79 | return m |
|
80 | 80 | |
|
81 | 81 | def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts): |
|
82 | 82 | large = opts.get(r'large') |
|
83 | 83 | lfsize = lfutil.getminsize( |
|
84 | 84 | ui, lfutil.islfilesrepo(repo), opts.get(r'lfsize')) |
|
85 | 85 | |
|
86 | 86 | lfmatcher = None |
|
87 | 87 | if lfutil.islfilesrepo(repo): |
|
88 | 88 | lfpats = ui.configlist(lfutil.longname, 'patterns') |
|
89 | 89 | if lfpats: |
|
90 | 90 | lfmatcher = matchmod.match(repo.root, '', list(lfpats)) |
|
91 | 91 | |
|
92 | 92 | lfnames = [] |
|
93 | 93 | m = matcher |
|
94 | 94 | |
|
95 | 95 | wctx = repo[None] |
|
96 | 96 | for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)): |
|
97 | 97 | exact = m.exact(f) |
|
98 | 98 | lfile = lfutil.standin(f) in wctx |
|
99 | 99 | nfile = f in wctx |
|
100 | 100 | exists = lfile or nfile |
|
101 | 101 | |
|
102 | 102 | # Don't warn the user when they attempt to add a normal tracked file. |
|
103 | 103 | # The normal add code will do that for us. |
|
104 | 104 | if exact and exists: |
|
105 | 105 | if lfile: |
|
106 | 106 | ui.warn(_('%s already a largefile\n') % uipathfn(f)) |
|
107 | 107 | continue |
|
108 | 108 | |
|
109 | 109 | if (exact or not exists) and not lfutil.isstandin(f): |
|
110 | 110 | # In case the file was removed previously, but not committed |
|
111 | 111 | # (issue3507) |
|
112 | 112 | if not repo.wvfs.exists(f): |
|
113 | 113 | continue |
|
114 | 114 | |
|
115 | 115 | abovemin = (lfsize and |
|
116 | 116 | repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024) |
|
117 | 117 | if large or abovemin or (lfmatcher and lfmatcher(f)): |
|
118 | 118 | lfnames.append(f) |
|
119 | 119 | if ui.verbose or not exact: |
|
120 | 120 | ui.status(_('adding %s as a largefile\n') % uipathfn(f)) |
|
121 | 121 | |
|
122 | 122 | bad = [] |
|
123 | 123 | |
|
124 | 124 | # Need to lock, otherwise there could be a race condition between |
|
125 | 125 | # when standins are created and added to the repo. |
|
126 | 126 | with repo.wlock(): |
|
127 | 127 | if not opts.get(r'dry_run'): |
|
128 | 128 | standins = [] |
|
129 | 129 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
130 | 130 | for f in lfnames: |
|
131 | 131 | standinname = lfutil.standin(f) |
|
132 | 132 | lfutil.writestandin(repo, standinname, hash='', |
|
133 | 133 | executable=lfutil.getexecutable(repo.wjoin(f))) |
|
134 | 134 | standins.append(standinname) |
|
135 | 135 | if lfdirstate[f] == 'r': |
|
136 | 136 | lfdirstate.normallookup(f) |
|
137 | 137 | else: |
|
138 | 138 | lfdirstate.add(f) |
|
139 | 139 | lfdirstate.write() |
|
140 | 140 | bad += [lfutil.splitstandin(f) |
|
141 | 141 | for f in repo[None].add(standins) |
|
142 | 142 | if f in m.files()] |
|
143 | 143 | |
|
144 | 144 | added = [f for f in lfnames if f not in bad] |
|
145 | 145 | return added, bad |
|
146 | 146 | |
|
147 | 147 | def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts): |
|
148 | 148 | after = opts.get(r'after') |
|
149 | 149 | m = composelargefilematcher(matcher, repo[None].manifest()) |
|
150 | 150 | try: |
|
151 | 151 | repo.lfstatus = True |
|
152 | 152 | s = repo.status(match=m, clean=not isaddremove) |
|
153 | 153 | finally: |
|
154 | 154 | repo.lfstatus = False |
|
155 | 155 | manifest = repo[None].manifest() |
|
156 | 156 | modified, added, deleted, clean = [[f for f in list |
|
157 | 157 | if lfutil.standin(f) in manifest] |
|
158 | 158 | for list in (s.modified, s.added, |
|
159 | 159 | s.deleted, s.clean)] |
|
160 | 160 | |
|
161 | 161 | def warn(files, msg): |
|
162 | 162 | for f in files: |
|
163 | 163 | ui.warn(msg % uipathfn(f)) |
|
164 | 164 | return int(len(files) > 0) |
|
165 | 165 | |
|
166 | 166 | if after: |
|
167 | 167 | remove = deleted |
|
168 | 168 | result = warn(modified + added + clean, |
|
169 | 169 | _('not removing %s: file still exists\n')) |
|
170 | 170 | else: |
|
171 | 171 | remove = deleted + clean |
|
172 | 172 | result = warn(modified, _('not removing %s: file is modified (use -f' |
|
173 | 173 | ' to force removal)\n')) |
|
174 | 174 | result = warn(added, _('not removing %s: file has been marked for add' |
|
175 | 175 | ' (use forget to undo)\n')) or result |
|
176 | 176 | |
|
177 | 177 | # Need to lock because standin files are deleted then removed from the |
|
178 | 178 | # repository and we could race in-between. |
|
179 | 179 | with repo.wlock(): |
|
180 | 180 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
181 | 181 | for f in sorted(remove): |
|
182 | 182 | if ui.verbose or not m.exact(f): |
|
183 | 183 | ui.status(_('removing %s\n') % uipathfn(f)) |
|
184 | 184 | |
|
185 | 185 | if not dryrun: |
|
186 | 186 | if not after: |
|
187 | 187 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
188 | 188 | |
|
189 | 189 | if dryrun: |
|
190 | 190 | return result |
|
191 | 191 | |
|
192 | 192 | remove = [lfutil.standin(f) for f in remove] |
|
193 | 193 | # If this is being called by addremove, let the original addremove |
|
194 | 194 | # function handle this. |
|
195 | 195 | if not isaddremove: |
|
196 | 196 | for f in remove: |
|
197 | 197 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
198 | 198 | repo[None].forget(remove) |
|
199 | 199 | |
|
200 | 200 | for f in remove: |
|
201 | 201 | lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f), |
|
202 | 202 | False) |
|
203 | 203 | |
|
204 | 204 | lfdirstate.write() |
|
205 | 205 | |
|
206 | 206 | return result |
|
207 | 207 | |
|
208 | 208 | # For overriding mercurial.hgweb.webcommands so that largefiles will |
|
209 | 209 | # appear at their right place in the manifests. |
|
210 | 210 | @eh.wrapfunction(webcommands, 'decodepath') |
|
211 | 211 | def decodepath(orig, path): |
|
212 | 212 | return lfutil.splitstandin(path) or path |
|
213 | 213 | |
|
214 | 214 | # -- Wrappers: modify existing commands -------------------------------- |
|
215 | 215 | |
|
216 | 216 | @eh.wrapcommand('add', |
|
217 | 217 | opts=[('', 'large', None, _('add as largefile')), |
|
218 | 218 | ('', 'normal', None, _('add as normal file')), |
|
219 | 219 | ('', 'lfsize', '', _('add all files above this size (in megabytes) ' |
|
220 | 220 | 'as largefiles (default: 10)'))]) |
|
221 | 221 | def overrideadd(orig, ui, repo, *pats, **opts): |
|
222 | 222 | if opts.get(r'normal') and opts.get(r'large'): |
|
223 | 223 | raise error.Abort(_('--normal cannot be used with --large')) |
|
224 | 224 | return orig(ui, repo, *pats, **opts) |
|
225 | 225 | |
|
226 | 226 | @eh.wrapfunction(cmdutil, 'add') |
|
227 | 227 | def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts): |
|
228 | 228 | # The --normal flag short circuits this override |
|
229 | 229 | if opts.get(r'normal'): |
|
230 | 230 | return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts) |
|
231 | 231 | |
|
232 | 232 | ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts) |
|
233 | 233 | normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(), |
|
234 | 234 | ladded) |
|
235 | 235 | bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts) |
|
236 | 236 | |
|
237 | 237 | bad.extend(f for f in lbad) |
|
238 | 238 | return bad |
|
239 | 239 | |
|
240 | 240 | @eh.wrapfunction(cmdutil, 'remove') |
|
241 | 241 | def cmdutilremove(orig, ui, repo, matcher, prefix, uipathfn, after, force, |
|
242 | 242 | subrepos, dryrun): |
|
243 | 243 | normalmatcher = composenormalfilematcher(matcher, repo[None].manifest()) |
|
244 | 244 | result = orig(ui, repo, normalmatcher, prefix, uipathfn, after, force, |
|
245 | 245 | subrepos, dryrun) |
|
246 | 246 | return removelargefiles(ui, repo, False, matcher, uipathfn, dryrun, |
|
247 | 247 | after=after, force=force) or result |
|
248 | 248 | |
|
249 | 249 | @eh.wrapfunction(subrepo.hgsubrepo, 'status') |
|
250 | 250 | def overridestatusfn(orig, repo, rev2, **opts): |
|
251 | 251 | try: |
|
252 | 252 | repo._repo.lfstatus = True |
|
253 | 253 | return orig(repo, rev2, **opts) |
|
254 | 254 | finally: |
|
255 | 255 | repo._repo.lfstatus = False |
|
256 | 256 | |
|
257 | 257 | @eh.wrapcommand('status') |
|
258 | 258 | def overridestatus(orig, ui, repo, *pats, **opts): |
|
259 | 259 | try: |
|
260 | 260 | repo.lfstatus = True |
|
261 | 261 | return orig(ui, repo, *pats, **opts) |
|
262 | 262 | finally: |
|
263 | 263 | repo.lfstatus = False |
|
264 | 264 | |
|
265 | 265 | @eh.wrapfunction(subrepo.hgsubrepo, 'dirty') |
|
266 | 266 | def overridedirty(orig, repo, ignoreupdate=False, missing=False): |
|
267 | 267 | try: |
|
268 | 268 | repo._repo.lfstatus = True |
|
269 | 269 | return orig(repo, ignoreupdate=ignoreupdate, missing=missing) |
|
270 | 270 | finally: |
|
271 | 271 | repo._repo.lfstatus = False |
|
272 | 272 | |
|
273 | 273 | @eh.wrapcommand('log') |
|
274 | 274 | def overridelog(orig, ui, repo, *pats, **opts): |
|
275 | 275 | def overridematchandpats(orig, ctx, pats=(), opts=None, globbed=False, |
|
276 | 276 | default='relpath', badfn=None): |
|
277 | 277 | """Matcher that merges root directory with .hglf, suitable for log. |
|
278 | 278 | It is still possible to match .hglf directly. |
|
279 | 279 | For any listed files run log on the standin too. |
|
280 | 280 | matchfn tries both the given filename and with .hglf stripped. |
|
281 | 281 | """ |
|
282 | 282 | if opts is None: |
|
283 | 283 | opts = {} |
|
284 | 284 | matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn) |
|
285 | 285 | m, p = copy.copy(matchandpats) |
|
286 | 286 | |
|
287 | 287 | if m.always(): |
|
288 | 288 | # We want to match everything anyway, so there's no benefit trying |
|
289 | 289 | # to add standins. |
|
290 | 290 | return matchandpats |
|
291 | 291 | |
|
292 | 292 | pats = set(p) |
|
293 | 293 | |
|
294 | 294 | def fixpats(pat, tostandin=lfutil.standin): |
|
295 | 295 | if pat.startswith('set:'): |
|
296 | 296 | return pat |
|
297 | 297 | |
|
298 | 298 | kindpat = matchmod._patsplit(pat, None) |
|
299 | 299 | |
|
300 | 300 | if kindpat[0] is not None: |
|
301 | 301 | return kindpat[0] + ':' + tostandin(kindpat[1]) |
|
302 | 302 | return tostandin(kindpat[1]) |
|
303 | 303 | |
|
304 | 304 | cwd = repo.getcwd() |
|
305 | 305 | if cwd: |
|
306 | 306 | hglf = lfutil.shortname |
|
307 | 307 | back = util.pconvert(repo.pathto(hglf)[:-len(hglf)]) |
|
308 | 308 | |
|
309 | 309 | def tostandin(f): |
|
310 | 310 | # The file may already be a standin, so truncate the back |
|
311 | 311 | # prefix and test before mangling it. This avoids turning |
|
312 | 312 | # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'. |
|
313 | 313 | if f.startswith(back) and lfutil.splitstandin(f[len(back):]): |
|
314 | 314 | return f |
|
315 | 315 | |
|
316 | 316 | # An absolute path is from outside the repo, so truncate the |
|
317 | 317 | # path to the root before building the standin. Otherwise cwd |
|
318 | 318 | # is somewhere in the repo, relative to root, and needs to be |
|
319 | 319 | # prepended before building the standin. |
|
320 | 320 | if os.path.isabs(cwd): |
|
321 | 321 | f = f[len(back):] |
|
322 | 322 | else: |
|
323 | 323 | f = cwd + '/' + f |
|
324 | 324 | return back + lfutil.standin(f) |
|
325 | 325 | else: |
|
326 | 326 | def tostandin(f): |
|
327 | 327 | if lfutil.isstandin(f): |
|
328 | 328 | return f |
|
329 | 329 | return lfutil.standin(f) |
|
330 | 330 | pats.update(fixpats(f, tostandin) for f in p) |
|
331 | 331 | |
|
332 | 332 | for i in range(0, len(m._files)): |
|
333 | 333 | # Don't add '.hglf' to m.files, since that is already covered by '.' |
|
334 | 334 | if m._files[i] == '.': |
|
335 | 335 | continue |
|
336 | 336 | standin = lfutil.standin(m._files[i]) |
|
337 | 337 | # If the "standin" is a directory, append instead of replace to |
|
338 | 338 | # support naming a directory on the command line with only |
|
339 | 339 | # largefiles. The original directory is kept to support normal |
|
340 | 340 | # files. |
|
341 | 341 | if standin in ctx: |
|
342 | 342 | m._files[i] = standin |
|
343 | 343 | elif m._files[i] not in ctx and repo.wvfs.isdir(standin): |
|
344 | 344 | m._files.append(standin) |
|
345 | 345 | |
|
346 | 346 | m._fileset = set(m._files) |
|
347 | 347 | m.always = lambda: False |
|
348 | 348 | origmatchfn = m.matchfn |
|
349 | 349 | def lfmatchfn(f): |
|
350 | 350 | lf = lfutil.splitstandin(f) |
|
351 | 351 | if lf is not None and origmatchfn(lf): |
|
352 | 352 | return True |
|
353 | 353 | r = origmatchfn(f) |
|
354 | 354 | return r |
|
355 | 355 | m.matchfn = lfmatchfn |
|
356 | 356 | |
|
357 | 357 | ui.debug('updated patterns: %s\n' % ', '.join(sorted(pats))) |
|
358 | 358 | return m, pats |
|
359 | 359 | |
|
360 | 360 | # For hg log --patch, the match object is used in two different senses: |
|
361 | 361 | # (1) to determine what revisions should be printed out, and |
|
362 | 362 | # (2) to determine what files to print out diffs for. |
|
363 | 363 | # The magic matchandpats override should be used for case (1) but not for |
|
364 | 364 | # case (2). |
|
365 | 365 | oldmatchandpats = scmutil.matchandpats |
|
366 | 366 | def overridemakefilematcher(orig, repo, pats, opts, badfn=None): |
|
367 | 367 | wctx = repo[None] |
|
368 | 368 | match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn) |
|
369 | 369 | return lambda ctx: match |
|
370 | 370 | |
|
371 | 371 | wrappedmatchandpats = extensions.wrappedfunction(scmutil, 'matchandpats', |
|
372 | 372 | overridematchandpats) |
|
373 | 373 | wrappedmakefilematcher = extensions.wrappedfunction( |
|
374 | 374 | logcmdutil, '_makenofollowfilematcher', overridemakefilematcher) |
|
375 | 375 | with wrappedmatchandpats, wrappedmakefilematcher: |
|
376 | 376 | return orig(ui, repo, *pats, **opts) |
|
377 | 377 | |
|
378 | 378 | @eh.wrapcommand('verify', |
|
379 | 379 | opts=[('', 'large', None, |
|
380 | 380 | _('verify that all largefiles in current revision exists')), |
|
381 | 381 | ('', 'lfa', None, |
|
382 | 382 | _('verify largefiles in all revisions, not just current')), |
|
383 | 383 | ('', 'lfc', None, |
|
384 | 384 | _('verify local largefile contents, not just existence'))]) |
|
385 | 385 | def overrideverify(orig, ui, repo, *pats, **opts): |
|
386 | 386 | large = opts.pop(r'large', False) |
|
387 | 387 | all = opts.pop(r'lfa', False) |
|
388 | 388 | contents = opts.pop(r'lfc', False) |
|
389 | 389 | |
|
390 | 390 | result = orig(ui, repo, *pats, **opts) |
|
391 | 391 | if large or all or contents: |
|
392 | 392 | result = result or lfcommands.verifylfiles(ui, repo, all, contents) |
|
393 | 393 | return result |
|
394 | 394 | |
|
395 | 395 | @eh.wrapcommand('debugstate', |
|
396 | 396 | opts=[('', 'large', None, _('display largefiles dirstate'))]) |
|
397 | 397 | def overridedebugstate(orig, ui, repo, *pats, **opts): |
|
398 | 398 | large = opts.pop(r'large', False) |
|
399 | 399 | if large: |
|
400 | 400 | class fakerepo(object): |
|
401 | 401 | dirstate = lfutil.openlfdirstate(ui, repo) |
|
402 | 402 | orig(ui, fakerepo, *pats, **opts) |
|
403 | 403 | else: |
|
404 | 404 | orig(ui, repo, *pats, **opts) |
|
405 | 405 | |
|
406 | 406 | # Before starting the manifest merge, merge.updates will call |
|
407 | 407 | # _checkunknownfile to check if there are any files in the merged-in |
|
408 | 408 | # changeset that collide with unknown files in the working copy. |
|
409 | 409 | # |
|
410 | 410 | # The largefiles are seen as unknown, so this prevents us from merging |
|
411 | 411 | # in a file 'foo' if we already have a largefile with the same name. |
|
412 | 412 | # |
|
413 | 413 | # The overridden function filters the unknown files by removing any |
|
414 | 414 | # largefiles. This makes the merge proceed and we can then handle this |
|
415 | 415 | # case further in the overridden calculateupdates function below. |
|
416 | 416 | @eh.wrapfunction(merge, '_checkunknownfile') |
|
417 | 417 | def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None): |
|
418 | 418 | if lfutil.standin(repo.dirstate.normalize(f)) in wctx: |
|
419 | 419 | return False |
|
420 | 420 | return origfn(repo, wctx, mctx, f, f2) |
|
421 | 421 | |
|
422 | 422 | # The manifest merge handles conflicts on the manifest level. We want |
|
423 | 423 | # to handle changes in largefile-ness of files at this level too. |
|
424 | 424 | # |
|
425 | 425 | # The strategy is to run the original calculateupdates and then process |
|
426 | 426 | # the action list it outputs. There are two cases we need to deal with: |
|
427 | 427 | # |
|
428 | 428 | # 1. Normal file in p1, largefile in p2. Here the largefile is |
|
429 | 429 | # detected via its standin file, which will enter the working copy |
|
430 | 430 | # with a "get" action. It is not "merge" since the standin is all |
|
431 | 431 | # Mercurial is concerned with at this level -- the link to the |
|
432 | 432 | # existing normal file is not relevant here. |
|
433 | 433 | # |
|
434 | 434 | # 2. Largefile in p1, normal file in p2. Here we get a "merge" action |
|
435 | 435 | # since the largefile will be present in the working copy and |
|
436 | 436 | # different from the normal file in p2. Mercurial therefore |
|
437 | 437 | # triggers a merge action. |
|
438 | 438 | # |
|
439 | 439 | # In both cases, we prompt the user and emit new actions to either |
|
440 | 440 | # remove the standin (if the normal file was kept) or to remove the |
|
441 | 441 | # normal file and get the standin (if the largefile was kept). The |
|
442 | 442 | # default prompt answer is to use the largefile version since it was |
|
443 | 443 | # presumably changed on purpose. |
|
444 | 444 | # |
|
445 | 445 | # Finally, the merge.applyupdates function will then take care of |
|
446 | 446 | # writing the files into the working copy and lfcommands.updatelfiles |
|
447 | 447 | # will update the largefiles. |
|
448 | 448 | @eh.wrapfunction(merge, 'calculateupdates') |
|
449 | 449 | def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force, |
|
450 | 450 | acceptremote, *args, **kwargs): |
|
451 | 451 | overwrite = force and not branchmerge |
|
452 | 452 | actions, diverge, renamedelete = origfn( |
|
453 | 453 | repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs) |
|
454 | 454 | |
|
455 | 455 | if overwrite: |
|
456 | 456 | return actions, diverge, renamedelete |
|
457 | 457 | |
|
458 | 458 | # Convert to dictionary with filename as key and action as value. |
|
459 | 459 | lfiles = set() |
|
460 | 460 | for f in actions: |
|
461 | 461 | splitstandin = lfutil.splitstandin(f) |
|
462 | 462 | if splitstandin in p1: |
|
463 | 463 | lfiles.add(splitstandin) |
|
464 | 464 | elif lfutil.standin(f) in p1: |
|
465 | 465 | lfiles.add(f) |
|
466 | 466 | |
|
467 | 467 | for lfile in sorted(lfiles): |
|
468 | 468 | standin = lfutil.standin(lfile) |
|
469 | 469 | (lm, largs, lmsg) = actions.get(lfile, (None, None, None)) |
|
470 | 470 | (sm, sargs, smsg) = actions.get(standin, (None, None, None)) |
|
471 | 471 | if sm in ('g', 'dc') and lm != 'r': |
|
472 | 472 | if sm == 'dc': |
|
473 | 473 | f1, f2, fa, move, anc = sargs |
|
474 | 474 | sargs = (p2[f2].flags(), False) |
|
475 | 475 | # Case 1: normal file in the working copy, largefile in |
|
476 | 476 | # the second parent |
|
477 | 477 | usermsg = _('remote turned local normal file %s into a largefile\n' |
|
478 | 478 | 'use (l)argefile or keep (n)ormal file?' |
|
479 | 479 | '$$ &Largefile $$ &Normal file') % lfile |
|
480 | 480 | if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile |
|
481 | 481 | actions[lfile] = ('r', None, 'replaced by standin') |
|
482 | 482 | actions[standin] = ('g', sargs, 'replaces standin') |
|
483 | 483 | else: # keep local normal file |
|
484 | 484 | actions[lfile] = ('k', None, 'replaces standin') |
|
485 | 485 | if branchmerge: |
|
486 | 486 | actions[standin] = ('k', None, 'replaced by non-standin') |
|
487 | 487 | else: |
|
488 | 488 | actions[standin] = ('r', None, 'replaced by non-standin') |
|
489 | 489 | elif lm in ('g', 'dc') and sm != 'r': |
|
490 | 490 | if lm == 'dc': |
|
491 | 491 | f1, f2, fa, move, anc = largs |
|
492 | 492 | largs = (p2[f2].flags(), False) |
|
493 | 493 | # Case 2: largefile in the working copy, normal file in |
|
494 | 494 | # the second parent |
|
495 | 495 | usermsg = _('remote turned local largefile %s into a normal file\n' |
|
496 | 496 | 'keep (l)argefile or use (n)ormal file?' |
|
497 | 497 | '$$ &Largefile $$ &Normal file') % lfile |
|
498 | 498 | if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile |
|
499 | 499 | if branchmerge: |
|
500 | 500 | # largefile can be restored from standin safely |
|
501 | 501 | actions[lfile] = ('k', None, 'replaced by standin') |
|
502 | 502 | actions[standin] = ('k', None, 'replaces standin') |
|
503 | 503 | else: |
|
504 | 504 | # "lfile" should be marked as "removed" without |
|
505 | 505 | # removal of itself |
|
506 | 506 | actions[lfile] = ('lfmr', None, |
|
507 | 507 | 'forget non-standin largefile') |
|
508 | 508 | |
|
509 | 509 | # linear-merge should treat this largefile as 're-added' |
|
510 | 510 | actions[standin] = ('a', None, 'keep standin') |
|
511 | 511 | else: # pick remote normal file |
|
512 | 512 | actions[lfile] = ('g', largs, 'replaces standin') |
|
513 | 513 | actions[standin] = ('r', None, 'replaced by non-standin') |
|
514 | 514 | |
|
515 | 515 | return actions, diverge, renamedelete |
|
516 | 516 | |
|
517 | 517 | @eh.wrapfunction(merge, 'recordupdates') |
|
518 | 518 | def mergerecordupdates(orig, repo, actions, branchmerge): |
|
519 | 519 | if 'lfmr' in actions: |
|
520 | 520 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
521 | 521 | for lfile, args, msg in actions['lfmr']: |
|
522 | 522 | # this should be executed before 'orig', to execute 'remove' |
|
523 | 523 | # before all other actions |
|
524 | 524 | repo.dirstate.remove(lfile) |
|
525 | 525 | # make sure lfile doesn't get synclfdirstate'd as normal |
|
526 | 526 | lfdirstate.add(lfile) |
|
527 | 527 | lfdirstate.write() |
|
528 | 528 | |
|
529 | 529 | return orig(repo, actions, branchmerge) |
|
530 | 530 | |
|
531 | 531 | # Override filemerge to prompt the user about how they wish to merge |
|
532 | 532 | # largefiles. This will handle identical edits without prompting the user. |
|
533 | 533 | @eh.wrapfunction(filemerge, '_filemerge') |
|
534 | 534 | def overridefilemerge(origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, |
|
535 | 535 | labels=None): |
|
536 | 536 | if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent(): |
|
537 | 537 | return origfn(premerge, repo, wctx, mynode, orig, fcd, fco, fca, |
|
538 | 538 | labels=labels) |
|
539 | 539 | |
|
540 | 540 | ahash = lfutil.readasstandin(fca).lower() |
|
541 | 541 | dhash = lfutil.readasstandin(fcd).lower() |
|
542 | 542 | ohash = lfutil.readasstandin(fco).lower() |
|
543 | 543 | if (ohash != ahash and |
|
544 | 544 | ohash != dhash and |
|
545 | 545 | (dhash == ahash or |
|
546 | 546 | repo.ui.promptchoice( |
|
547 | 547 | _('largefile %s has a merge conflict\nancestor was %s\n' |
|
548 | 548 | 'keep (l)ocal %s or\ntake (o)ther %s?' |
|
549 | 549 | '$$ &Local $$ &Other') % |
|
550 | 550 | (lfutil.splitstandin(orig), ahash, dhash, ohash), |
|
551 | 551 | 0) == 1)): |
|
552 | 552 | repo.wwrite(fcd.path(), fco.data(), fco.flags()) |
|
553 | 553 | return True, 0, False |
|
554 | 554 | |
|
555 | 555 | @eh.wrapfunction(copiesmod, 'pathcopies') |
|
556 | 556 | def copiespathcopies(orig, ctx1, ctx2, match=None): |
|
557 | 557 | copies = orig(ctx1, ctx2, match=match) |
|
558 | 558 | updated = {} |
|
559 | 559 | |
|
560 | 560 | for k, v in copies.iteritems(): |
|
561 | 561 | updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v |
|
562 | 562 | |
|
563 | 563 | return updated |
|
564 | 564 | |
|
565 | 565 | # Copy first changes the matchers to match standins instead of |
|
566 | 566 | # largefiles. Then it overrides util.copyfile in that function it |
|
567 | 567 | # checks if the destination largefile already exists. It also keeps a |
|
568 | 568 | # list of copied files so that the largefiles can be copied and the |
|
569 | 569 | # dirstate updated. |
|
570 | 570 | @eh.wrapfunction(cmdutil, 'copy') |
|
571 | 571 | def overridecopy(orig, ui, repo, pats, opts, rename=False): |
|
572 | 572 | # doesn't remove largefile on rename |
|
573 | 573 | if len(pats) < 2: |
|
574 | 574 | # this isn't legal, let the original function deal with it |
|
575 | 575 | return orig(ui, repo, pats, opts, rename) |
|
576 | 576 | |
|
577 | 577 | # This could copy both lfiles and normal files in one command, |
|
578 | 578 | # but we don't want to do that. First replace their matcher to |
|
579 | 579 | # only match normal files and run it, then replace it to just |
|
580 | 580 | # match largefiles and run it again. |
|
581 | 581 | nonormalfiles = False |
|
582 | 582 | nolfiles = False |
|
583 | 583 | manifest = repo[None].manifest() |
|
584 | 584 | def normalfilesmatchfn(orig, ctx, pats=(), opts=None, globbed=False, |
|
585 | 585 | default='relpath', badfn=None): |
|
586 | 586 | if opts is None: |
|
587 | 587 | opts = {} |
|
588 | 588 | match = orig(ctx, pats, opts, globbed, default, badfn=badfn) |
|
589 | 589 | return composenormalfilematcher(match, manifest) |
|
590 | 590 | with extensions.wrappedfunction(scmutil, 'match', normalfilesmatchfn): |
|
591 | 591 | try: |
|
592 | 592 | result = orig(ui, repo, pats, opts, rename) |
|
593 | 593 | except error.Abort as e: |
|
594 | 594 | if pycompat.bytestr(e) != _('no files to copy'): |
|
595 | 595 | raise e |
|
596 | 596 | else: |
|
597 | 597 | nonormalfiles = True |
|
598 | 598 | result = 0 |
|
599 | 599 | |
|
600 | 600 | # The first rename can cause our current working directory to be removed. |
|
601 | 601 | # In that case there is nothing left to copy/rename so just quit. |
|
602 | 602 | try: |
|
603 | 603 | repo.getcwd() |
|
604 | 604 | except OSError: |
|
605 | 605 | return result |
|
606 | 606 | |
|
607 | 607 | def makestandin(relpath): |
|
608 | 608 | path = pathutil.canonpath(repo.root, repo.getcwd(), relpath) |
|
609 | 609 | return repo.wvfs.join(lfutil.standin(path)) |
|
610 | 610 | |
|
611 | 611 | fullpats = scmutil.expandpats(pats) |
|
612 | 612 | dest = fullpats[-1] |
|
613 | 613 | |
|
614 | 614 | if os.path.isdir(dest): |
|
615 | 615 | if not os.path.isdir(makestandin(dest)): |
|
616 | 616 | os.makedirs(makestandin(dest)) |
|
617 | 617 | |
|
618 | 618 | try: |
|
619 | 619 | # When we call orig below it creates the standins but we don't add |
|
620 | 620 | # them to the dir state until later so lock during that time. |
|
621 | 621 | wlock = repo.wlock() |
|
622 | 622 | |
|
623 | 623 | manifest = repo[None].manifest() |
|
624 | 624 | def overridematch(orig, ctx, pats=(), opts=None, globbed=False, |
|
625 | 625 | default='relpath', badfn=None): |
|
626 | 626 | if opts is None: |
|
627 | 627 | opts = {} |
|
628 | 628 | newpats = [] |
|
629 | 629 | # The patterns were previously mangled to add the standin |
|
630 | 630 | # directory; we need to remove that now |
|
631 | 631 | for pat in pats: |
|
632 | 632 | if matchmod.patkind(pat) is None and lfutil.shortname in pat: |
|
633 | 633 | newpats.append(pat.replace(lfutil.shortname, '')) |
|
634 | 634 | else: |
|
635 | 635 | newpats.append(pat) |
|
636 | 636 | match = orig(ctx, newpats, opts, globbed, default, badfn=badfn) |
|
637 | 637 | m = copy.copy(match) |
|
638 | 638 | lfile = lambda f: lfutil.standin(f) in manifest |
|
639 | 639 | m._files = [lfutil.standin(f) for f in m._files if lfile(f)] |
|
640 | 640 | m._fileset = set(m._files) |
|
641 | 641 | origmatchfn = m.matchfn |
|
642 | 642 | def matchfn(f): |
|
643 | 643 | lfile = lfutil.splitstandin(f) |
|
644 | 644 | return (lfile is not None and |
|
645 | 645 | (f in manifest) and |
|
646 | 646 | origmatchfn(lfile) or |
|
647 | 647 | None) |
|
648 | 648 | m.matchfn = matchfn |
|
649 | 649 | return m |
|
650 | 650 | listpats = [] |
|
651 | 651 | for pat in pats: |
|
652 | 652 | if matchmod.patkind(pat) is not None: |
|
653 | 653 | listpats.append(pat) |
|
654 | 654 | else: |
|
655 | 655 | listpats.append(makestandin(pat)) |
|
656 | 656 | |
|
657 | 657 | copiedfiles = [] |
|
658 | 658 | def overridecopyfile(orig, src, dest, *args, **kwargs): |
|
659 | 659 | if (lfutil.shortname in src and |
|
660 | 660 | dest.startswith(repo.wjoin(lfutil.shortname))): |
|
661 | 661 | destlfile = dest.replace(lfutil.shortname, '') |
|
662 | 662 | if not opts['force'] and os.path.exists(destlfile): |
|
663 | 663 | raise IOError('', |
|
664 | 664 | _('destination largefile already exists')) |
|
665 | 665 | copiedfiles.append((src, dest)) |
|
666 | 666 | orig(src, dest, *args, **kwargs) |
|
667 | 667 | with extensions.wrappedfunction(util, 'copyfile', overridecopyfile), \ |
|
668 | 668 | extensions.wrappedfunction(scmutil, 'match', overridematch): |
|
669 | 669 | result += orig(ui, repo, listpats, opts, rename) |
|
670 | 670 | |
|
671 | 671 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
672 | 672 | for (src, dest) in copiedfiles: |
|
673 | 673 | if (lfutil.shortname in src and |
|
674 | 674 | dest.startswith(repo.wjoin(lfutil.shortname))): |
|
675 | 675 | srclfile = src.replace(repo.wjoin(lfutil.standin('')), '') |
|
676 | 676 | destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '') |
|
677 | 677 | destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.' |
|
678 | 678 | if not os.path.isdir(destlfiledir): |
|
679 | 679 | os.makedirs(destlfiledir) |
|
680 | 680 | if rename: |
|
681 | 681 | os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile)) |
|
682 | 682 | |
|
683 | 683 | # The file is gone, but this deletes any empty parent |
|
684 | 684 | # directories as a side-effect. |
|
685 | 685 | repo.wvfs.unlinkpath(srclfile, ignoremissing=True) |
|
686 | 686 | lfdirstate.remove(srclfile) |
|
687 | 687 | else: |
|
688 | 688 | util.copyfile(repo.wjoin(srclfile), |
|
689 | 689 | repo.wjoin(destlfile)) |
|
690 | 690 | |
|
691 | 691 | lfdirstate.add(destlfile) |
|
692 | 692 | lfdirstate.write() |
|
693 | 693 | except error.Abort as e: |
|
694 | 694 | if pycompat.bytestr(e) != _('no files to copy'): |
|
695 | 695 | raise e |
|
696 | 696 | else: |
|
697 | 697 | nolfiles = True |
|
698 | 698 | finally: |
|
699 | 699 | wlock.release() |
|
700 | 700 | |
|
701 | 701 | if nolfiles and nonormalfiles: |
|
702 | 702 | raise error.Abort(_('no files to copy')) |
|
703 | 703 | |
|
704 | 704 | return result |
|
705 | 705 | |
|
706 | 706 | # When the user calls revert, we have to be careful to not revert any |
|
707 | 707 | # changes to other largefiles accidentally. This means we have to keep |
|
708 | 708 | # track of the largefiles that are being reverted so we only pull down |
|
709 | 709 | # the necessary largefiles. |
|
710 | 710 | # |
|
711 | 711 | # Standins are only updated (to match the hash of largefiles) before |
|
712 | 712 | # commits. Update the standins then run the original revert, changing |
|
713 | 713 | # the matcher to hit standins instead of largefiles. Based on the |
|
714 | 714 | # resulting standins update the largefiles. |
|
715 | 715 | @eh.wrapfunction(cmdutil, 'revert') |
|
716 | 716 | def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts): |
|
717 | 717 | # Because we put the standins in a bad state (by updating them) |
|
718 | 718 | # and then return them to a correct state we need to lock to |
|
719 | 719 | # prevent others from changing them in their incorrect state. |
|
720 | 720 | with repo.wlock(): |
|
721 | 721 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
722 | 722 | s = lfutil.lfdirstatestatus(lfdirstate, repo) |
|
723 | 723 | lfdirstate.write() |
|
724 | 724 | for lfile in s.modified: |
|
725 | 725 | lfutil.updatestandin(repo, lfile, lfutil.standin(lfile)) |
|
726 | 726 | for lfile in s.deleted: |
|
727 | 727 | fstandin = lfutil.standin(lfile) |
|
728 | 728 | if (repo.wvfs.exists(fstandin)): |
|
729 | 729 | repo.wvfs.unlink(fstandin) |
|
730 | 730 | |
|
731 | 731 | oldstandins = lfutil.getstandinsstate(repo) |
|
732 | 732 | |
|
733 | 733 | def overridematch(orig, mctx, pats=(), opts=None, globbed=False, |
|
734 | 734 | default='relpath', badfn=None): |
|
735 | 735 | if opts is None: |
|
736 | 736 | opts = {} |
|
737 | 737 | match = orig(mctx, pats, opts, globbed, default, badfn=badfn) |
|
738 | 738 | m = copy.copy(match) |
|
739 | 739 | |
|
740 | 740 | # revert supports recursing into subrepos, and though largefiles |
|
741 | 741 | # currently doesn't work correctly in that case, this match is |
|
742 | 742 | # called, so the lfdirstate above may not be the correct one for |
|
743 | 743 | # this invocation of match. |
|
744 | 744 | lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(), |
|
745 | 745 | False) |
|
746 | 746 | |
|
747 | 747 | wctx = repo[None] |
|
748 | 748 | matchfiles = [] |
|
749 | 749 | for f in m._files: |
|
750 | 750 | standin = lfutil.standin(f) |
|
751 | 751 | if standin in ctx or standin in mctx: |
|
752 | 752 | matchfiles.append(standin) |
|
753 | 753 | elif standin in wctx or lfdirstate[f] == 'r': |
|
754 | 754 | continue |
|
755 | 755 | else: |
|
756 | 756 | matchfiles.append(f) |
|
757 | 757 | m._files = matchfiles |
|
758 | 758 | m._fileset = set(m._files) |
|
759 | 759 | origmatchfn = m.matchfn |
|
760 | 760 | def matchfn(f): |
|
761 | 761 | lfile = lfutil.splitstandin(f) |
|
762 | 762 | if lfile is not None: |
|
763 | 763 | return (origmatchfn(lfile) and |
|
764 | 764 | (f in ctx or f in mctx)) |
|
765 | 765 | return origmatchfn(f) |
|
766 | 766 | m.matchfn = matchfn |
|
767 | 767 | return m |
|
768 | 768 | with extensions.wrappedfunction(scmutil, 'match', overridematch): |
|
769 | 769 | orig(ui, repo, ctx, parents, *pats, **opts) |
|
770 | 770 | |
|
771 | 771 | newstandins = lfutil.getstandinsstate(repo) |
|
772 | 772 | filelist = lfutil.getlfilestoupdate(oldstandins, newstandins) |
|
773 | 773 | # lfdirstate should be 'normallookup'-ed for updated files, |
|
774 | 774 | # because reverting doesn't touch dirstate for 'normal' files |
|
775 | 775 | # when target revision is explicitly specified: in such case, |
|
776 | 776 | # 'n' and valid timestamp in dirstate doesn't ensure 'clean' |
|
777 | 777 | # of target (standin) file. |
|
778 | 778 | lfcommands.updatelfiles(ui, repo, filelist, printmessage=False, |
|
779 | 779 | normallookup=True) |
|
780 | 780 | |
|
781 | 781 | # after pulling changesets, we need to take some extra care to get |
|
782 | 782 | # largefiles updated remotely |
|
783 | 783 | @eh.wrapcommand('pull', |
|
784 | 784 | opts=[('', 'all-largefiles', None, |
|
785 | 785 | _('download all pulled versions of largefiles (DEPRECATED)')), |
|
786 | 786 | ('', 'lfrev', [], |
|
787 | 787 | _('download largefiles for these revisions'), _('REV'))]) |
|
788 | 788 | def overridepull(orig, ui, repo, source=None, **opts): |
|
789 | 789 | revsprepull = len(repo) |
|
790 | 790 | if not source: |
|
791 | 791 | source = 'default' |
|
792 | 792 | repo.lfpullsource = source |
|
793 | 793 | result = orig(ui, repo, source, **opts) |
|
794 | 794 | revspostpull = len(repo) |
|
795 | 795 | lfrevs = opts.get(r'lfrev', []) |
|
796 | 796 | if opts.get(r'all_largefiles'): |
|
797 | 797 | lfrevs.append('pulled()') |
|
798 | 798 | if lfrevs and revspostpull > revsprepull: |
|
799 | 799 | numcached = 0 |
|
800 | 800 | repo.firstpulled = revsprepull # for pulled() revset expression |
|
801 | 801 | try: |
|
802 | 802 | for rev in scmutil.revrange(repo, lfrevs): |
|
803 | 803 | ui.note(_('pulling largefiles for revision %d\n') % rev) |
|
804 | 804 | (cached, missing) = lfcommands.cachelfiles(ui, repo, rev) |
|
805 | 805 | numcached += len(cached) |
|
806 | 806 | finally: |
|
807 | 807 | del repo.firstpulled |
|
808 | 808 | ui.status(_("%d largefiles cached\n") % numcached) |
|
809 | 809 | return result |
|
810 | 810 | |
|
811 | 811 | @eh.wrapcommand('push', |
|
812 | 812 | opts=[('', 'lfrev', [], |
|
813 | 813 | _('upload largefiles for these revisions'), _('REV'))]) |
|
814 | 814 | def overridepush(orig, ui, repo, *args, **kwargs): |
|
815 | 815 | """Override push command and store --lfrev parameters in opargs""" |
|
816 | 816 | lfrevs = kwargs.pop(r'lfrev', None) |
|
817 | 817 | if lfrevs: |
|
818 | 818 | opargs = kwargs.setdefault(r'opargs', {}) |
|
819 | 819 | opargs['lfrevs'] = scmutil.revrange(repo, lfrevs) |
|
820 | 820 | return orig(ui, repo, *args, **kwargs) |
|
821 | 821 | |
|
822 | 822 | @eh.wrapfunction(exchange, 'pushoperation') |
|
823 | 823 | def exchangepushoperation(orig, *args, **kwargs): |
|
824 | 824 | """Override pushoperation constructor and store lfrevs parameter""" |
|
825 | 825 | lfrevs = kwargs.pop(r'lfrevs', None) |
|
826 | 826 | pushop = orig(*args, **kwargs) |
|
827 | 827 | pushop.lfrevs = lfrevs |
|
828 | 828 | return pushop |
|
829 | 829 | |
|
830 | 830 | @eh.revsetpredicate('pulled()') |
|
831 | 831 | def pulledrevsetsymbol(repo, subset, x): |
|
832 | 832 | """Changesets that have just been pulled.
|
833 | 833 | |
|
834 | 834 | Only available with largefiles from pull --lfrev expressions. |
|
835 | 835 | |
|
836 | 836 | .. container:: verbose |
|
837 | 837 | |
|
838 | 838 | Some examples: |
|
839 | 839 | |
|
840 | 840 | - pull largefiles for all new changesets:: |
|
841 | 841 | |
|
842 | 842 | hg pull --lfrev "pulled()"
|
843 | 843 | |
|
844 | 844 | - pull largefiles for all new branch heads:: |
|
845 | 845 | |
|
846 | 846 | hg pull --lfrev "head(pulled()) and not closed()"
|
847 | 847 | |
|
848 | 848 | """ |
|
849 | 849 | |
|
850 | 850 | try: |
|
851 | 851 | firstpulled = repo.firstpulled |
|
852 | 852 | except AttributeError: |
|
853 | 853 | raise error.Abort(_("pulled() only available in --lfrev")) |
|
854 | 854 | return smartset.baseset([r for r in subset if r >= firstpulled]) |
|
855 | 855 | |
|
856 | 856 | @eh.wrapcommand('clone', |
|
857 | 857 | opts=[('', 'all-largefiles', None, |
|
858 | 858 | _('download all versions of all largefiles'))]) |
|
859 | 859 | def overrideclone(orig, ui, source, dest=None, **opts): |
|
860 | 860 | d = dest |
|
861 | 861 | if d is None: |
|
862 | 862 | d = hg.defaultdest(source) |
|
863 | 863 | if opts.get(r'all_largefiles') and not hg.islocal(d): |
|
864 | 864 | raise error.Abort(_( |
|
865 | 865 | '--all-largefiles is incompatible with non-local destination %s') % |
|
866 | 866 | d) |
|
867 | 867 | |
|
868 | 868 | return orig(ui, source, dest, **opts) |
|
869 | 869 | |
|
870 | 870 | @eh.wrapfunction(hg, 'clone') |
|
871 | 871 | def hgclone(orig, ui, opts, *args, **kwargs): |
|
872 | 872 | result = orig(ui, opts, *args, **kwargs) |
|
873 | 873 | |
|
874 | 874 | if result is not None: |
|
875 | 875 | sourcerepo, destrepo = result |
|
876 | 876 | repo = destrepo.local() |
|
877 | 877 | |
|
878 | 878 | # When cloning to a remote repo (like through SSH), no repo is available |
|
879 | 879 | # from the peer. Therefore the largefiles can't be downloaded and the |
|
880 | 880 | # hgrc can't be updated. |
|
881 | 881 | if not repo: |
|
882 | 882 | return result |
|
883 | 883 | |
|
884 | 884 | # Caching is implicitly limited to 'rev' option, since the dest repo was |
|
885 | 885 | # truncated at that point. The user may expect a download count with |
|
886 | 886 | # this option, so attempt whether or not this is a largefile repo. |
|
887 | 887 | if opts.get('all_largefiles'): |
|
888 | 888 | success, missing = lfcommands.downloadlfiles(ui, repo, None) |
|
889 | 889 | |
|
890 | 890 | if missing != 0: |
|
891 | 891 | return None |
|
892 | 892 | |
|
893 | 893 | return result |
|
894 | 894 | |
|
895 | 895 | @eh.wrapcommand('rebase', extension='rebase') |
|
896 | 896 | def overriderebase(orig, ui, repo, **opts): |
|
897 | 897 | if not util.safehasattr(repo, '_largefilesenabled'): |
|
898 | 898 | return orig(ui, repo, **opts) |
|
899 | 899 | |
|
900 | 900 | resuming = opts.get(r'continue') |
|
901 | 901 | repo._lfcommithooks.append(lfutil.automatedcommithook(resuming)) |
|
902 | 902 | repo._lfstatuswriters.append(lambda *msg, **opts: None) |
|
903 | 903 | try: |
|
904 | 904 | return orig(ui, repo, **opts) |
|
905 | 905 | finally: |
|
906 | 906 | repo._lfstatuswriters.pop() |
|
907 | 907 | repo._lfcommithooks.pop() |
|
908 | 908 | |
|
909 | 909 | @eh.wrapcommand('archive') |
|
910 | 910 | def overridearchivecmd(orig, ui, repo, dest, **opts): |
|
911 | 911 | repo.unfiltered().lfstatus = True |
|
912 | 912 | |
|
913 | 913 | try: |
|
914 | 914 | return orig(ui, repo.unfiltered(), dest, **opts) |
|
915 | 915 | finally: |
|
916 | 916 | repo.unfiltered().lfstatus = False |
|
917 | 917 | |
|
918 | 918 | @eh.wrapfunction(webcommands, 'archive') |
|
919 | 919 | def hgwebarchive(orig, web): |
|
920 | 920 | web.repo.lfstatus = True |
|
921 | 921 | |
|
922 | 922 | try: |
|
923 | 923 | return orig(web) |
|
924 | 924 | finally: |
|
925 | 925 | web.repo.lfstatus = False |
|
926 | 926 | |
|
927 | 927 | @eh.wrapfunction(archival, 'archive') |
|
928 | 928 | def overridearchive(orig, repo, dest, node, kind, decode=True, match=None, |
|
929 | 929 | prefix='', mtime=None, subrepos=None): |
|
930 | 930 | # For some reason setting repo.lfstatus in hgwebarchive only changes the |
|
931 | 931 | # unfiltered repo's attr, so check that as well. |
|
932 | 932 | if not repo.lfstatus and not repo.unfiltered().lfstatus: |
|
933 | 933 | return orig(repo, dest, node, kind, decode, match, prefix, mtime, |
|
934 | 934 | subrepos) |
|
935 | 935 | |
|
936 | 936 | # No need to lock because we are only reading history and |
|
937 | 937 | # largefile caches, neither of which are modified. |
|
938 | 938 | if node is not None: |
|
939 | 939 | lfcommands.cachelfiles(repo.ui, repo, node) |
|
940 | 940 | |
|
941 | 941 | if kind not in archival.archivers: |
|
942 | 942 | raise error.Abort(_("unknown archive type '%s'") % kind) |
|
943 | 943 | |
|
944 | 944 | ctx = repo[node] |
|
945 | 945 | |
|
946 | 946 | if kind == 'files': |
|
947 | 947 | if prefix: |
|
948 | 948 | raise error.Abort( |
|
949 | 949 | _('cannot give prefix when archiving to files')) |
|
950 | 950 | else: |
|
951 | 951 | prefix = archival.tidyprefix(dest, kind, prefix) |
|
952 | 952 | |
|
953 | 953 | def write(name, mode, islink, getdata): |
|
954 | 954 | if match and not match(name): |
|
955 | 955 | return |
|
956 | 956 | data = getdata() |
|
957 | 957 | if decode: |
|
958 | 958 | data = repo.wwritedata(name, data) |
|
959 | 959 | archiver.addfile(prefix + name, mode, islink, data) |
|
960 | 960 | |
|
961 | 961 | archiver = archival.archivers[kind](dest, mtime or ctx.date()[0]) |
|
962 | 962 | |
|
963 | 963 | if repo.ui.configbool("ui", "archivemeta"): |
|
964 | 964 | write('.hg_archival.txt', 0o644, False, |
|
965 | 965 | lambda: archival.buildmetadata(ctx)) |
|
966 | 966 | |
|
967 | 967 | for f in ctx: |
|
968 | 968 | ff = ctx.flags(f) |
|
969 | 969 | getdata = ctx[f].data |
|
970 | 970 | lfile = lfutil.splitstandin(f) |
|
971 | 971 | if lfile is not None: |
|
972 | 972 | if node is not None: |
|
973 | 973 | path = lfutil.findfile(repo, getdata().strip()) |
|
974 | 974 | |
|
975 | 975 | if path is None: |
|
976 | 976 | raise error.Abort( |
|
977 | 977 | _('largefile %s not found in repo store or system cache') |
|
978 | 978 | % lfile) |
|
979 | 979 | else: |
|
980 | 980 | path = lfile |
|
981 | 981 | |
|
982 | 982 | f = lfile |
|
983 | 983 | |
|
984 | 984 | getdata = lambda: util.readfile(path) |
|
985 | 985 | write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata) |
|
986 | 986 | |
|
987 | 987 | if subrepos: |
|
988 | 988 | for subpath in sorted(ctx.substate): |
|
989 | 989 | sub = ctx.workingsub(subpath) |
|
990 | 990 | submatch = matchmod.subdirmatcher(subpath, match) |
|
991 | 991 | subprefix = prefix + subpath + '/' |
|
992 | 992 | sub._repo.lfstatus = True |
|
993 | 993 | sub.archive(archiver, subprefix, submatch) |
|
994 | 994 | |
|
995 | 995 | archiver.done() |
|
996 | 996 | |
|
997 | 997 | @eh.wrapfunction(subrepo.hgsubrepo, 'archive') |
|
998 | 998 | def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True): |
|
999 | 999 | lfenabled = util.safehasattr(repo._repo, '_largefilesenabled') |
|
1000 | 1000 | if not lfenabled or not repo._repo.lfstatus: |
|
1001 | 1001 | return orig(repo, archiver, prefix, match, decode) |
|
1002 | 1002 | |
|
1003 | 1003 | repo._get(repo._state + ('hg',)) |
|
1004 | 1004 | rev = repo._state[1] |
|
1005 | 1005 | ctx = repo._repo[rev] |
|
1006 | 1006 | |
|
1007 | 1007 | if ctx.node() is not None: |
|
1008 | 1008 | lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node()) |
|
1009 | 1009 | |
|
1010 | 1010 | def write(name, mode, islink, getdata): |
|
1011 | 1011 | # At this point, the standin has been replaced with the largefile name, |
|
1012 | 1012 | # so the normal matcher works here without the lfutil variants. |
|
1013 | 1013 | if match and not match(f): |
|
1014 | 1014 | return |
|
1015 | 1015 | data = getdata() |
|
1016 | 1016 | if decode: |
|
1017 | 1017 | data = repo._repo.wwritedata(name, data) |
|
1018 | 1018 | |
|
1019 | 1019 | archiver.addfile(prefix + name, mode, islink, data) |
|
1020 | 1020 | |
|
1021 | 1021 | for f in ctx: |
|
1022 | 1022 | ff = ctx.flags(f) |
|
1023 | 1023 | getdata = ctx[f].data |
|
1024 | 1024 | lfile = lfutil.splitstandin(f) |
|
1025 | 1025 | if lfile is not None: |
|
1026 | 1026 | if ctx.node() is not None: |
|
1027 | 1027 | path = lfutil.findfile(repo._repo, getdata().strip()) |
|
1028 | 1028 | |
|
1029 | 1029 | if path is None: |
|
1030 | 1030 | raise error.Abort( |
|
1031 | 1031 | _('largefile %s not found in repo store or system cache') |
|
1032 | 1032 | % lfile) |
|
1033 | 1033 | else: |
|
1034 | 1034 | path = lfile |
|
1035 | 1035 | |
|
1036 | 1036 | f = lfile |
|
1037 | 1037 | |
|
1038 | 1038 | getdata = lambda: util.readfile(os.path.join(prefix, path)) |
|
1039 | 1039 | |
|
1040 | 1040 | write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata) |
|
1041 | 1041 | |
|
1042 | 1042 | for subpath in sorted(ctx.substate): |
|
1043 | 1043 | sub = ctx.workingsub(subpath) |
|
1044 | 1044 | submatch = matchmod.subdirmatcher(subpath, match) |
|
1045 | 1045 | subprefix = prefix + subpath + '/' |
|
1046 | 1046 | sub._repo.lfstatus = True |
|
1047 | 1047 | sub.archive(archiver, subprefix, submatch, decode) |
|
1048 | 1048 | |
|
1049 | 1049 | # If a largefile is modified, the change is not reflected in its |
|
1050 | 1050 | # standin until a commit. cmdutil.bailifchanged() raises an exception |
|
1051 | 1051 | # if the repo has uncommitted changes. Wrap it to also check if |
|
1052 | 1052 | # largefiles were changed. This is used by bisect, backout and fetch. |
|
1053 | 1053 | @eh.wrapfunction(cmdutil, 'bailifchanged') |
|
1054 | 1054 | def overridebailifchanged(orig, repo, *args, **kwargs): |
|
1055 | 1055 | orig(repo, *args, **kwargs) |
|
1056 | 1056 | repo.lfstatus = True |
|
1057 | 1057 | s = repo.status() |
|
1058 | 1058 | repo.lfstatus = False |
|
1059 | 1059 | if s.modified or s.added or s.removed or s.deleted: |
|
1060 | 1060 | raise error.Abort(_('uncommitted changes')) |
|
1061 | 1061 | |
|
1062 | 1062 | @eh.wrapfunction(cmdutil, 'postcommitstatus') |
|
1063 | 1063 | def postcommitstatus(orig, repo, *args, **kwargs): |
|
1064 | 1064 | repo.lfstatus = True |
|
1065 | 1065 | try: |
|
1066 | 1066 | return orig(repo, *args, **kwargs) |
|
1067 | 1067 | finally: |
|
1068 | 1068 | repo.lfstatus = False |
|
1069 | 1069 | |
|
1070 | 1070 | @eh.wrapfunction(cmdutil, 'forget') |
|
1071 | 1071 | def cmdutilforget(orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, |
|
1072 | 1072 | interactive): |
|
1073 | 1073 | normalmatcher = composenormalfilematcher(match, repo[None].manifest()) |
|
1074 | 1074 | bad, forgot = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, |
|
1075 | 1075 | dryrun, interactive) |
|
1076 | 1076 | m = composelargefilematcher(match, repo[None].manifest()) |
|
1077 | 1077 | |
|
1078 | 1078 | try: |
|
1079 | 1079 | repo.lfstatus = True |
|
1080 | 1080 | s = repo.status(match=m, clean=True) |
|
1081 | 1081 | finally: |
|
1082 | 1082 | repo.lfstatus = False |
|
1083 | 1083 | manifest = repo[None].manifest() |
|
1084 | 1084 | forget = sorted(s.modified + s.added + s.deleted + s.clean) |
|
1085 | 1085 | forget = [f for f in forget if lfutil.standin(f) in manifest] |
|
1086 | 1086 | |
|
1087 | 1087 | for f in forget: |
|
1088 | 1088 | fstandin = lfutil.standin(f) |
|
1089 | 1089 | if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin): |
|
1090 | 1090 | ui.warn(_('not removing %s: file is already untracked\n') |
|
1091 | 1091 | % uipathfn(f)) |
|
1092 | 1092 | bad.append(f) |
|
1093 | 1093 | |
|
1094 | 1094 | for f in forget: |
|
1095 | 1095 | if ui.verbose or not m.exact(f): |
|
1096 | 1096 | ui.status(_('removing %s\n') % uipathfn(f)) |
|
1097 | 1097 | |
|
1098 | 1098 | # Need to lock because standin files are deleted then removed from the |
|
1099 | 1099 | # repository and we could race in-between. |
|
1100 | 1100 | with repo.wlock(): |
|
1101 | 1101 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
1102 | 1102 | for f in forget: |
|
1103 | 1103 | if lfdirstate[f] == 'a': |
|
1104 | 1104 | lfdirstate.drop(f) |
|
1105 | 1105 | else: |
|
1106 | 1106 | lfdirstate.remove(f) |
|
1107 | 1107 | lfdirstate.write() |
|
1108 | 1108 | standins = [lfutil.standin(f) for f in forget] |
|
1109 | 1109 | for f in standins: |
|
1110 | 1110 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
1111 | 1111 | rejected = repo[None].forget(standins) |
|
1112 | 1112 | |
|
1113 | 1113 | bad.extend(f for f in rejected if f in m.files()) |
|
1114 | 1114 | forgot.extend(f for f in forget if f not in rejected) |
|
1115 | 1115 | return bad, forgot |
|
1116 | 1116 | |
|
1117 | 1117 | def _getoutgoings(repo, other, missing, addfunc): |
|
1118 | 1118 | """get pairs of filename and largefile hash in outgoing revisions |
|
1119 | 1119 | in 'missing'. |
|
1120 | 1120 | |
|
1121 | 1121 | largefiles already existing on 'other' repository are ignored. |
|
1122 | 1122 | |
|
1123 | 1123 | 'addfunc' is invoked with each unique pairs of filename and |
|
1124 | 1124 | largefile hash value. |
|
1125 | 1125 | """ |
|
1126 | 1126 | knowns = set() |
|
1127 | 1127 | lfhashes = set() |
|
1128 | 1128 | def dedup(fn, lfhash): |
|
1129 | 1129 | k = (fn, lfhash) |
|
1130 | 1130 | if k not in knowns: |
|
1131 | 1131 | knowns.add(k) |
|
1132 | 1132 | lfhashes.add(lfhash) |
|
1133 | 1133 | lfutil.getlfilestoupload(repo, missing, dedup) |
|
1134 | 1134 | if lfhashes: |
|
1135 | 1135 | lfexists = storefactory.openstore(repo, other).exists(lfhashes) |
|
1136 | 1136 | for fn, lfhash in knowns: |
|
1137 | 1137 | if not lfexists[lfhash]: # lfhash doesn't exist on "other" |
|
1138 | 1138 | addfunc(fn, lfhash) |
|
1139 | 1139 | |
|
1140 | 1140 | def outgoinghook(ui, repo, other, opts, missing): |
|
1141 | 1141 | if opts.pop('large', None): |
|
1142 | 1142 | lfhashes = set() |
|
1143 | 1143 | if ui.debugflag: |
|
1144 | 1144 | toupload = {} |
|
1145 | 1145 | def addfunc(fn, lfhash): |
|
1146 | 1146 | if fn not in toupload: |
|
1147 | 1147 | toupload[fn] = [] |
|
1148 | 1148 | toupload[fn].append(lfhash) |
|
1149 | 1149 | lfhashes.add(lfhash) |
|
1150 | 1150 | def showhashes(fn): |
|
1151 | 1151 | for lfhash in sorted(toupload[fn]): |
|
1152 | 1152 | ui.debug(' %s\n' % (lfhash)) |
|
1153 | 1153 | else: |
|
1154 | 1154 | toupload = set() |
|
1155 | 1155 | def addfunc(fn, lfhash): |
|
1156 | 1156 | toupload.add(fn) |
|
1157 | 1157 | lfhashes.add(lfhash) |
|
1158 | 1158 | def showhashes(fn): |
|
1159 | 1159 | pass |
|
1160 | 1160 | _getoutgoings(repo, other, missing, addfunc) |
|
1161 | 1161 | |
|
1162 | 1162 | if not toupload: |
|
1163 | 1163 | ui.status(_('largefiles: no files to upload\n')) |
|
1164 | 1164 | else: |
|
1165 | 1165 | ui.status(_('largefiles to upload (%d entities):\n') |
|
1166 | 1166 | % (len(lfhashes))) |
|
1167 | 1167 | for file in sorted(toupload): |
|
1168 | 1168 | ui.status(lfutil.splitstandin(file) + '\n') |
|
1169 | 1169 | showhashes(file) |
|
1170 | 1170 | ui.status('\n') |
|
1171 | 1171 | |
|
1172 | 1172 | @eh.wrapcommand('outgoing', |
|
1173 | 1173 | opts=[('', 'large', None, _('display outgoing largefiles'))]) |
|
1174 | 1174 | def _outgoingcmd(orig, *args, **kwargs): |
|
1175 | 1175 | # Nothing to do here other than add the extra help option- the hook above |
|
1176 | 1176 | # processes it. |
|
1177 | 1177 | return orig(*args, **kwargs) |
|
1178 | 1178 | |
|
1179 | 1179 | def summaryremotehook(ui, repo, opts, changes): |
|
1180 | 1180 | largeopt = opts.get('large', False) |
|
1181 | 1181 | if changes is None: |
|
1182 | 1182 | if largeopt: |
|
1183 | 1183 | return (False, True) # only outgoing check is needed |
|
1184 | 1184 | else: |
|
1185 | 1185 | return (False, False) |
|
1186 | 1186 | elif largeopt: |
|
1187 | 1187 | url, branch, peer, outgoing = changes[1] |
|
1188 | 1188 | if peer is None: |
|
1189 | 1189 | # i18n: column positioning for "hg summary" |
|
1190 | 1190 | ui.status(_('largefiles: (no remote repo)\n')) |
|
1191 | 1191 | return |
|
1192 | 1192 | |
|
1193 | 1193 | toupload = set() |
|
1194 | 1194 | lfhashes = set() |
|
1195 | 1195 | def addfunc(fn, lfhash): |
|
1196 | 1196 | toupload.add(fn) |
|
1197 | 1197 | lfhashes.add(lfhash) |
|
1198 | 1198 | _getoutgoings(repo, peer, outgoing.missing, addfunc) |
|
1199 | 1199 | |
|
1200 | 1200 | if not toupload: |
|
1201 | 1201 | # i18n: column positioning for "hg summary" |
|
1202 | 1202 | ui.status(_('largefiles: (no files to upload)\n')) |
|
1203 | 1203 | else: |
|
1204 | 1204 | # i18n: column positioning for "hg summary" |
|
1205 | 1205 | ui.status(_('largefiles: %d entities for %d files to upload\n') |
|
1206 | 1206 | % (len(lfhashes), len(toupload))) |
|
1207 | 1207 | |
|
1208 | 1208 | @eh.wrapcommand('summary', |
|
1209 | 1209 | opts=[('', 'large', None, _('display outgoing largefiles'))]) |
|
1210 | 1210 | def overridesummary(orig, ui, repo, *pats, **opts): |
|
1211 | 1211 | try: |
|
1212 | 1212 | repo.lfstatus = True |
|
1213 | 1213 | orig(ui, repo, *pats, **opts) |
|
1214 | 1214 | finally: |
|
1215 | 1215 | repo.lfstatus = False |
|
1216 | 1216 | |
|
1217 | 1217 | @eh.wrapfunction(scmutil, 'addremove') |
|
1218 | 1218 | def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None): |
|
1219 | 1219 | if opts is None: |
|
1220 | 1220 | opts = {} |
|
1221 | 1221 | if not lfutil.islfilesrepo(repo): |
|
1222 | 1222 | return orig(repo, matcher, prefix, uipathfn, opts) |
|
1223 | 1223 | # Get the list of missing largefiles so we can remove them |
|
1224 | 1224 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
1225 | | unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()),

1226 | | subrepos=[], ignored=False, clean=False,

1227 | | unknown=False)

| 1225 | unsure, s = lfdirstate.status(matchmod.always(), subrepos=[],

| 1226 | ignored=False, clean=False, unknown=False)
|
1228 | 1227 | |
|
1229 | 1228 | # Call into the normal remove code, but the removing of the standin, we want |
|
1230 | 1229 | # to have handled by original addremove. Monkey patching here makes sure |
|
1231 | 1230 | # we don't remove the standin in the largefiles code, preventing a very |
|
1232 | 1231 | # confused state later. |
|
1233 | 1232 | if s.deleted: |
|
1234 | 1233 | m = copy.copy(matcher) |
|
1235 | 1234 | |
|
1236 | 1235 | # The m._files and m._map attributes are not changed to the deleted list |
|
1237 | 1236 | # because that affects the m.exact() test, which in turn governs whether |
|
1238 | 1237 | # or not the file name is printed, and how. Simply limit the original |
|
1239 | 1238 | # matches to those in the deleted status list. |
|
1240 | 1239 | matchfn = m.matchfn |
|
1241 | 1240 | m.matchfn = lambda f: f in s.deleted and matchfn(f) |
|
1242 | 1241 | |
|
1243 | 1242 | removelargefiles(repo.ui, repo, True, m, uipathfn, opts.get('dry_run'), |
|
1244 | 1243 | **pycompat.strkwargs(opts)) |
|
1245 | 1244 | # Call into the normal add code, and any files that *should* be added as |
|
1246 | 1245 | # largefiles will be |
|
1247 | 1246 | added, bad = addlargefiles(repo.ui, repo, True, matcher, uipathfn, |
|
1248 | 1247 | **pycompat.strkwargs(opts)) |
|
1249 | 1248 | # Now that we've handled largefiles, hand off to the original addremove |
|
1250 | 1249 | # function to take care of the rest. Make sure it doesn't do anything with |
|
1251 | 1250 | # largefiles by passing a matcher that will ignore them. |
|
1252 | 1251 | matcher = composenormalfilematcher(matcher, repo[None].manifest(), added) |
|
1253 | 1252 | return orig(repo, matcher, prefix, uipathfn, opts) |
|
1254 | 1253 | |
|
1255 | 1254 | # Calling purge with --all will cause the largefiles to be deleted. |
|
1256 | 1255 | # Override repo.status to prevent this from happening. |
|
1257 | 1256 | @eh.wrapcommand('purge', extension='purge') |
|
1258 | 1257 | def overridepurge(orig, ui, repo, *dirs, **opts): |
|
1259 | 1258 | # XXX Monkey patching a repoview will not work. The assigned attribute will |
|
1260 | 1259 | # be set on the unfiltered repo, but we will only lookup attributes in the |
|
1261 | 1260 | # unfiltered repo if the lookup in the repoview object itself fails. As the |
|
1262 | 1261 | # monkey patched method exists on the repoview class the lookup will not |
|
1263 | 1262 | # fail. As a result, the original version will shadow the monkey patched |
|
1264 | 1263 | # one, defeating the monkey patch. |
|
1265 | 1264 | # |
|
1266 | 1265 | # As a work around we use an unfiltered repo here. We should do something |
|
1267 | 1266 | # cleaner instead. |
|
1268 | 1267 | repo = repo.unfiltered() |
|
1269 | 1268 | oldstatus = repo.status |
|
1270 | 1269 | def overridestatus(node1='.', node2=None, match=None, ignored=False, |
|
1271 | 1270 | clean=False, unknown=False, listsubrepos=False): |
|
1272 | 1271 | r = oldstatus(node1, node2, match, ignored, clean, unknown, |
|
1273 | 1272 | listsubrepos) |
|
1274 | 1273 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
1275 | 1274 | unknown = [f for f in r.unknown if lfdirstate[f] == '?'] |
|
1276 | 1275 | ignored = [f for f in r.ignored if lfdirstate[f] == '?'] |
|
1277 | 1276 | return scmutil.status(r.modified, r.added, r.removed, r.deleted, |
|
1278 | 1277 | unknown, ignored, r.clean) |
|
1279 | 1278 | repo.status = overridestatus |
|
1280 | 1279 | orig(ui, repo, *dirs, **opts) |
|
1281 | 1280 | repo.status = oldstatus |
|
1282 | 1281 | |
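The repoview limitation described in the comment inside overridepurge() comes from how attribute assignment and lookup interact on a delegating proxy: the assignment lands on the wrapped (unfiltered) object, while lookup still finds the method on the proxy's own class first, so the patched function never runs. A rough, standalone illustration of that behaviour, using generic class names rather than Mercurial's actual repoview types:

    class Unfiltered(object):
        def status(self):
            return 'unfiltered status'

    class View(object):
        """Proxy that defines status() itself and forwards everything else."""
        def __init__(self, unfiltered):
            object.__setattr__(self, '_unfiltered', unfiltered)

        def status(self):
            return 'view status'

        def __getattr__(self, name):
            # only reached when the attribute is NOT found on View itself
            return getattr(self._unfiltered, name)

        def __setattr__(self, name, value):
            # assignments fall through to the wrapped object
            setattr(self._unfiltered, name, value)

    view = View(Unfiltered())
    view.status = lambda: 'patched status'   # stored on the unfiltered object
    print(view.status())                     # 'view status' -- the class method wins
    print(view._unfiltered.status())         # 'patched status'

Working on repo.unfiltered(), as the override above does, sidesteps the proxy entirely, so the reassigned status() is the one that actually gets called.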
|
1283 | 1282 | @eh.wrapcommand('rollback') |
|
1284 | 1283 | def overriderollback(orig, ui, repo, **opts): |
|
1285 | 1284 | with repo.wlock(): |
|
1286 | 1285 | before = repo.dirstate.parents() |
|
1287 | 1286 | orphans = set(f for f in repo.dirstate |
|
1288 | 1287 | if lfutil.isstandin(f) and repo.dirstate[f] != 'r') |
|
1289 | 1288 | result = orig(ui, repo, **opts) |
|
1290 | 1289 | after = repo.dirstate.parents() |
|
1291 | 1290 | if before == after: |
|
1292 | 1291 | return result # no need to restore standins |
|
1293 | 1292 | |
|
1294 | 1293 | pctx = repo['.'] |
|
1295 | 1294 | for f in repo.dirstate: |
|
1296 | 1295 | if lfutil.isstandin(f): |
|
1297 | 1296 | orphans.discard(f) |
|
1298 | 1297 | if repo.dirstate[f] == 'r': |
|
1299 | 1298 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
1300 | 1299 | elif f in pctx: |
|
1301 | 1300 | fctx = pctx[f] |
|
1302 | 1301 | repo.wwrite(f, fctx.data(), fctx.flags()) |
|
1303 | 1302 | else: |
|
1304 | 1303 | # content of standin is not so important in 'a', |
|
1305 | 1304 | # 'm' or 'n' (coming from the 2nd parent) cases |
|
1306 | 1305 | lfutil.writestandin(repo, f, '', False) |
|
1307 | 1306 | for standin in orphans: |
|
1308 | 1307 | repo.wvfs.unlinkpath(standin, ignoremissing=True) |
|
1309 | 1308 | |
|
1310 | 1309 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
1311 | 1310 | orphans = set(lfdirstate) |
|
1312 | 1311 | lfiles = lfutil.listlfiles(repo) |
|
1313 | 1312 | for file in lfiles: |
|
1314 | 1313 | lfutil.synclfdirstate(repo, lfdirstate, file, True) |
|
1315 | 1314 | orphans.discard(file) |
|
1316 | 1315 | for lfile in orphans: |
|
1317 | 1316 | lfdirstate.drop(lfile) |
|
1318 | 1317 | lfdirstate.write() |
|
1319 | 1318 | return result |
|
1320 | 1319 | |
|
1321 | 1320 | @eh.wrapcommand('transplant', extension='transplant') |
|
1322 | 1321 | def overridetransplant(orig, ui, repo, *revs, **opts): |
|
1323 | 1322 | resuming = opts.get(r'continue') |
|
1324 | 1323 | repo._lfcommithooks.append(lfutil.automatedcommithook(resuming)) |
|
1325 | 1324 | repo._lfstatuswriters.append(lambda *msg, **opts: None) |
|
1326 | 1325 | try: |
|
1327 | 1326 | result = orig(ui, repo, *revs, **opts) |
|
1328 | 1327 | finally: |
|
1329 | 1328 | repo._lfstatuswriters.pop() |
|
1330 | 1329 | repo._lfcommithooks.pop() |
|
1331 | 1330 | return result |
|
1332 | 1331 | |
|
1333 | 1332 | @eh.wrapcommand('cat') |
|
1334 | 1333 | def overridecat(orig, ui, repo, file1, *pats, **opts): |
|
1335 | 1334 | opts = pycompat.byteskwargs(opts) |
|
1336 | 1335 | ctx = scmutil.revsingle(repo, opts.get('rev')) |
|
1337 | 1336 | err = 1 |
|
1338 | 1337 | notbad = set() |
|
1339 | 1338 | m = scmutil.match(ctx, (file1,) + pats, opts) |
|
1340 | 1339 | origmatchfn = m.matchfn |
|
1341 | 1340 | def lfmatchfn(f): |
|
1342 | 1341 | if origmatchfn(f): |
|
1343 | 1342 | return True |
|
1344 | 1343 | lf = lfutil.splitstandin(f) |
|
1345 | 1344 | if lf is None: |
|
1346 | 1345 | return False |
|
1347 | 1346 | notbad.add(lf) |
|
1348 | 1347 | return origmatchfn(lf) |
|
1349 | 1348 | m.matchfn = lfmatchfn |
|
1350 | 1349 | origbadfn = m.bad |
|
1351 | 1350 | def lfbadfn(f, msg): |
|
1352 | 1351 | if not f in notbad: |
|
1353 | 1352 | origbadfn(f, msg) |
|
1354 | 1353 | m.bad = lfbadfn |
|
1355 | 1354 | |
|
1356 | 1355 | origvisitdirfn = m.visitdir |
|
1357 | 1356 | def lfvisitdirfn(dir): |
|
1358 | 1357 | if dir == lfutil.shortname: |
|
1359 | 1358 | return True |
|
1360 | 1359 | ret = origvisitdirfn(dir) |
|
1361 | 1360 | if ret: |
|
1362 | 1361 | return ret |
|
1363 | 1362 | lf = lfutil.splitstandin(dir) |
|
1364 | 1363 | if lf is None: |
|
1365 | 1364 | return False |
|
1366 | 1365 | return origvisitdirfn(lf) |
|
1367 | 1366 | m.visitdir = lfvisitdirfn |
|
1368 | 1367 | |
|
1369 | 1368 | for f in ctx.walk(m): |
|
1370 | 1369 | with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp: |
|
1371 | 1370 | lf = lfutil.splitstandin(f) |
|
1372 | 1371 | if lf is None or origmatchfn(f): |
|
1373 | 1372 | # duplicating unreachable code from commands.cat |
|
1374 | 1373 | data = ctx[f].data() |
|
1375 | 1374 | if opts.get('decode'): |
|
1376 | 1375 | data = repo.wwritedata(f, data) |
|
1377 | 1376 | fp.write(data) |
|
1378 | 1377 | else: |
|
1379 | 1378 | hash = lfutil.readasstandin(ctx[f]) |
|
1380 | 1379 | if not lfutil.inusercache(repo.ui, hash): |
|
1381 | 1380 | store = storefactory.openstore(repo) |
|
1382 | 1381 | success, missing = store.get([(lf, hash)]) |
|
1383 | 1382 | if len(success) != 1: |
|
1384 | 1383 | raise error.Abort( |
|
1385 | 1384 | _('largefile %s is not in cache and could not be ' |
|
1386 | 1385 | 'downloaded') % lf) |
|
1387 | 1386 | path = lfutil.usercachepath(repo.ui, hash) |
|
1388 | 1387 | with open(path, "rb") as fpin: |
|
1389 | 1388 | for chunk in util.filechunkiter(fpin): |
|
1390 | 1389 | fp.write(chunk) |
|
1391 | 1390 | err = 0 |
|
1392 | 1391 | return err |
|
1393 | 1392 | |
|
1394 | 1393 | @eh.wrapfunction(merge, 'update') |
|
1395 | 1394 | def mergeupdate(orig, repo, node, branchmerge, force, |
|
1396 | 1395 | *args, **kwargs): |
|
1397 | 1396 | matcher = kwargs.get(r'matcher', None) |
|
1398 | 1397 | # note if this is a partial update |
|
1399 | 1398 | partial = matcher and not matcher.always() |
|
1400 | 1399 | with repo.wlock(): |
|
1401 | 1400 | # branch | | | |
|
1402 | 1401 | # merge | force | partial | action |
|
1403 | 1402 | # -------+-------+---------+-------------- |
|
1404 | 1403 | # x | x | x | linear-merge |
|
1405 | 1404 | # o | x | x | branch-merge |
|
1406 | 1405 | # x | o | x | overwrite (as clean update) |
|
1407 | 1406 | # o | o | x | force-branch-merge (*1) |
|
1408 | 1407 | # x | x | o | (*) |
|
1409 | 1408 | # o | x | o | (*) |
|
1410 | 1409 | # x | o | o | overwrite (as revert) |
|
1411 | 1410 | # o | o | o | (*) |
|
1412 | 1411 | # |
|
1413 | 1412 | # (*) don't care |
|
1414 | 1413 | # (*1) deprecated, but used internally (e.g: "rebase --collapse") |
|
1415 | 1414 | |
|
1416 | 1415 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
1417 | | unsure, s = lfdirstate.status(matchmod.always(repo.root,

1418 | | repo.getcwd()),

1419 | | subrepos=[], ignored=False,

1420 | | clean=True, unknown=False)

| 1416 | unsure, s = lfdirstate.status(matchmod.always(), subrepos=[],

| 1417 | ignored=False, clean=True, unknown=False)
|
1421 | 1418 | oldclean = set(s.clean) |
|
1422 | 1419 | pctx = repo['.'] |
|
1423 | 1420 | dctx = repo[node] |
|
1424 | 1421 | for lfile in unsure + s.modified: |
|
1425 | 1422 | lfileabs = repo.wvfs.join(lfile) |
|
1426 | 1423 | if not repo.wvfs.exists(lfileabs): |
|
1427 | 1424 | continue |
|
1428 | 1425 | lfhash = lfutil.hashfile(lfileabs) |
|
1429 | 1426 | standin = lfutil.standin(lfile) |
|
1430 | 1427 | lfutil.writestandin(repo, standin, lfhash, |
|
1431 | 1428 | lfutil.getexecutable(lfileabs)) |
|
1432 | 1429 | if (standin in pctx and |
|
1433 | 1430 | lfhash == lfutil.readasstandin(pctx[standin])): |
|
1434 | 1431 | oldclean.add(lfile) |
|
1435 | 1432 | for lfile in s.added: |
|
1436 | 1433 | fstandin = lfutil.standin(lfile) |
|
1437 | 1434 | if fstandin not in dctx: |
|
1438 | 1435 | # in this case, content of standin file is meaningless |
|
1439 | 1436 | # (in dctx, lfile is unknown, or normal file) |
|
1440 | 1437 | continue |
|
1441 | 1438 | lfutil.updatestandin(repo, lfile, fstandin) |
|
1442 | 1439 | # mark all clean largefiles as dirty, just in case the update gets |
|
1443 | 1440 | # interrupted before largefiles and lfdirstate are synchronized |
|
1444 | 1441 | for lfile in oldclean: |
|
1445 | 1442 | lfdirstate.normallookup(lfile) |
|
1446 | 1443 | lfdirstate.write() |
|
1447 | 1444 | |
|
1448 | 1445 | oldstandins = lfutil.getstandinsstate(repo) |
|
1449 | 1446 | # Make sure the merge runs on disk, not in-memory. largefiles is not a |
|
1450 | 1447 | # good candidate for in-memory merge (large files, custom dirstate, |
|
1451 | 1448 | # matcher usage). |
|
1452 | 1449 | kwargs[r'wc'] = repo[None] |
|
1453 | 1450 | result = orig(repo, node, branchmerge, force, *args, **kwargs) |
|
1454 | 1451 | |
|
1455 | 1452 | newstandins = lfutil.getstandinsstate(repo) |
|
1456 | 1453 | filelist = lfutil.getlfilestoupdate(oldstandins, newstandins) |
|
1457 | 1454 | |
|
1458 | 1455 | # to avoid leaving all largefiles as dirty and thus rehash them, mark |
|
1459 | 1456 | # all the ones that didn't change as clean |
|
1460 | 1457 | for lfile in oldclean.difference(filelist): |
|
1461 | 1458 | lfdirstate.normal(lfile) |
|
1462 | 1459 | lfdirstate.write() |
|
1463 | 1460 | |
|
1464 | 1461 | if branchmerge or force or partial: |
|
1465 | 1462 | filelist.extend(s.deleted + s.removed) |
|
1466 | 1463 | |
|
1467 | 1464 | lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, |
|
1468 | 1465 | normallookup=partial) |
|
1469 | 1466 | |
|
1470 | 1467 | return result |
|
1471 | 1468 | |
|
1472 | 1469 | @eh.wrapfunction(scmutil, 'marktouched') |
|
1473 | 1470 | def scmutilmarktouched(orig, repo, files, *args, **kwargs): |
|
1474 | 1471 | result = orig(repo, files, *args, **kwargs) |
|
1475 | 1472 | |
|
1476 | 1473 | filelist = [] |
|
1477 | 1474 | for f in files: |
|
1478 | 1475 | lf = lfutil.splitstandin(f) |
|
1479 | 1476 | if lf is not None: |
|
1480 | 1477 | filelist.append(lf) |
|
1481 | 1478 | if filelist: |
|
1482 | 1479 | lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, |
|
1483 | 1480 | printmessage=False, normallookup=True) |
|
1484 | 1481 | |
|
1485 | 1482 | return result |
|
1486 | 1483 | |
|
1487 | 1484 | @eh.wrapfunction(upgrade, 'preservedrequirements') |
|
1488 | 1485 | @eh.wrapfunction(upgrade, 'supporteddestrequirements') |
|
1489 | 1486 | def upgraderequirements(orig, repo): |
|
1490 | 1487 | reqs = orig(repo) |
|
1491 | 1488 | if 'largefiles' in repo.requirements: |
|
1492 | 1489 | reqs.add('largefiles') |
|
1493 | 1490 | return reqs |
|
1494 | 1491 | |
|
1495 | 1492 | _lfscheme = 'largefile://' |
|
1496 | 1493 | |
|
1497 | 1494 | @eh.wrapfunction(urlmod, 'open') |
|
1498 | 1495 | def openlargefile(orig, ui, url_, data=None): |
|
1499 | 1496 | if url_.startswith(_lfscheme): |
|
1500 | 1497 | if data: |
|
1501 | 1498 | msg = "cannot use data on a 'largefile://' url" |
|
1502 | 1499 | raise error.ProgrammingError(msg) |
|
1503 | 1500 | lfid = url_[len(_lfscheme):] |
|
1504 | 1501 | return storefactory.getlfile(ui, lfid) |
|
1505 | 1502 | else: |
|
1506 | 1503 | return orig(ui, url_, data=data) |
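
Every changed hunk in this diff makes the same mechanical change: matcher factories are now called without the old root/cwd arguments, e.g. matchmod.always(repo.root, repo.getcwd()) becomes matchmod.always(). A minimal before/after sketch of a call site, assuming only the zero-argument form shown in the new lines:

    from mercurial import match as matchmod

    # old call sites had to pass the repository root and working directory
    # m = matchmod.always(repo.root, repo.getcwd())

    # new call sites: the always-matcher needs no arguments at all
    m = matchmod.always()
    assert m.always()        # the matcher reports that it matches everything
    assert m(b'any/path')    # and any individual path is accepted

Only the calling convention changes; the returned matcher behaves the same, which is why the surrounding status() and dirstate logic is left untouched.
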
@@ -1,393 +1,393 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''setup for largefiles repositories: reposetup''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import copy |
|
13 | 13 | |
|
14 | 14 | from mercurial.i18n import _ |
|
15 | 15 | |
|
16 | 16 | from mercurial import ( |
|
17 | 17 | error, |
|
18 | 18 | localrepo, |
|
19 | 19 | match as matchmod, |
|
20 | 20 | scmutil, |
|
21 | 21 | ) |
|
22 | 22 | |
|
23 | 23 | from . import ( |
|
24 | 24 | lfcommands, |
|
25 | 25 | lfutil, |
|
26 | 26 | ) |
|
27 | 27 | |
|
28 | 28 | def reposetup(ui, repo): |
|
29 | 29 | # wire repositories should be given new wireproto functions |
|
30 | 30 | # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs" |
|
31 | 31 | if not repo.local(): |
|
32 | 32 | return |
|
33 | 33 | |
|
34 | 34 | class lfilesrepo(repo.__class__): |
|
35 | 35 | # the mark to examine whether "repo" object enables largefiles or not |
|
36 | 36 | _largefilesenabled = True |
|
37 | 37 | |
|
38 | 38 | lfstatus = False |
|
39 | 39 | def status_nolfiles(self, *args, **kwargs): |
|
40 | 40 | return super(lfilesrepo, self).status(*args, **kwargs) |
|
41 | 41 | |
|
42 | 42 | # When lfstatus is set, return a context that gives the names |
|
43 | 43 | # of largefiles instead of their corresponding standins and |
|
44 | 44 | # identifies the largefiles as always binary, regardless of |
|
45 | 45 | # their actual contents. |
|
46 | 46 | def __getitem__(self, changeid): |
|
47 | 47 | ctx = super(lfilesrepo, self).__getitem__(changeid) |
|
48 | 48 | if self.lfstatus: |
|
49 | 49 | class lfilesctx(ctx.__class__): |
|
50 | 50 | def files(self): |
|
51 | 51 | filenames = super(lfilesctx, self).files() |
|
52 | 52 | return [lfutil.splitstandin(f) or f for f in filenames] |
|
53 | 53 | def manifest(self): |
|
54 | 54 | man1 = super(lfilesctx, self).manifest() |
|
55 | 55 | class lfilesmanifest(man1.__class__): |
|
56 | 56 | def __contains__(self, filename): |
|
57 | 57 | orig = super(lfilesmanifest, self).__contains__ |
|
58 | 58 | return (orig(filename) or |
|
59 | 59 | orig(lfutil.standin(filename))) |
|
60 | 60 | man1.__class__ = lfilesmanifest |
|
61 | 61 | return man1 |
|
62 | 62 | def filectx(self, path, fileid=None, filelog=None): |
|
63 | 63 | orig = super(lfilesctx, self).filectx |
|
64 | 64 | try: |
|
65 | 65 | if filelog is not None: |
|
66 | 66 | result = orig(path, fileid, filelog) |
|
67 | 67 | else: |
|
68 | 68 | result = orig(path, fileid) |
|
69 | 69 | except error.LookupError: |
|
70 | 70 | # Adding a null character will cause Mercurial to |
|
71 | 71 | # identify this as a binary file. |
|
72 | 72 | if filelog is not None: |
|
73 | 73 | result = orig(lfutil.standin(path), fileid, |
|
74 | 74 | filelog) |
|
75 | 75 | else: |
|
76 | 76 | result = orig(lfutil.standin(path), fileid) |
|
77 | 77 | olddata = result.data |
|
78 | 78 | result.data = lambda: olddata() + '\0' |
|
79 | 79 | return result |
|
80 | 80 | ctx.__class__ = lfilesctx |
|
81 | 81 | return ctx |
|
82 | 82 | |
|
83 | 83 | # Figure out the status of big files and insert them into the |
|
84 | 84 | # appropriate list in the result. Also removes standin files |
|
85 | 85 | # from the listing. Revert to the original status if |
|
86 | 86 | # self.lfstatus is False. |
|
87 | 87 | # XXX large file status is buggy when used on repo proxy. |
|
88 | 88 | # XXX this needs to be investigated. |
|
89 | 89 | @localrepo.unfilteredmethod |
|
90 | 90 | def status(self, node1='.', node2=None, match=None, ignored=False, |
|
91 | 91 | clean=False, unknown=False, listsubrepos=False): |
|
92 | 92 | listignored, listclean, listunknown = ignored, clean, unknown |
|
93 | 93 | orig = super(lfilesrepo, self).status |
|
94 | 94 | if not self.lfstatus: |
|
95 | 95 | return orig(node1, node2, match, listignored, listclean, |
|
96 | 96 | listunknown, listsubrepos) |
|
97 | 97 | |
|
98 | 98 | # some calls in this function rely on the old version of status |
|
99 | 99 | self.lfstatus = False |
|
100 | 100 | ctx1 = self[node1] |
|
101 | 101 | ctx2 = self[node2] |
|
102 | 102 | working = ctx2.rev() is None |
|
103 | 103 | parentworking = working and ctx1 == self['.'] |
|
104 | 104 | |
|
105 | 105 | if match is None: |
|
106 | | match = matchmod.always(self.root, self.getcwd())

| 106 | match = matchmod.always()
|
107 | 107 | |
|
108 | 108 | wlock = None |
|
109 | 109 | try: |
|
110 | 110 | try: |
|
111 | 111 | # updating the dirstate is optional |
|
112 | 112 | # so we don't wait on the lock |
|
113 | 113 | wlock = self.wlock(False) |
|
114 | 114 | except error.LockError: |
|
115 | 115 | pass |
|
116 | 116 | |
|
117 | 117 | # First check if paths or patterns were specified on the |
|
118 | 118 | # command line. If there were, and they don't match any |
|
119 | 119 | # largefiles, we should just bail here and let super |
|
120 | 120 | # handle it -- thus gaining a big performance boost. |
|
121 | 121 | lfdirstate = lfutil.openlfdirstate(ui, self) |
|
122 | 122 | if not match.always(): |
|
123 | 123 | for f in lfdirstate: |
|
124 | 124 | if match(f): |
|
125 | 125 | break |
|
126 | 126 | else: |
|
127 | 127 | return orig(node1, node2, match, listignored, listclean, |
|
128 | 128 | listunknown, listsubrepos) |
|
129 | 129 | |
|
130 | 130 | # Create a copy of match that matches standins instead |
|
131 | 131 | # of largefiles. |
|
132 | 132 | def tostandins(files): |
|
133 | 133 | if not working: |
|
134 | 134 | return files |
|
135 | 135 | newfiles = [] |
|
136 | 136 | dirstate = self.dirstate |
|
137 | 137 | for f in files: |
|
138 | 138 | sf = lfutil.standin(f) |
|
139 | 139 | if sf in dirstate: |
|
140 | 140 | newfiles.append(sf) |
|
141 | 141 | elif dirstate.hasdir(sf): |
|
142 | 142 | # Directory entries could be regular or |
|
143 | 143 | # standin, check both |
|
144 | 144 | newfiles.extend((f, sf)) |
|
145 | 145 | else: |
|
146 | 146 | newfiles.append(f) |
|
147 | 147 | return newfiles |
|
148 | 148 | |
|
149 | 149 | m = copy.copy(match) |
|
150 | 150 | m._files = tostandins(m._files) |
|
151 | 151 | |
|
152 | 152 | result = orig(node1, node2, m, ignored, clean, unknown, |
|
153 | 153 | listsubrepos) |
|
154 | 154 | if working: |
|
155 | 155 | |
|
156 | 156 | def sfindirstate(f): |
|
157 | 157 | sf = lfutil.standin(f) |
|
158 | 158 | dirstate = self.dirstate |
|
159 | 159 | return sf in dirstate or dirstate.hasdir(sf) |
|
160 | 160 | |
|
161 | 161 | match._files = [f for f in match._files |
|
162 | 162 | if sfindirstate(f)] |
|
163 | 163 | # Don't waste time getting the ignored and unknown |
|
164 | 164 | # files from lfdirstate |
|
165 | 165 | unsure, s = lfdirstate.status(match, subrepos=[], |
|
166 | 166 | ignored=False, |
|
167 | 167 | clean=listclean, |
|
168 | 168 | unknown=False) |
|
169 | 169 | (modified, added, removed, deleted, clean) = ( |
|
170 | 170 | s.modified, s.added, s.removed, s.deleted, s.clean) |
|
171 | 171 | if parentworking: |
|
172 | 172 | for lfile in unsure: |
|
173 | 173 | standin = lfutil.standin(lfile) |
|
174 | 174 | if standin not in ctx1: |
|
175 | 175 | # from second parent |
|
176 | 176 | modified.append(lfile) |
|
177 | 177 | elif lfutil.readasstandin(ctx1[standin]) \ |
|
178 | 178 | != lfutil.hashfile(self.wjoin(lfile)): |
|
179 | 179 | modified.append(lfile) |
|
180 | 180 | else: |
|
181 | 181 | if listclean: |
|
182 | 182 | clean.append(lfile) |
|
183 | 183 | lfdirstate.normal(lfile) |
|
184 | 184 | else: |
|
185 | 185 | tocheck = unsure + modified + added + clean |
|
186 | 186 | modified, added, clean = [], [], [] |
|
187 | 187 | checkexec = self.dirstate._checkexec |
|
188 | 188 | |
|
189 | 189 | for lfile in tocheck: |
|
190 | 190 | standin = lfutil.standin(lfile) |
|
191 | 191 | if standin in ctx1: |
|
192 | 192 | abslfile = self.wjoin(lfile) |
|
193 | 193 | if ((lfutil.readasstandin(ctx1[standin]) != |
|
194 | 194 | lfutil.hashfile(abslfile)) or |
|
195 | 195 | (checkexec and |
|
196 | 196 | ('x' in ctx1.flags(standin)) != |
|
197 | 197 | bool(lfutil.getexecutable(abslfile)))): |
|
198 | 198 | modified.append(lfile) |
|
199 | 199 | elif listclean: |
|
200 | 200 | clean.append(lfile) |
|
201 | 201 | else: |
|
202 | 202 | added.append(lfile) |
|
203 | 203 | |
|
204 | 204 | # at this point, 'removed' contains largefiles |
|
205 | 205 | # marked as 'R' in the working context. |
|
206 | 206 | # then, largefiles not managed also in the target |
|
207 | 207 | # context should be excluded from 'removed'. |
|
208 | 208 | removed = [lfile for lfile in removed |
|
209 | 209 | if lfutil.standin(lfile) in ctx1] |
|
210 | 210 | |
|
211 | 211 | # Standins no longer found in lfdirstate have been deleted |
|
212 | 212 | for standin in ctx1.walk(lfutil.getstandinmatcher(self)): |
|
213 | 213 | lfile = lfutil.splitstandin(standin) |
|
214 | 214 | if not match(lfile): |
|
215 | 215 | continue |
|
216 | 216 | if lfile not in lfdirstate: |
|
217 | 217 | deleted.append(lfile) |
|
218 | 218 | # Sync "largefile has been removed" back to the |
|
219 | 219 | # standin. Removing a file as a side effect of |
|
220 | 220 | # running status is gross, but the alternatives (if |
|
221 | 221 | # any) are worse. |
|
222 | 222 | self.wvfs.unlinkpath(standin, ignoremissing=True) |
|
223 | 223 | |
|
224 | 224 | # Filter result lists |
|
225 | 225 | result = list(result) |
|
226 | 226 | |
|
227 | 227 | # Largefiles are not really removed when they're |
|
228 | 228 | # still in the normal dirstate. Likewise, normal |
|
229 | 229 | # files are not really removed if they are still in |
|
230 | 230 | # lfdirstate. This happens in merges where files |
|
231 | 231 | # change type. |
|
232 | 232 | removed = [f for f in removed |
|
233 | 233 | if f not in self.dirstate] |
|
234 | 234 | result[2] = [f for f in result[2] |
|
235 | 235 | if f not in lfdirstate] |
|
236 | 236 | |
|
237 | 237 | lfiles = set(lfdirstate._map) |
|
238 | 238 | # Unknown files |
|
239 | 239 | result[4] = set(result[4]).difference(lfiles) |
|
240 | 240 | # Ignored files |
|
241 | 241 | result[5] = set(result[5]).difference(lfiles) |
|
242 | 242 | # combine normal files and largefiles |
|
243 | 243 | normals = [[fn for fn in filelist |
|
244 | 244 | if not lfutil.isstandin(fn)] |
|
245 | 245 | for filelist in result] |
|
246 | 246 | lfstatus = (modified, added, removed, deleted, [], [], |
|
247 | 247 | clean) |
|
248 | 248 | result = [sorted(list1 + list2) |
|
249 | 249 | for (list1, list2) in zip(normals, lfstatus)] |
|
250 | 250 | else: # not against working directory |
|
251 | 251 | result = [[lfutil.splitstandin(f) or f for f in items] |
|
252 | 252 | for items in result] |
|
253 | 253 | |
|
254 | 254 | if wlock: |
|
255 | 255 | lfdirstate.write() |
|
256 | 256 | |
|
257 | 257 | finally: |
|
258 | 258 | if wlock: |
|
259 | 259 | wlock.release() |
|
260 | 260 | |
|
261 | 261 | self.lfstatus = True |
|
262 | 262 | return scmutil.status(*result) |
|
263 | 263 | |
|
264 | 264 | def commitctx(self, ctx, *args, **kwargs): |
|
265 | 265 | node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs) |
|
266 | 266 | class lfilesctx(ctx.__class__): |
|
267 | 267 | def markcommitted(self, node): |
|
268 | 268 | orig = super(lfilesctx, self).markcommitted |
|
269 | 269 | return lfutil.markcommitted(orig, self, node) |
|
270 | 270 | ctx.__class__ = lfilesctx |
|
271 | 271 | return node |
|
272 | 272 | |
|
273 | 273 | # Before commit, largefile standins have not had their |
|
274 | 274 | # contents updated to reflect the hash of their largefile. |
|
275 | 275 | # Do that here. |
|
276 | 276 | def commit(self, text="", user=None, date=None, match=None, |
|
277 | 277 | force=False, editor=False, extra=None): |
|
278 | 278 | if extra is None: |
|
279 | 279 | extra = {} |
|
280 | 280 | orig = super(lfilesrepo, self).commit |
|
281 | 281 | |
|
282 | 282 | with self.wlock(): |
|
283 | 283 | lfcommithook = self._lfcommithooks[-1] |
|
284 | 284 | match = lfcommithook(self, match) |
|
285 | 285 | result = orig(text=text, user=user, date=date, match=match, |
|
286 | 286 | force=force, editor=editor, extra=extra) |
|
287 | 287 | return result |
|
288 | 288 | |
|
289 | 289 | def push(self, remote, force=False, revs=None, newbranch=False): |
|
290 | 290 | if remote.local(): |
|
291 | 291 | missing = set(self.requirements) - remote.local().supported |
|
292 | 292 | if missing: |
|
293 | 293 | msg = _("required features are not" |
|
294 | 294 | " supported in the destination:" |
|
295 | 295 | " %s") % (', '.join(sorted(missing))) |
|
296 | 296 | raise error.Abort(msg) |
|
297 | 297 | return super(lfilesrepo, self).push(remote, force=force, revs=revs, |
|
298 | 298 | newbranch=newbranch) |
|
299 | 299 | |
|
300 | 300 | # TODO: _subdirlfs should be moved into "lfutil.py", because |
|
301 | 301 | # it is referred only from "lfutil.updatestandinsbymatch" |
|
302 | 302 | def _subdirlfs(self, files, lfiles): |
|
303 | 303 | ''' |
|
304 | 304 | Adjust matched file list |
|
305 | 305 | If we pass a directory to commit whose only committable files |
|
306 | 306 | are largefiles, the core commit code aborts before finding |
|
307 | 307 | the largefiles. |
|
308 | 308 | So we do the following: |
|
309 | 309 | For directories that only have largefiles as matches, |
|
310 | 310 | we explicitly add the largefiles to the match list and remove |
|
311 | 311 | the directory. |
|
312 | 312 | In other cases, we leave the match list unmodified. |
|
313 | 313 | ''' |
|
314 | 314 | actualfiles = [] |
|
315 | 315 | dirs = [] |
|
316 | 316 | regulars = [] |
|
317 | 317 | |
|
318 | 318 | for f in files: |
|
319 | 319 | if lfutil.isstandin(f + '/'): |
|
320 | 320 | raise error.Abort( |
|
321 | 321 | _('file "%s" is a largefile standin') % f, |
|
322 | 322 | hint=('commit the largefile itself instead')) |
|
323 | 323 | # Scan directories |
|
324 | 324 | if self.wvfs.isdir(f): |
|
325 | 325 | dirs.append(f) |
|
326 | 326 | else: |
|
327 | 327 | regulars.append(f) |
|
328 | 328 | |
|
329 | 329 | for f in dirs: |
|
330 | 330 | matcheddir = False |
|
331 | 331 | d = self.dirstate.normalize(f) + '/' |
|
332 | 332 | # Check for matched normal files |
|
333 | 333 | for mf in regulars: |
|
334 | 334 | if self.dirstate.normalize(mf).startswith(d): |
|
335 | 335 | actualfiles.append(f) |
|
336 | 336 | matcheddir = True |
|
337 | 337 | break |
|
338 | 338 | if not matcheddir: |
|
339 | 339 | # If no normal match, manually append |
|
340 | 340 | # any matching largefiles |
|
341 | 341 | for lf in lfiles: |
|
342 | 342 | if self.dirstate.normalize(lf).startswith(d): |
|
343 | 343 | actualfiles.append(lf) |
|
344 | 344 | if not matcheddir: |
|
345 | 345 | # There may still be normal files in the dir, so |
|
346 | 346 | # add a directory to the list, which |
|
347 | 347 | # forces status/dirstate to walk all files and |
|
348 | 348 | # call the match function on the matcher, even |
|
349 | 349 | # on case sensitive filesystems. |
|
350 | 350 | actualfiles.append('.') |
|
351 | 351 | matcheddir = True |
|
352 | 352 | # Nothing in dir, so readd it |
|
353 | 353 | # and let commit reject it |
|
354 | 354 | if not matcheddir: |
|
355 | 355 | actualfiles.append(f) |
|
356 | 356 | |
|
357 | 357 | # Always add normal files |
|
358 | 358 | actualfiles += regulars |
|
359 | 359 | return actualfiles |
|
360 | 360 | |
|
361 | 361 | repo.__class__ = lfilesrepo |
|
362 | 362 | |
|
363 | 363 | # stack of hooks being executed before committing. |
|
364 | 364 | # only last element ("_lfcommithooks[-1]") is used for each committing. |
|
365 | 365 | repo._lfcommithooks = [lfutil.updatestandinsbymatch] |
|
366 | 366 | |
|
367 | 367 | # Stack of status writer functions taking "*msg, **opts" arguments |
|
368 | 368 | # like "ui.status()". Only last element ("_lfstatuswriters[-1]") |
|
369 | 369 | # is used to write status out. |
|
370 | 370 | repo._lfstatuswriters = [ui.status] |
|
371 | 371 | |
|
372 | 372 | def prepushoutgoinghook(pushop): |
|
373 | 373 | """Push largefiles for pushop before pushing revisions.""" |
|
374 | 374 | lfrevs = pushop.lfrevs |
|
375 | 375 | if lfrevs is None: |
|
376 | 376 | lfrevs = pushop.outgoing.missing |
|
377 | 377 | if lfrevs: |
|
378 | 378 | toupload = set() |
|
379 | 379 | addfunc = lambda fn, lfhash: toupload.add(lfhash) |
|
380 | 380 | lfutil.getlfilestoupload(pushop.repo, lfrevs, |
|
381 | 381 | addfunc) |
|
382 | 382 | lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload) |
|
383 | 383 | repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook) |
|
384 | 384 | |
|
385 | 385 | def checkrequireslfiles(ui, repo, **kwargs): |
|
386 | 386 | if 'largefiles' not in repo.requirements and any( |
|
387 | 387 | lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()): |
|
388 | 388 | repo.requirements.add('largefiles') |
|
389 | 389 | repo._writerequirements() |
|
390 | 390 | |
|
391 | 391 | ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles, |
|
392 | 392 | 'largefiles') |
|
393 | 393 | ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles') |
@@ -1,404 +1,404 b'' | |||
|
1 | 1 | # remotefilelogserver.py - server logic for a remotefilelog server |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import errno |
|
10 | 10 | import os |
|
11 | 11 | import stat |
|
12 | 12 | import time |
|
13 | 13 | import zlib |
|
14 | 14 | |
|
15 | 15 | from mercurial.i18n import _ |
|
16 | 16 | from mercurial.node import bin, hex, nullid |
|
17 | 17 | from mercurial import ( |
|
18 | 18 | changegroup, |
|
19 | 19 | changelog, |
|
20 | 20 | context, |
|
21 | 21 | error, |
|
22 | 22 | extensions, |
|
23 | 23 | match, |
|
24 | 24 | store, |
|
25 | 25 | streamclone, |
|
26 | 26 | util, |
|
27 | 27 | wireprotoserver, |
|
28 | 28 | wireprototypes, |
|
29 | 29 | wireprotov1server, |
|
30 | 30 | ) |
|
31 | 31 | from . import ( |
|
32 | 32 | constants, |
|
33 | 33 | shallowutil, |
|
34 | 34 | ) |
|
35 | 35 | |
|
36 | 36 | _sshv1server = wireprotoserver.sshv1protocolhandler |
|
37 | 37 | |
|
38 | 38 | def setupserver(ui, repo): |
|
39 | 39 | """Sets up a normal Mercurial repo so it can serve files to shallow repos. |
|
40 | 40 | """ |
|
41 | 41 | onetimesetup(ui) |
|
42 | 42 | |
|
43 | 43 | # don't send files to shallow clients during pulls |
|
44 | 44 | def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source, |
|
45 | 45 | *args, **kwargs): |
|
46 | 46 | caps = self._bundlecaps or [] |
|
47 | 47 | if constants.BUNDLE2_CAPABLITY in caps: |
|
48 | 48 | # only send files that don't match the specified patterns |
|
49 | 49 | includepattern = None |
|
50 | 50 | excludepattern = None |
|
51 | 51 | for cap in (self._bundlecaps or []): |
|
52 | 52 | if cap.startswith("includepattern="): |
|
53 | 53 | includepattern = cap[len("includepattern="):].split('\0') |
|
54 | 54 | elif cap.startswith("excludepattern="): |
|
55 | 55 | excludepattern = cap[len("excludepattern="):].split('\0') |
|
56 | 56 | |
|
57 |    | m = match.always(

   | 57 | m = match.always()
|
58 | 58 | if includepattern or excludepattern: |
|
59 | 59 | m = match.match(repo.root, '', None, |
|
60 | 60 | includepattern, excludepattern) |
|
61 | 61 | |
|
62 | 62 | changedfiles = list([f for f in changedfiles if not m(f)]) |
|
63 | 63 | return orig(self, changedfiles, linknodes, commonrevs, source, |
|
64 | 64 | *args, **kwargs) |
|
65 | 65 | |
|
66 | 66 | extensions.wrapfunction( |
|
67 | 67 | changegroup.cgpacker, 'generatefiles', generatefiles) |
|
68 | 68 | |
|
69 | 69 | onetime = False |
|
70 | 70 | def onetimesetup(ui): |
|
71 | 71 | """Configures the wireprotocol for both clients and servers. |
|
72 | 72 | """ |
|
73 | 73 | global onetime |
|
74 | 74 | if onetime: |
|
75 | 75 | return |
|
76 | 76 | onetime = True |
|
77 | 77 | |
|
78 | 78 | # support file content requests |
|
79 | 79 | wireprotov1server.wireprotocommand( |
|
80 | 80 | 'x_rfl_getflogheads', 'path', permission='pull')(getflogheads) |
|
81 | 81 | wireprotov1server.wireprotocommand( |
|
82 | 82 | 'x_rfl_getfiles', '', permission='pull')(getfiles) |
|
83 | 83 | wireprotov1server.wireprotocommand( |
|
84 | 84 | 'x_rfl_getfile', 'file node', permission='pull')(getfile) |
|
85 | 85 | |
|
86 | 86 | class streamstate(object): |
|
87 | 87 | match = None |
|
88 | 88 | shallowremote = False |
|
89 | 89 | noflatmf = False |
|
90 | 90 | state = streamstate() |
|
91 | 91 | |
|
92 | 92 | def stream_out_shallow(repo, proto, other): |
|
93 | 93 | includepattern = None |
|
94 | 94 | excludepattern = None |
|
95 | 95 | raw = other.get('includepattern') |
|
96 | 96 | if raw: |
|
97 | 97 | includepattern = raw.split('\0') |
|
98 | 98 | raw = other.get('excludepattern') |
|
99 | 99 | if raw: |
|
100 | 100 | excludepattern = raw.split('\0') |
|
101 | 101 | |
|
102 | 102 | oldshallow = state.shallowremote |
|
103 | 103 | oldmatch = state.match |
|
104 | 104 | oldnoflatmf = state.noflatmf |
|
105 | 105 | try: |
|
106 | 106 | state.shallowremote = True |
|
107 |     | state.match = match.always(

    | 107 | state.match = match.always()
|
108 | 108 | state.noflatmf = other.get('noflatmanifest') == 'True' |
|
109 | 109 | if includepattern or excludepattern: |
|
110 | 110 | state.match = match.match(repo.root, '', None, |
|
111 | 111 | includepattern, excludepattern) |
|
112 | 112 | streamres = wireprotov1server.stream(repo, proto) |
|
113 | 113 | |
|
114 | 114 | # Force the first value to execute, so the file list is computed |
|
115 | 115 | # within the try/finally scope |
|
116 | 116 | first = next(streamres.gen) |
|
117 | 117 | second = next(streamres.gen) |
|
118 | 118 | def gen(): |
|
119 | 119 | yield first |
|
120 | 120 | yield second |
|
121 | 121 | for value in streamres.gen: |
|
122 | 122 | yield value |
|
123 | 123 | return wireprototypes.streamres(gen()) |
|
124 | 124 | finally: |
|
125 | 125 | state.shallowremote = oldshallow |
|
126 | 126 | state.match = oldmatch |
|
127 | 127 | state.noflatmf = oldnoflatmf |
|
128 | 128 | |
|
129 | 129 | wireprotov1server.commands['stream_out_shallow'] = (stream_out_shallow, '*') |
|
130 | 130 | |
|
131 | 131 | # don't clone filelogs to shallow clients |
|
132 | 132 | def _walkstreamfiles(orig, repo, matcher=None): |
|
133 | 133 | if state.shallowremote: |
|
134 | 134 | # if we are shallow ourselves, stream our local commits |
|
135 | 135 | if shallowutil.isenabled(repo): |
|
136 | 136 | striplen = len(repo.store.path) + 1 |
|
137 | 137 | readdir = repo.store.rawvfs.readdir |
|
138 | 138 | visit = [os.path.join(repo.store.path, 'data')] |
|
139 | 139 | while visit: |
|
140 | 140 | p = visit.pop() |
|
141 | 141 | for f, kind, st in readdir(p, stat=True): |
|
142 | 142 | fp = p + '/' + f |
|
143 | 143 | if kind == stat.S_IFREG: |
|
144 | 144 | if not fp.endswith('.i') and not fp.endswith('.d'): |
|
145 | 145 | n = util.pconvert(fp[striplen:]) |
|
146 | 146 | yield (store.decodedir(n), n, st.st_size) |
|
147 | 147 | if kind == stat.S_IFDIR: |
|
148 | 148 | visit.append(fp) |
|
149 | 149 | |
|
150 | 150 | if 'treemanifest' in repo.requirements: |
|
151 | 151 | for (u, e, s) in repo.store.datafiles(): |
|
152 | 152 | if (u.startswith('meta/') and |
|
153 | 153 | (u.endswith('.i') or u.endswith('.d'))): |
|
154 | 154 | yield (u, e, s) |
|
155 | 155 | |
|
156 | 156 | # Return .d and .i files that do not match the shallow pattern |
|
157 | 157 | match = state.match |
|
158 | 158 | if match and not match.always(): |
|
159 | 159 | for (u, e, s) in repo.store.datafiles(): |
|
160 | 160 | f = u[5:-2] # trim data/... and .i/.d |
|
161 | 161 | if not state.match(f): |
|
162 | 162 | yield (u, e, s) |
|
163 | 163 | |
|
164 | 164 | for x in repo.store.topfiles(): |
|
165 | 165 | if state.noflatmf and x[0][:11] == '00manifest.': |
|
166 | 166 | continue |
|
167 | 167 | yield x |
|
168 | 168 | |
|
169 | 169 | elif shallowutil.isenabled(repo): |
|
170 | 170 | # don't allow cloning from a shallow repo to a full repo |
|
171 | 171 | # since it would require fetching every version of every |
|
172 | 172 | # file in order to create the revlogs. |
|
173 | 173 | raise error.Abort(_("Cannot clone from a shallow repo " |
|
174 | 174 | "to a full repo.")) |
|
175 | 175 | else: |
|
176 | 176 | for x in orig(repo, matcher): |
|
177 | 177 | yield x |
|
178 | 178 | |
|
179 | 179 | extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles) |
|
180 | 180 | |
|
181 | 181 | # expose remotefilelog capabilities |
|
182 | 182 | def _capabilities(orig, repo, proto): |
|
183 | 183 | caps = orig(repo, proto) |
|
184 | 184 | if (shallowutil.isenabled(repo) or ui.configbool('remotefilelog', |
|
185 | 185 | 'server')): |
|
186 | 186 | if isinstance(proto, _sshv1server): |
|
187 | 187 | # legacy getfiles method which only works over ssh |
|
188 | 188 | caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES) |
|
189 | 189 | caps.append('x_rfl_getflogheads') |
|
190 | 190 | caps.append('x_rfl_getfile') |
|
191 | 191 | return caps |
|
192 | 192 | extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities) |
|
193 | 193 | |
|
194 | 194 | def _adjustlinkrev(orig, self, *args, **kwargs): |
|
195 | 195 | # When generating file blobs, taking the real path is too slow on large |
|
196 | 196 | # repos, so force it to just return the linkrev directly. |
|
197 | 197 | repo = self._repo |
|
198 | 198 | if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev: |
|
199 | 199 | return self._filelog.linkrev(self._filelog.rev(self._filenode)) |
|
200 | 200 | return orig(self, *args, **kwargs) |
|
201 | 201 | |
|
202 | 202 | extensions.wrapfunction( |
|
203 | 203 | context.basefilectx, '_adjustlinkrev', _adjustlinkrev) |
|
204 | 204 | |
|
205 | 205 | def _iscmd(orig, cmd): |
|
206 | 206 | if cmd == 'x_rfl_getfiles': |
|
207 | 207 | return False |
|
208 | 208 | return orig(cmd) |
|
209 | 209 | |
|
210 | 210 | extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd) |
|
211 | 211 | |
|
212 | 212 | def _loadfileblob(repo, cachepath, path, node): |
|
213 | 213 | filecachepath = os.path.join(cachepath, path, hex(node)) |
|
214 | 214 | if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0: |
|
215 | 215 | filectx = repo.filectx(path, fileid=node) |
|
216 | 216 | if filectx.node() == nullid: |
|
217 | 217 | repo.changelog = changelog.changelog(repo.svfs) |
|
218 | 218 | filectx = repo.filectx(path, fileid=node) |
|
219 | 219 | |
|
220 | 220 | text = createfileblob(filectx) |
|
221 | 221 | # TODO configurable compression engines |
|
222 | 222 | text = zlib.compress(text) |
|
223 | 223 | |
|
224 | 224 | # everything should be user & group read/writable |
|
225 | 225 | oldumask = os.umask(0o002) |
|
226 | 226 | try: |
|
227 | 227 | dirname = os.path.dirname(filecachepath) |
|
228 | 228 | if not os.path.exists(dirname): |
|
229 | 229 | try: |
|
230 | 230 | os.makedirs(dirname) |
|
231 | 231 | except OSError as ex: |
|
232 | 232 | if ex.errno != errno.EEXIST: |
|
233 | 233 | raise |
|
234 | 234 | |
|
235 | 235 | f = None |
|
236 | 236 | try: |
|
237 | 237 | f = util.atomictempfile(filecachepath, "wb") |
|
238 | 238 | f.write(text) |
|
239 | 239 | except (IOError, OSError): |
|
240 | 240 | # Don't abort if the user only has permission to read, |
|
241 | 241 | # and not write. |
|
242 | 242 | pass |
|
243 | 243 | finally: |
|
244 | 244 | if f: |
|
245 | 245 | f.close() |
|
246 | 246 | finally: |
|
247 | 247 | os.umask(oldumask) |
|
248 | 248 | else: |
|
249 | 249 | with open(filecachepath, "rb") as f: |
|
250 | 250 | text = f.read() |
|
251 | 251 | return text |
|
252 | 252 | |
|
253 | 253 | def getflogheads(repo, proto, path): |
|
254 | 254 | """A server api for requesting a filelog's heads |
|
255 | 255 | """ |
|
256 | 256 | flog = repo.file(path) |
|
257 | 257 | heads = flog.heads() |
|
258 | 258 | return '\n'.join((hex(head) for head in heads if head != nullid)) |
|
259 | 259 | |
|
260 | 260 | def getfile(repo, proto, file, node): |
|
261 | 261 | """A server api for requesting a particular version of a file. Can be used |
|
262 | 262 | in batches to request many files at once. The return protocol is: |
|
263 | 263 | <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or |
|
264 | 264 | non-zero for an error. |
|
265 | 265 | |
|
266 | 266 | data is a compressed blob with revlog flag and ancestors information. See |
|
267 | 267 | createfileblob for its content. |
|
268 | 268 | """ |
|
269 | 269 | if shallowutil.isenabled(repo): |
|
270 | 270 | return '1\0' + _('cannot fetch remote files from shallow repo') |
|
271 | 271 | cachepath = repo.ui.config("remotefilelog", "servercachepath") |
|
272 | 272 | if not cachepath: |
|
273 | 273 | cachepath = os.path.join(repo.path, "remotefilelogcache") |
|
274 | 274 | node = bin(node.strip()) |
|
275 | 275 | if node == nullid: |
|
276 | 276 | return '0\0' |
|
277 | 277 | return '0\0' + _loadfileblob(repo, cachepath, file, node) |
|
278 | 278 | |
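
Given the `<errorcode>\0<data/errormsg>` framing documented above, a client could split a response roughly like this (a sketch only; `response` stands for the raw payload returned by the command)::

    def parsegetfileresponse(response):
        code, _, payload = response.partition('\0')
        if code != '0':
            raise Exception('getfile failed: ' + payload)
        # On success the payload is the zlib-compressed file blob
        # produced by createfileblob() (empty for the null node).
        return payload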
|
279 | 279 | def getfiles(repo, proto): |
|
280 | 280 | """A server api for requesting particular versions of particular files. |
|
281 | 281 | """ |
|
282 | 282 | if shallowutil.isenabled(repo): |
|
283 | 283 | raise error.Abort(_('cannot fetch remote files from shallow repo')) |
|
284 | 284 | if not isinstance(proto, _sshv1server): |
|
285 | 285 | raise error.Abort(_('cannot fetch remote files over non-ssh protocol')) |
|
286 | 286 | |
|
287 | 287 | def streamer(): |
|
288 | 288 | fin = proto._fin |
|
289 | 289 | |
|
290 | 290 | cachepath = repo.ui.config("remotefilelog", "servercachepath") |
|
291 | 291 | if not cachepath: |
|
292 | 292 | cachepath = os.path.join(repo.path, "remotefilelogcache") |
|
293 | 293 | |
|
294 | 294 | while True: |
|
295 | 295 | request = fin.readline()[:-1] |
|
296 | 296 | if not request: |
|
297 | 297 | break |
|
298 | 298 | |
|
299 | 299 | node = bin(request[:40]) |
|
300 | 300 | if node == nullid: |
|
301 | 301 | yield '0\n' |
|
302 | 302 | continue |
|
303 | 303 | |
|
304 | 304 | path = request[40:] |
|
305 | 305 | |
|
306 | 306 | text = _loadfileblob(repo, cachepath, path, node) |
|
307 | 307 | |
|
308 | 308 | yield '%d\n%s' % (len(text), text) |
|
309 | 309 | |
|
310 | 310 | # it would be better to only flush after processing a whole batch |
|
311 | 311 | # but currently we don't know if there are more requests coming |
|
312 | 312 | proto._fout.flush() |
|
313 | 313 | return wireprototypes.streamres(streamer()) |
|
314 | 314 | |
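
The streamer above reads one request per line -- 40 hex characters of the wanted node followed by the file path -- and stops at an empty line. A hedged sketch of the corresponding request framing (`wants` is an invented name for a list of (hexnode, path) pairs)::

    def buildgetfilesrequest(wants):
        lines = [hexnode + path for hexnode, path in wants]
        # a blank line tells the server the batch is complete
        return '\n'.join(lines) + '\n\n'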
|
315 | 315 | def createfileblob(filectx): |
|
316 | 316 | """ |
|
317 | 317 | format: |
|
318 | 318 | v0: |
|
319 | 319 | str(len(rawtext)) + '\0' + rawtext + ancestortext |
|
320 | 320 | v1: |
|
321 | 321 | 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext |
|
322 | 322 | metalist := metalist + '\n' + meta | meta |
|
323 | 323 | meta := sizemeta | flagmeta |
|
324 | 324 | sizemeta := METAKEYSIZE + str(len(rawtext)) |
|
325 | 325 | flagmeta := METAKEYFLAG + str(flag) |
|
326 | 326 | |
|
327 | 327 | note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a |
|
328 | 328 | length of 1. |
|
329 | 329 | """ |
|
330 | 330 | flog = filectx.filelog() |
|
331 | 331 | frev = filectx.filerev() |
|
332 | 332 | revlogflags = flog._revlog.flags(frev) |
|
333 | 333 | if revlogflags == 0: |
|
334 | 334 | # normal files |
|
335 | 335 | text = filectx.data() |
|
336 | 336 | else: |
|
337 | 337 | # lfs, read raw revision data |
|
338 | 338 | text = flog.revision(frev, raw=True) |
|
339 | 339 | |
|
340 | 340 | repo = filectx._repo |
|
341 | 341 | |
|
342 | 342 | ancestors = [filectx] |
|
343 | 343 | |
|
344 | 344 | try: |
|
345 | 345 | repo.forcelinkrev = True |
|
346 | 346 | ancestors.extend([f for f in filectx.ancestors()]) |
|
347 | 347 | |
|
348 | 348 | ancestortext = "" |
|
349 | 349 | for ancestorctx in ancestors: |
|
350 | 350 | parents = ancestorctx.parents() |
|
351 | 351 | p1 = nullid |
|
352 | 352 | p2 = nullid |
|
353 | 353 | if len(parents) > 0: |
|
354 | 354 | p1 = parents[0].filenode() |
|
355 | 355 | if len(parents) > 1: |
|
356 | 356 | p2 = parents[1].filenode() |
|
357 | 357 | |
|
358 | 358 | copyname = "" |
|
359 | 359 | rename = ancestorctx.renamed() |
|
360 | 360 | if rename: |
|
361 | 361 | copyname = rename[0] |
|
362 | 362 | linknode = ancestorctx.node() |
|
363 | 363 | ancestortext += "%s%s%s%s%s\0" % ( |
|
364 | 364 | ancestorctx.filenode(), p1, p2, linknode, |
|
365 | 365 | copyname) |
|
366 | 366 | finally: |
|
367 | 367 | repo.forcelinkrev = False |
|
368 | 368 | |
|
369 | 369 | header = shallowutil.buildfileblobheader(len(text), revlogflags) |
|
370 | 370 | |
|
371 | 371 | return "%s\0%s%s" % (header, text, ancestortext) |
|
372 | 372 | |
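
A rough reader for the v1 layout described in the docstring above; the single-byte keys 's' for METAKEYSIZE and 'f' for METAKEYFLAG are assumptions for this sketch::

    def parsefileblob(blob):
        header, _, rest = blob.partition('\0')
        assert header.startswith('v1\n'), 'v0 blobs start with the size instead'
        size = flags = None
        for line in header[3:].split('\n'):
            key, value = line[0], line[1:]
            if key == 's':    # assumed METAKEYSIZE
                size = int(value)
            elif key == 'f':  # assumed METAKEYFLAG
                flags = int(value)
        text, ancestortext = rest[:size], rest[size:]
        return size, flags, text, ancestortext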
|
373 | 373 | def gcserver(ui, repo): |
|
374 | 374 | if not repo.ui.configbool("remotefilelog", "server"): |
|
375 | 375 | return |
|
376 | 376 | |
|
377 | 377 | neededfiles = set() |
|
378 | 378 | heads = repo.revs("heads(tip~25000:) - null") |
|
379 | 379 | |
|
380 | 380 | cachepath = repo.vfs.join("remotefilelogcache") |
|
381 | 381 | for head in heads: |
|
382 | 382 | mf = repo[head].manifest() |
|
383 | 383 | for filename, filenode in mf.iteritems(): |
|
384 | 384 | filecachepath = os.path.join(cachepath, filename, hex(filenode)) |
|
385 | 385 | neededfiles.add(filecachepath) |
|
386 | 386 | |
|
387 | 387 | # delete unneeded older files |
|
388 | 388 | days = repo.ui.configint("remotefilelog", "serverexpiration") |
|
389 | 389 | expiration = time.time() - (days * 24 * 60 * 60) |
|
390 | 390 | |
|
391 | 391 | progress = ui.makeprogress(_("removing old server cache"), unit="files") |
|
392 | 392 | progress.update(0) |
|
393 | 393 | for root, dirs, files in os.walk(cachepath): |
|
394 | 394 | for file in files: |
|
395 | 395 | filepath = os.path.join(root, file) |
|
396 | 396 | progress.increment() |
|
397 | 397 | if filepath in neededfiles: |
|
398 | 398 | continue |
|
399 | 399 | |
|
400 | 400 | stat = os.stat(filepath) |
|
401 | 401 | if stat.st_mtime < expiration: |
|
402 | 402 | os.remove(filepath) |
|
403 | 403 | |
|
404 | 404 | progress.complete() |
@@ -1,293 +1,293 b'' | |||
|
1 | 1 | # shallowbundle.py - bundle10 implementation for use with shallow repositories |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | from mercurial.i18n import _ |
|
10 | 10 | from mercurial.node import bin, hex, nullid |
|
11 | 11 | from mercurial import ( |
|
12 | 12 | bundlerepo, |
|
13 | 13 | changegroup, |
|
14 | 14 | error, |
|
15 | 15 | match, |
|
16 | 16 | mdiff, |
|
17 | 17 | pycompat, |
|
18 | 18 | ) |
|
19 | 19 | from . import ( |
|
20 | 20 | constants, |
|
21 | 21 | remotefilelog, |
|
22 | 22 | shallowutil, |
|
23 | 23 | ) |
|
24 | 24 | |
|
25 | 25 | NoFiles = 0 |
|
26 | 26 | LocalFiles = 1 |
|
27 | 27 | AllFiles = 2 |
|
28 | 28 | |
|
29 | 29 | def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None): |
|
30 | 30 | if not isinstance(rlog, remotefilelog.remotefilelog): |
|
31 | 31 | for c in super(cls, self).group(nodelist, rlog, lookup, |
|
32 | 32 | units=units): |
|
33 | 33 | yield c |
|
34 | 34 | return |
|
35 | 35 | |
|
36 | 36 | if len(nodelist) == 0: |
|
37 | 37 | yield self.close() |
|
38 | 38 | return |
|
39 | 39 | |
|
40 | 40 | nodelist = shallowutil.sortnodes(nodelist, rlog.parents) |
|
41 | 41 | |
|
42 | 42 | # add the parent of the first rev |
|
43 | 43 | p = rlog.parents(nodelist[0])[0] |
|
44 | 44 | nodelist.insert(0, p) |
|
45 | 45 | |
|
46 | 46 | # build deltas |
|
47 | 47 | for i in pycompat.xrange(len(nodelist) - 1): |
|
48 | 48 | prev, curr = nodelist[i], nodelist[i + 1] |
|
49 | 49 | linknode = lookup(curr) |
|
50 | 50 | for c in self.nodechunk(rlog, curr, prev, linknode): |
|
51 | 51 | yield c |
|
52 | 52 | |
|
53 | 53 | yield self.close() |
|
54 | 54 | |
|
55 | 55 | class shallowcg1packer(changegroup.cgpacker): |
|
56 | 56 | def generate(self, commonrevs, clnodes, fastpathlinkrev, source): |
|
57 | 57 | if shallowutil.isenabled(self._repo): |
|
58 | 58 | fastpathlinkrev = False |
|
59 | 59 | |
|
60 | 60 | return super(shallowcg1packer, self).generate(commonrevs, clnodes, |
|
61 | 61 | fastpathlinkrev, source) |
|
62 | 62 | |
|
63 | 63 | def group(self, nodelist, rlog, lookup, units=None, reorder=None): |
|
64 | 64 | return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup, |
|
65 | 65 | units=units) |
|
66 | 66 | |
|
67 | 67 | def generatefiles(self, changedfiles, *args): |
|
68 | 68 | try: |
|
69 | 69 | linknodes, commonrevs, source = args |
|
70 | 70 | except ValueError: |
|
71 | 71 | commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args |
|
72 | 72 | if shallowutil.isenabled(self._repo): |
|
73 | 73 | repo = self._repo |
|
74 | 74 | if isinstance(repo, bundlerepo.bundlerepository): |
|
75 | 75 | # If the bundle contains filelogs, we can't pull from it, since |
|
76 | 76 | # bundlerepo is heavily tied to revlogs. Instead require that |
|
77 | 77 | # the user use unbundle instead. |
|
78 | 78 | # Force load the filelog data. |
|
79 | 79 | bundlerepo.bundlerepository.file(repo, 'foo') |
|
80 | 80 | if repo._cgfilespos: |
|
81 | 81 | raise error.Abort("cannot pull from full bundles", |
|
82 | 82 | hint="use `hg unbundle` instead") |
|
83 | 83 | return [] |
|
84 | 84 | filestosend = self.shouldaddfilegroups(source) |
|
85 | 85 | if filestosend == NoFiles: |
|
86 | 86 | changedfiles = list([f for f in changedfiles |
|
87 | 87 | if not repo.shallowmatch(f)]) |
|
88 | 88 | |
|
89 | 89 | return super(shallowcg1packer, self).generatefiles( |
|
90 | 90 | changedfiles, *args) |
|
91 | 91 | |
|
92 | 92 | def shouldaddfilegroups(self, source): |
|
93 | 93 | repo = self._repo |
|
94 | 94 | if not shallowutil.isenabled(repo): |
|
95 | 95 | return AllFiles |
|
96 | 96 | |
|
97 | 97 | if source == "push" or source == "bundle": |
|
98 | 98 | return AllFiles |
|
99 | 99 | |
|
100 | 100 | caps = self._bundlecaps or [] |
|
101 | 101 | if source == "serve" or source == "pull": |
|
102 | 102 | if constants.BUNDLE2_CAPABLITY in caps: |
|
103 | 103 | return LocalFiles |
|
104 | 104 | else: |
|
105 | 105 | # Serving to a full repo requires us to serve everything |
|
106 | 106 | repo.ui.warn(_("pulling from a shallow repo\n")) |
|
107 | 107 | return AllFiles |
|
108 | 108 | |
|
109 | 109 | return NoFiles |
|
110 | 110 | |
|
111 | 111 | def prune(self, rlog, missing, commonrevs): |
|
112 | 112 | if not isinstance(rlog, remotefilelog.remotefilelog): |
|
113 | 113 | return super(shallowcg1packer, self).prune(rlog, missing, |
|
114 | 114 | commonrevs) |
|
115 | 115 | |
|
116 | 116 | repo = self._repo |
|
117 | 117 | results = [] |
|
118 | 118 | for fnode in missing: |
|
119 | 119 | fctx = repo.filectx(rlog.filename, fileid=fnode) |
|
120 | 120 | if fctx.linkrev() not in commonrevs: |
|
121 | 121 | results.append(fnode) |
|
122 | 122 | return results |
|
123 | 123 | |
|
124 | 124 | def nodechunk(self, revlog, node, prevnode, linknode): |
|
125 | 125 | prefix = '' |
|
126 | 126 | if prevnode == nullid: |
|
127 | 127 | delta = revlog.revision(node, raw=True) |
|
128 | 128 | prefix = mdiff.trivialdiffheader(len(delta)) |
|
129 | 129 | else: |
|
130 | 130 | # Actually uses remotefilelog.revdiff which works on nodes, not revs |
|
131 | 131 | delta = revlog.revdiff(prevnode, node) |
|
132 | 132 | p1, p2 = revlog.parents(node) |
|
133 | 133 | flags = revlog.flags(node) |
|
134 | 134 | meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags) |
|
135 | 135 | meta += prefix |
|
136 | 136 | l = len(meta) + len(delta) |
|
137 | 137 | yield changegroup.chunkheader(l) |
|
138 | 138 | yield meta |
|
139 | 139 | yield delta |
|
140 | 140 | |
|
141 | 141 | def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs): |
|
142 | 142 | if not shallowutil.isenabled(repo): |
|
143 | 143 | return orig(repo, outgoing, version, source, *args, **kwargs) |
|
144 | 144 | |
|
145 | 145 | original = repo.shallowmatch |
|
146 | 146 | try: |
|
147 | 147 | # if serving, only send files the client has patterns for
|
148 | 148 | if source == 'serve': |
|
149 | 149 | bundlecaps = kwargs.get(r'bundlecaps') |
|
150 | 150 | includepattern = None |
|
151 | 151 | excludepattern = None |
|
152 | 152 | for cap in (bundlecaps or []): |
|
153 | 153 | if cap.startswith("includepattern="): |
|
154 | 154 | raw = cap[len("includepattern="):] |
|
155 | 155 | if raw: |
|
156 | 156 | includepattern = raw.split('\0') |
|
157 | 157 | elif cap.startswith("excludepattern="): |
|
158 | 158 | raw = cap[len("excludepattern="):] |
|
159 | 159 | if raw: |
|
160 | 160 | excludepattern = raw.split('\0') |
|
161 | 161 | if includepattern or excludepattern: |
|
162 | 162 | repo.shallowmatch = match.match(repo.root, '', None, |
|
163 | 163 | includepattern, excludepattern) |
|
164 | 164 | else: |
|
165 |     | repo.shallowmatch = match.always(

    | 165 | repo.shallowmatch = match.always()
|
166 | 166 | return orig(repo, outgoing, version, source, *args, **kwargs) |
|
167 | 167 | finally: |
|
168 | 168 | repo.shallowmatch = original |
|
169 | 169 | |
|
170 | 170 | def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args): |
|
171 | 171 | if not shallowutil.isenabled(repo): |
|
172 | 172 | return orig(repo, source, revmap, trp, expectedfiles, *args) |
|
173 | 173 | |
|
174 | 174 | newfiles = 0 |
|
175 | 175 | visited = set() |
|
176 | 176 | revisiondatas = {} |
|
177 | 177 | queue = [] |
|
178 | 178 | |
|
179 | 179 | # Normal Mercurial processes each file one at a time, adding all |
|
180 | 180 | # the new revisions for that file at once. In remotefilelog a file |
|
181 | 181 | # revision may depend on a different file's revision (in the case |
|
182 | 182 | # of a rename/copy), so we must lay all revisions down across all |
|
183 | 183 | # files in topological order. |
|
184 | 184 | |
|
185 | 185 | # read all the file chunks but don't add them |
|
186 | 186 | progress = repo.ui.makeprogress(_('files'), total=expectedfiles) |
|
187 | 187 | while True: |
|
188 | 188 | chunkdata = source.filelogheader() |
|
189 | 189 | if not chunkdata: |
|
190 | 190 | break |
|
191 | 191 | f = chunkdata["filename"] |
|
192 | 192 | repo.ui.debug("adding %s revisions\n" % f) |
|
193 | 193 | progress.increment() |
|
194 | 194 | |
|
195 | 195 | if not repo.shallowmatch(f): |
|
196 | 196 | fl = repo.file(f) |
|
197 | 197 | deltas = source.deltaiter() |
|
198 | 198 | fl.addgroup(deltas, revmap, trp) |
|
199 | 199 | continue |
|
200 | 200 | |
|
201 | 201 | chain = None |
|
202 | 202 | while True: |
|
203 | 203 | # returns: (node, p1, p2, cs, deltabase, delta, flags) or None |
|
204 | 204 | revisiondata = source.deltachunk(chain) |
|
205 | 205 | if not revisiondata: |
|
206 | 206 | break |
|
207 | 207 | |
|
208 | 208 | chain = revisiondata[0] |
|
209 | 209 | |
|
210 | 210 | revisiondatas[(f, chain)] = revisiondata |
|
211 | 211 | queue.append((f, chain)) |
|
212 | 212 | |
|
213 | 213 | if f not in visited: |
|
214 | 214 | newfiles += 1 |
|
215 | 215 | visited.add(f) |
|
216 | 216 | |
|
217 | 217 | if chain is None: |
|
218 | 218 | raise error.Abort(_("received file revlog group is empty")) |
|
219 | 219 | |
|
220 | 220 | processed = set() |
|
221 | 221 | def available(f, node, depf, depnode): |
|
222 | 222 | if depnode != nullid and (depf, depnode) not in processed: |
|
223 | 223 | if not (depf, depnode) in revisiondatas: |
|
224 | 224 | # It's not in the changegroup, assume it's already |
|
225 | 225 | # in the repo |
|
226 | 226 | return True |
|
227 | 227 | # re-add self to queue |
|
228 | 228 | queue.insert(0, (f, node)) |
|
229 | 229 | # add dependency in front |
|
230 | 230 | queue.insert(0, (depf, depnode)) |
|
231 | 231 | return False |
|
232 | 232 | return True |
|
233 | 233 | |
|
234 | 234 | skipcount = 0 |
|
235 | 235 | |
|
236 | 236 | # Prefetch the non-bundled revisions that we will need |
|
237 | 237 | prefetchfiles = [] |
|
238 | 238 | for f, node in queue: |
|
239 | 239 | revisiondata = revisiondatas[(f, node)] |
|
240 | 240 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) |
|
241 | 241 | dependents = [revisiondata[1], revisiondata[2], revisiondata[4]] |
|
242 | 242 | |
|
243 | 243 | for dependent in dependents: |
|
244 | 244 | if dependent == nullid or (f, dependent) in revisiondatas: |
|
245 | 245 | continue |
|
246 | 246 | prefetchfiles.append((f, hex(dependent))) |
|
247 | 247 | |
|
248 | 248 | repo.fileservice.prefetch(prefetchfiles) |
|
249 | 249 | |
|
250 | 250 | # Apply the revisions in topological order such that a revision |
|
251 | 251 | # is only written once its deltabase and parents have been written.
|
252 | 252 | while queue: |
|
253 | 253 | f, node = queue.pop(0) |
|
254 | 254 | if (f, node) in processed: |
|
255 | 255 | continue |
|
256 | 256 | |
|
257 | 257 | skipcount += 1 |
|
258 | 258 | if skipcount > len(queue) + 1: |
|
259 | 259 | raise error.Abort(_("circular node dependency")) |
|
260 | 260 | |
|
261 | 261 | fl = repo.file(f) |
|
262 | 262 | |
|
263 | 263 | revisiondata = revisiondatas[(f, node)] |
|
264 | 264 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) |
|
265 | 265 | node, p1, p2, linknode, deltabase, delta, flags = revisiondata |
|
266 | 266 | |
|
267 | 267 | if not available(f, node, f, deltabase): |
|
268 | 268 | continue |
|
269 | 269 | |
|
270 | 270 | base = fl.revision(deltabase, raw=True) |
|
271 | 271 | text = mdiff.patch(base, delta) |
|
272 | 272 | if not isinstance(text, bytes): |
|
273 | 273 | text = bytes(text) |
|
274 | 274 | |
|
275 | 275 | meta, text = shallowutil.parsemeta(text) |
|
276 | 276 | if 'copy' in meta: |
|
277 | 277 | copyfrom = meta['copy'] |
|
278 | 278 | copynode = bin(meta['copyrev']) |
|
279 | 279 | if not available(f, node, copyfrom, copynode): |
|
280 | 280 | continue |
|
281 | 281 | |
|
282 | 282 | for p in [p1, p2]: |
|
283 | 283 | if p != nullid: |
|
284 | 284 | if not available(f, node, f, p): |
|
285 | 285 | continue |
|
286 | 286 | |
|
287 | 287 | fl.add(text, meta, trp, linknode, p1, p2) |
|
288 | 288 | processed.add((f, node)) |
|
289 | 289 | skipcount = 0 |
|
290 | 290 | |
|
291 | 291 | progress.complete() |
|
292 | 292 | |
|
293 | 293 | return len(revisiondatas), newfiles |
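
The while loop above applies revisions by pushing an unready item back onto the queue behind its missing dependency, so dependencies are always laid down first. The same discipline in isolation, as a minimal sketch with invented names and no cycle detection (the real code aborts after too many skips)::

    def applyintopoorder(queue, dependencies, apply):
        # assumes every dependency is itself an item in the queue; the real
        # code instead treats unknown nodes as already present in the repo
        processed = set()
        while queue:
            item = queue.pop(0)
            if item in processed:
                continue
            missing = [d for d in dependencies.get(item, ())
                       if d not in processed]
            if missing:
                # re-add self, then put the dependencies in front of it
                queue.insert(0, item)
                for d in missing:
                    queue.insert(0, d)
                continue
            apply(item)
            processed.add(item)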
@@ -1,305 +1,305 b'' | |||
|
1 | 1 | # shallowrepo.py - shallow repository that uses remote filelogs |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import os |
|
10 | 10 | |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | from mercurial.node import hex, nullid, nullrev |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | encoding, |
|
15 | 15 | error, |
|
16 | 16 | localrepo, |
|
17 | 17 | match, |
|
18 | 18 | scmutil, |
|
19 | 19 | sparse, |
|
20 | 20 | util, |
|
21 | 21 | ) |
|
22 | 22 | from mercurial.utils import procutil |
|
23 | 23 | from . import ( |
|
24 | 24 | connectionpool, |
|
25 | 25 | constants, |
|
26 | 26 | contentstore, |
|
27 | 27 | datapack, |
|
28 | 28 | fileserverclient, |
|
29 | 29 | historypack, |
|
30 | 30 | metadatastore, |
|
31 | 31 | remotefilectx, |
|
32 | 32 | remotefilelog, |
|
33 | 33 | shallowutil, |
|
34 | 34 | ) |
|
35 | 35 | |
|
36 | 36 | if util.safehasattr(util, '_hgexecutable'): |
|
37 | 37 | # Before 5be286db |
|
38 | 38 | _hgexecutable = util.hgexecutable |
|
39 | 39 | else: |
|
40 | 40 | from mercurial.utils import procutil |
|
41 | 41 | _hgexecutable = procutil.hgexecutable |
|
42 | 42 | |
|
43 | 43 | # These make*stores functions are global so that other extensions can replace |
|
44 | 44 | # them. |
|
45 | 45 | def makelocalstores(repo): |
|
46 | 46 | """In-repo stores, like .hg/store/data; can not be discarded.""" |
|
47 | 47 | localpath = os.path.join(repo.svfs.vfs.base, 'data') |
|
48 | 48 | if not os.path.exists(localpath): |
|
49 | 49 | os.makedirs(localpath) |
|
50 | 50 | |
|
51 | 51 | # Instantiate local data stores |
|
52 | 52 | localcontent = contentstore.remotefilelogcontentstore( |
|
53 | 53 | repo, localpath, repo.name, shared=False) |
|
54 | 54 | localmetadata = metadatastore.remotefilelogmetadatastore( |
|
55 | 55 | repo, localpath, repo.name, shared=False) |
|
56 | 56 | return localcontent, localmetadata |
|
57 | 57 | |
|
58 | 58 | def makecachestores(repo): |
|
59 | 59 | """Typically machine-wide, cache of remote data; can be discarded.""" |
|
60 | 60 | # Instantiate shared cache stores |
|
61 | 61 | cachepath = shallowutil.getcachepath(repo.ui) |
|
62 | 62 | cachecontent = contentstore.remotefilelogcontentstore( |
|
63 | 63 | repo, cachepath, repo.name, shared=True) |
|
64 | 64 | cachemetadata = metadatastore.remotefilelogmetadatastore( |
|
65 | 65 | repo, cachepath, repo.name, shared=True) |
|
66 | 66 | |
|
67 | 67 | repo.sharedstore = cachecontent |
|
68 | 68 | repo.shareddatastores.append(cachecontent) |
|
69 | 69 | repo.sharedhistorystores.append(cachemetadata) |
|
70 | 70 | |
|
71 | 71 | return cachecontent, cachemetadata |
|
72 | 72 | |
|
73 | 73 | def makeremotestores(repo, cachecontent, cachemetadata): |
|
74 | 74 | """These stores fetch data from a remote server.""" |
|
75 | 75 | # Instantiate remote stores |
|
76 | 76 | repo.fileservice = fileserverclient.fileserverclient(repo) |
|
77 | 77 | remotecontent = contentstore.remotecontentstore( |
|
78 | 78 | repo.ui, repo.fileservice, cachecontent) |
|
79 | 79 | remotemetadata = metadatastore.remotemetadatastore( |
|
80 | 80 | repo.ui, repo.fileservice, cachemetadata) |
|
81 | 81 | return remotecontent, remotemetadata |
|
82 | 82 | |
|
83 | 83 | def makepackstores(repo): |
|
84 | 84 | """Packs are more efficient (to read from) cache stores.""" |
|
85 | 85 | # Instantiate pack stores |
|
86 | 86 | packpath = shallowutil.getcachepackpath(repo, |
|
87 | 87 | constants.FILEPACK_CATEGORY) |
|
88 | 88 | packcontentstore = datapack.datapackstore(repo.ui, packpath) |
|
89 | 89 | packmetadatastore = historypack.historypackstore(repo.ui, packpath) |
|
90 | 90 | |
|
91 | 91 | repo.shareddatastores.append(packcontentstore) |
|
92 | 92 | repo.sharedhistorystores.append(packmetadatastore) |
|
93 | 93 | shallowutil.reportpackmetrics(repo.ui, 'filestore', packcontentstore, |
|
94 | 94 | packmetadatastore) |
|
95 | 95 | return packcontentstore, packmetadatastore |
|
96 | 96 | |
|
97 | 97 | def makeunionstores(repo): |
|
98 | 98 | """Union stores iterate the other stores and return the first result.""" |
|
99 | 99 | repo.shareddatastores = [] |
|
100 | 100 | repo.sharedhistorystores = [] |
|
101 | 101 | |
|
102 | 102 | packcontentstore, packmetadatastore = makepackstores(repo) |
|
103 | 103 | cachecontent, cachemetadata = makecachestores(repo) |
|
104 | 104 | localcontent, localmetadata = makelocalstores(repo) |
|
105 | 105 | remotecontent, remotemetadata = makeremotestores(repo, cachecontent, |
|
106 | 106 | cachemetadata) |
|
107 | 107 | |
|
108 | 108 | # Instantiate union stores |
|
109 | 109 | repo.contentstore = contentstore.unioncontentstore( |
|
110 | 110 | packcontentstore, cachecontent, |
|
111 | 111 | localcontent, remotecontent, writestore=localcontent) |
|
112 | 112 | repo.metadatastore = metadatastore.unionmetadatastore( |
|
113 | 113 | packmetadatastore, cachemetadata, localmetadata, remotemetadata, |
|
114 | 114 | writestore=localmetadata) |
|
115 | 115 | |
|
116 | 116 | fileservicedatawrite = cachecontent |
|
117 | 117 | fileservicehistorywrite = cachemetadata |
|
118 | 118 | repo.fileservice.setstore(repo.contentstore, repo.metadatastore, |
|
119 | 119 | fileservicedatawrite, fileservicehistorywrite) |
|
120 | 120 | shallowutil.reportpackmetrics(repo.ui, 'filestore', |
|
121 | 121 | packcontentstore, packmetadatastore) |
|
122 | 122 | |
|
123 | 123 | def wraprepo(repo): |
|
124 | 124 | class shallowrepository(repo.__class__): |
|
125 | 125 | @util.propertycache |
|
126 | 126 | def name(self): |
|
127 | 127 | return self.ui.config('remotefilelog', 'reponame') |
|
128 | 128 | |
|
129 | 129 | @util.propertycache |
|
130 | 130 | def fallbackpath(self): |
|
131 | 131 | path = repo.ui.config("remotefilelog", "fallbackpath", |
|
132 | 132 | repo.ui.config('paths', 'default')) |
|
133 | 133 | if not path: |
|
134 | 134 | raise error.Abort("no remotefilelog server " |
|
135 | 135 | "configured - is your .hg/hgrc trusted?") |
|
136 | 136 | |
|
137 | 137 | return path |
|
138 | 138 | |
|
139 | 139 | def maybesparsematch(self, *revs, **kwargs): |
|
140 | 140 | ''' |
|
141 | 141 | A wrapper that allows the remotefilelog to invoke sparsematch() if |
|
142 | 142 | this is a sparse repository, or returns None if this is not a |
|
143 | 143 | sparse repository. |
|
144 | 144 | ''' |
|
145 | 145 | if revs: |
|
146 | 146 | ret = sparse.matcher(repo, revs=revs) |
|
147 | 147 | else: |
|
148 | 148 | ret = sparse.matcher(repo) |
|
149 | 149 | |
|
150 | 150 | if ret.always(): |
|
151 | 151 | return None |
|
152 | 152 | return ret |
|
153 | 153 | |
|
154 | 154 | def file(self, f): |
|
155 | 155 | if f[0] == '/': |
|
156 | 156 | f = f[1:] |
|
157 | 157 | |
|
158 | 158 | if self.shallowmatch(f): |
|
159 | 159 | return remotefilelog.remotefilelog(self.svfs, f, self) |
|
160 | 160 | else: |
|
161 | 161 | return super(shallowrepository, self).file(f) |
|
162 | 162 | |
|
163 | 163 | def filectx(self, path, *args, **kwargs): |
|
164 | 164 | if self.shallowmatch(path): |
|
165 | 165 | return remotefilectx.remotefilectx(self, path, *args, **kwargs) |
|
166 | 166 | else: |
|
167 | 167 | return super(shallowrepository, self).filectx(path, *args, |
|
168 | 168 | **kwargs) |
|
169 | 169 | |
|
170 | 170 | @localrepo.unfilteredmethod |
|
171 | 171 | def commitctx(self, ctx, error=False): |
|
172 | 172 | """Add a new revision to current repository. |
|
173 | 173 | Revision information is passed via the context argument. |
|
174 | 174 | """ |
|
175 | 175 | |
|
176 | 176 | # some contexts already have manifest nodes, they don't need any |
|
177 | 177 | # prefetching (for example if we're just editing a commit message |
|
178 | 178 | # we can reuse the manifest)
|
179 | 179 | if not ctx.manifestnode(): |
|
180 | 180 | # prefetch files that will likely be compared |
|
181 | 181 | m1 = ctx.p1().manifest() |
|
182 | 182 | files = [] |
|
183 | 183 | for f in ctx.modified() + ctx.added(): |
|
184 | 184 | fparent1 = m1.get(f, nullid) |
|
185 | 185 | if fparent1 != nullid: |
|
186 | 186 | files.append((f, hex(fparent1))) |
|
187 | 187 | self.fileservice.prefetch(files) |
|
188 | 188 | return super(shallowrepository, self).commitctx(ctx, |
|
189 | 189 | error=error) |
|
190 | 190 | |
|
191 | 191 | def backgroundprefetch(self, revs, base=None, repack=False, pats=None, |
|
192 | 192 | opts=None): |
|
193 | 193 | """Runs prefetch in background with optional repack |
|
194 | 194 | """ |
|
195 | 195 | cmd = [_hgexecutable(), '-R', repo.origroot, 'prefetch'] |
|
196 | 196 | if repack: |
|
197 | 197 | cmd.append('--repack') |
|
198 | 198 | if revs: |
|
199 | 199 | cmd += ['-r', revs] |
|
200 | 200 | procutil.runbgcommand(cmd, encoding.environ) |
|
201 | 201 | |
|
202 | 202 | def prefetch(self, revs, base=None, pats=None, opts=None): |
|
203 | 203 | """Prefetches all the necessary file revisions for the given revs |
|
204 | 204 | Optionally runs repack in background |
|
205 | 205 | """ |
|
206 | 206 | with repo._lock(repo.svfs, 'prefetchlock', True, None, None, |
|
207 | 207 | _('prefetching in %s') % repo.origroot): |
|
208 | 208 | self._prefetch(revs, base, pats, opts) |
|
209 | 209 | |
|
210 | 210 | def _prefetch(self, revs, base=None, pats=None, opts=None): |
|
211 | 211 | fallbackpath = self.fallbackpath |
|
212 | 212 | if fallbackpath: |
|
213 | 213 | # If we know a rev is on the server, we should fetch the server |
|
214 | 214 | # version of those files, since our local file versions might |
|
215 | 215 | # become obsolete if the local commits are stripped. |
|
216 | 216 | localrevs = repo.revs('outgoing(%s)', fallbackpath) |
|
217 | 217 | if base is not None and base != nullrev: |
|
218 | 218 | serverbase = list(repo.revs('first(reverse(::%s) - %ld)', |
|
219 | 219 | base, localrevs)) |
|
220 | 220 | if serverbase: |
|
221 | 221 | base = serverbase[0] |
|
222 | 222 | else: |
|
223 | 223 | localrevs = repo |
|
224 | 224 | |
|
225 | 225 | mfl = repo.manifestlog |
|
226 | 226 | mfrevlog = mfl.getstorage('') |
|
227 | 227 | if base is not None: |
|
228 | 228 | mfdict = mfl[repo[base].manifestnode()].read() |
|
229 | 229 | skip = set(mfdict.iteritems()) |
|
230 | 230 | else: |
|
231 | 231 | skip = set() |
|
232 | 232 | |
|
233 | 233 | # Copy the skip set to start large and avoid constant resizing, |
|
234 | 234 | # and since it's likely to be very similar to the prefetch set. |
|
235 | 235 | files = skip.copy() |
|
236 | 236 | serverfiles = skip.copy() |
|
237 | 237 | visited = set() |
|
238 | 238 | visited.add(nullrev) |
|
239 | 239 | revcount = len(revs) |
|
240 | 240 | progress = self.ui.makeprogress(_('prefetching'), total=revcount) |
|
241 | 241 | progress.update(0) |
|
242 | 242 | for rev in sorted(revs): |
|
243 | 243 | ctx = repo[rev] |
|
244 | 244 | if pats: |
|
245 | 245 | m = scmutil.match(ctx, pats, opts) |
|
246 | 246 | sparsematch = repo.maybesparsematch(rev) |
|
247 | 247 | |
|
248 | 248 | mfnode = ctx.manifestnode() |
|
249 | 249 | mfrev = mfrevlog.rev(mfnode) |
|
250 | 250 | |
|
251 | 251 | # Decompressing manifests is expensive. |
|
252 | 252 | # When possible, only read the deltas. |
|
253 | 253 | p1, p2 = mfrevlog.parentrevs(mfrev) |
|
254 | 254 | if p1 in visited and p2 in visited: |
|
255 | 255 | mfdict = mfl[mfnode].readfast() |
|
256 | 256 | else: |
|
257 | 257 | mfdict = mfl[mfnode].read() |
|
258 | 258 | |
|
259 | 259 | diff = mfdict.iteritems() |
|
260 | 260 | if pats: |
|
261 | 261 | diff = (pf for pf in diff if m(pf[0])) |
|
262 | 262 | if sparsematch: |
|
263 | 263 | diff = (pf for pf in diff if sparsematch(pf[0])) |
|
264 | 264 | if rev not in localrevs: |
|
265 | 265 | serverfiles.update(diff) |
|
266 | 266 | else: |
|
267 | 267 | files.update(diff) |
|
268 | 268 | |
|
269 | 269 | visited.add(mfrev) |
|
270 | 270 | progress.increment() |
|
271 | 271 | |
|
272 | 272 | files.difference_update(skip) |
|
273 | 273 | serverfiles.difference_update(skip) |
|
274 | 274 | progress.complete() |
|
275 | 275 | |
|
276 | 276 | # Fetch files known to be on the server |
|
277 | 277 | if serverfiles: |
|
278 | 278 | results = [(path, hex(fnode)) for (path, fnode) in serverfiles] |
|
279 | 279 | repo.fileservice.prefetch(results, force=True) |
|
280 | 280 | |
|
281 | 281 | # Fetch files that may or may not be on the server |
|
282 | 282 | if files: |
|
283 | 283 | results = [(path, hex(fnode)) for (path, fnode) in files] |
|
284 | 284 | repo.fileservice.prefetch(results) |
|
285 | 285 | |
|
286 | 286 | def close(self): |
|
287 | 287 | super(shallowrepository, self).close() |
|
288 | 288 | self.connectionpool.close() |
|
289 | 289 | |
|
290 | 290 | repo.__class__ = shallowrepository |
|
291 | 291 | |
|
292 |     | repo.shallowmatch = match.always(

    | 292 | repo.shallowmatch = match.always()
|
293 | 293 | |
|
294 | 294 | makeunionstores(repo) |
|
295 | 295 | |
|
296 | 296 | repo.includepattern = repo.ui.configlist("remotefilelog", "includepattern", |
|
297 | 297 | None) |
|
298 | 298 | repo.excludepattern = repo.ui.configlist("remotefilelog", "excludepattern", |
|
299 | 299 | None) |
|
300 | 300 | if not util.safehasattr(repo, 'connectionpool'): |
|
301 | 301 | repo.connectionpool = connectionpool.connectionpool(repo) |
|
302 | 302 | |
|
303 | 303 | if repo.includepattern or repo.excludepattern: |
|
304 | 304 | repo.shallowmatch = match.match(repo.root, '', None, |
|
305 | 305 | repo.includepattern, repo.excludepattern) |
@@ -1,347 +1,347 b'' | |||
|
1 | 1 | # sparse.py - allow sparse checkouts of the working directory |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2014 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """allow sparse checkouts of the working directory (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | (This extension is not yet protected by backwards compatibility |
|
11 | 11 | guarantees. Any aspect may break in future releases until this |
|
12 | 12 | notice is removed.) |
|
13 | 13 | |
|
14 | 14 | This extension allows the working directory to only consist of a |
|
15 | 15 | subset of files for the revision. This allows specific files or |
|
16 | 16 | directories to be explicitly included or excluded. Many repository |
|
17 | 17 | operations have performance proportional to the number of files in |
|
18 | 18 | the working directory. So only realizing a subset of files in the |
|
19 | 19 | working directory can improve performance. |
|
20 | 20 | |
|
21 | 21 | Sparse Config Files |
|
22 | 22 | ------------------- |
|
23 | 23 | |
|
24 | 24 | The set of files that are part of a sparse checkout are defined by |
|
25 | 25 | a sparse config file. The file defines 3 things: includes (files to |
|
26 | 26 | include in the sparse checkout), excludes (files to exclude from the |
|
27 | 27 | sparse checkout), and profiles (links to other config files). |
|
28 | 28 | |
|
29 | 29 | The file format is newline delimited. Empty lines and lines beginning |
|
30 | 30 | with ``#`` are ignored. |
|
31 | 31 | |
|
32 | 32 | Lines beginning with ``%include `` denote another sparse config file |
|
33 | 33 | to include. e.g. ``%include tests.sparse``. The filename is relative |
|
34 | 34 | to the repository root. |
|
35 | 35 | |
|
36 | 36 | The special lines ``[include]`` and ``[exclude]`` denote the section |
|
37 | 37 | for includes and excludes that follow, respectively. It is illegal to |
|
38 | 38 | have ``[include]`` after ``[exclude]``. |
|
39 | 39 | |
|
40 | 40 | Non-special lines resemble file patterns to be added to either includes |
|
41 | 41 | or excludes. The syntax of these lines is documented by :hg:`help patterns`. |
|
42 | 42 | Patterns are interpreted as ``glob:`` by default and match against the |
|
43 | 43 | root of the repository. |
|
44 | 44 | |
|
45 | 45 | Exclusion patterns take precedence over inclusion patterns. So even |
|
46 | 46 | if a file is explicitly included, an ``[exclude]`` entry can remove it. |
|
47 | 47 | |
|
48 | 48 | For example, say you have a repository with 3 directories, ``frontend/``, |
|
49 | 49 | ``backend/``, and ``tools/``. ``frontend/`` and ``backend/`` correspond |
|
50 | 50 | to different projects and it is uncommon for someone working on one |
|
51 | 51 | to need the files for the other. But ``tools/`` contains files shared |
|
52 | 52 | between both projects. Your sparse config files may resemble:: |
|
53 | 53 | |
|
54 | 54 | # frontend.sparse |
|
55 | 55 | frontend/** |
|
56 | 56 | tools/** |
|
57 | 57 | |
|
58 | 58 | # backend.sparse |
|
59 | 59 | backend/** |
|
60 | 60 | tools/** |
|
61 | 61 | |
|
62 | 62 | Say the backend grows in size. Or there's a directory with thousands |
|
63 | 63 | of files you wish to exclude. You can modify the profile to exclude |
|
64 | 64 | certain files:: |
|
65 | 65 | |
|
66 | 66 | [include] |
|
67 | 67 | backend/** |
|
68 | 68 | tools/** |
|
69 | 69 | |
|
70 | 70 | [exclude] |
|
71 | 71 | tools/tests/** |
|
72 | 72 | """ |
|
73 | 73 | |
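
The profile mechanism described in the docstring above can also nest profiles via `%include` while adding rules of its own; a hypothetical example (the file names below are made up)::

    # team-frontend.sparse
    %include tools.sparse

    [include]
    frontend/**

    [exclude]
    frontend/legacy/**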
|
74 | 74 | from __future__ import absolute_import |
|
75 | 75 | |
|
76 | 76 | from mercurial.i18n import _ |
|
77 | 77 | from mercurial import ( |
|
78 | 78 | commands, |
|
79 | 79 | dirstate, |
|
80 | 80 | error, |
|
81 | 81 | extensions, |
|
82 | 82 | hg, |
|
83 | 83 | logcmdutil, |
|
84 | 84 | match as matchmod, |
|
85 | 85 | pycompat, |
|
86 | 86 | registrar, |
|
87 | 87 | sparse, |
|
88 | 88 | util, |
|
89 | 89 | ) |
|
90 | 90 | |
|
91 | 91 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
92 | 92 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
93 | 93 | # be specifying the version(s) of Mercurial they are tested with, or |
|
94 | 94 | # leave the attribute unspecified. |
|
95 | 95 | testedwith = 'ships-with-hg-core' |
|
96 | 96 | |
|
97 | 97 | cmdtable = {} |
|
98 | 98 | command = registrar.command(cmdtable) |
|
99 | 99 | |
|
100 | 100 | def extsetup(ui): |
|
101 | 101 | sparse.enabled = True |
|
102 | 102 | |
|
103 | 103 | _setupclone(ui) |
|
104 | 104 | _setuplog(ui) |
|
105 | 105 | _setupadd(ui) |
|
106 | 106 | _setupdirstate(ui) |
|
107 | 107 | |
|
108 | 108 | def replacefilecache(cls, propname, replacement): |
|
109 | 109 | """Replace a filecache property with a new class. This allows changing the |
|
110 | 110 | cache invalidation condition.""" |
|
111 | 111 | origcls = cls |
|
112 | 112 | assert callable(replacement) |
|
113 | 113 | while cls is not object: |
|
114 | 114 | if propname in cls.__dict__: |
|
115 | 115 | orig = cls.__dict__[propname] |
|
116 | 116 | setattr(cls, propname, replacement(orig)) |
|
117 | 117 | break |
|
118 | 118 | cls = cls.__bases__[0] |
|
119 | 119 | |
|
120 | 120 | if cls is object: |
|
121 | 121 | raise AttributeError(_("type '%s' has no property '%s'") % (origcls, |
|
122 | 122 | propname)) |
|
123 | 123 | |
|
124 | 124 | def _setuplog(ui): |
|
125 | 125 | entry = commands.table['log|history'] |
|
126 | 126 | entry[1].append(('', 'sparse', None, |
|
127 | 127 | "limit to changesets affecting the sparse checkout")) |
|
128 | 128 | |
|
129 | 129 | def _initialrevs(orig, repo, opts): |
|
130 | 130 | revs = orig(repo, opts) |
|
131 | 131 | if opts.get('sparse'): |
|
132 | 132 | sparsematch = sparse.matcher(repo) |
|
133 | 133 | def ctxmatch(rev): |
|
134 | 134 | ctx = repo[rev] |
|
135 | 135 | return any(f for f in ctx.files() if sparsematch(f)) |
|
136 | 136 | revs = revs.filter(ctxmatch) |
|
137 | 137 | return revs |
|
138 | 138 | extensions.wrapfunction(logcmdutil, '_initialrevs', _initialrevs) |
|
139 | 139 | |
|
140 | 140 | def _clonesparsecmd(orig, ui, repo, *args, **opts): |
|
141 | 141 | include_pat = opts.get(r'include') |
|
142 | 142 | exclude_pat = opts.get(r'exclude') |
|
143 | 143 | enableprofile_pat = opts.get(r'enable_profile') |
|
144 | 144 | narrow_pat = opts.get(r'narrow') |
|
145 | 145 | include = exclude = enableprofile = False |
|
146 | 146 | if include_pat: |
|
147 | 147 | pat = include_pat |
|
148 | 148 | include = True |
|
149 | 149 | if exclude_pat: |
|
150 | 150 | pat = exclude_pat |
|
151 | 151 | exclude = True |
|
152 | 152 | if enableprofile_pat: |
|
153 | 153 | pat = enableprofile_pat |
|
154 | 154 | enableprofile = True |
|
155 | 155 | if sum([include, exclude, enableprofile]) > 1: |
|
156 | 156 | raise error.Abort(_("too many flags specified.")) |
|
157 | 157 | # if --narrow is passed, it means they are includes and excludes for narrow |
|
158 | 158 | # clone |
|
159 | 159 | if not narrow_pat and (include or exclude or enableprofile): |
|
160 | 160 | def clonesparse(orig, self, node, overwrite, *args, **kwargs): |
|
161 | 161 | sparse.updateconfig(self.unfiltered(), pat, {}, include=include, |
|
162 | 162 | exclude=exclude, enableprofile=enableprofile, |
|
163 | 163 | usereporootpaths=True) |
|
164 | 164 | return orig(self, node, overwrite, *args, **kwargs) |
|
165 | 165 | extensions.wrapfunction(hg, 'updaterepo', clonesparse) |
|
166 | 166 | return orig(ui, repo, *args, **opts) |
|
167 | 167 | |
|
168 | 168 | def _setupclone(ui): |
|
169 | 169 | entry = commands.table['clone'] |
|
170 | 170 | entry[1].append(('', 'enable-profile', [], |
|
171 | 171 | 'enable a sparse profile')) |
|
172 | 172 | entry[1].append(('', 'include', [], |
|
173 | 173 | 'include sparse pattern')) |
|
174 | 174 | entry[1].append(('', 'exclude', [], |
|
175 | 175 | 'exclude sparse pattern')) |
|
176 | 176 | extensions.wrapcommand(commands.table, 'clone', _clonesparsecmd) |
|
177 | 177 | |
|
178 | 178 | def _setupadd(ui): |
|
179 | 179 | entry = commands.table['add'] |
|
180 | 180 | entry[1].append(('s', 'sparse', None, |
|
181 | 181 | 'also include directories of added files in sparse config')) |
|
182 | 182 | |
|
183 | 183 | def _add(orig, ui, repo, *pats, **opts): |
|
184 | 184 | if opts.get(r'sparse'): |
|
185 | 185 | dirs = set() |
|
186 | 186 | for pat in pats: |
|
187 | 187 | dirname, basename = util.split(pat) |
|
188 | 188 | dirs.add(dirname) |
|
189 | 189 | sparse.updateconfig(repo, list(dirs), opts, include=True) |
|
190 | 190 | return orig(ui, repo, *pats, **opts) |
|
191 | 191 | |
|
192 | 192 | extensions.wrapcommand(commands.table, 'add', _add) |
|
193 | 193 | |
|
194 | 194 | def _setupdirstate(ui): |
|
195 | 195 | """Modify the dirstate to prevent stat'ing excluded files, |
|
196 | 196 | and to prevent modifications to files outside the checkout. |
|
197 | 197 | """ |
|
198 | 198 | |
|
199 | 199 | def walk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
200 | 200 | # hack to not exclude explicitly-specified paths so that they can |
|
201 | 201 | # be warned later on e.g. dirstate.add() |
|
202 |     | em = matchmod.exact(

    | 202 | em = matchmod.exact(match.files())
|
203 | 203 | sm = matchmod.unionmatcher([self._sparsematcher, em]) |
|
204 | 204 | match = matchmod.intersectmatchers(match, sm) |
|
205 | 205 | return orig(self, match, subrepos, unknown, ignored, full) |
|
206 | 206 | |
|
207 | 207 | extensions.wrapfunction(dirstate.dirstate, 'walk', walk) |
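
In plain predicate terms, the composition above keeps a file only if the caller asked for it and it is either inside the sparse checkout or was named explicitly. A standalone sketch using ordinary functions instead of Mercurial matcher objects::

    def composewalkmatch(requested, sparsematch, explicitfiles):
        # union of sparse rules and explicitly-listed files ...
        allowed = lambda f: sparsematch(f) or f in explicitfiles
        # ... intersected with what the caller requested
        return lambda f: requested(f) and allowed(f)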
|
208 | 208 | |
|
209 | 209 | # dirstate.rebuild should not add non-matching files |
|
210 | 210 | def _rebuild(orig, self, parent, allfiles, changedfiles=None): |
|
211 | 211 | matcher = self._sparsematcher |
|
212 | 212 | if not matcher.always(): |
|
213 | 213 | allfiles = [f for f in allfiles if matcher(f)] |
|
214 | 214 | if changedfiles: |
|
215 | 215 | changedfiles = [f for f in changedfiles if matcher(f)] |
|
216 | 216 | |
|
217 | 217 | if changedfiles is not None: |
|
218 | 218 | # In _rebuild, these files will be deleted from the dirstate |
|
219 | 219 | # when they are not found to be in allfiles |
|
220 | 220 | dirstatefilestoremove = set(f for f in self if not matcher(f)) |
|
221 | 221 | changedfiles = dirstatefilestoremove.union(changedfiles) |
|
222 | 222 | |
|
223 | 223 | return orig(self, parent, allfiles, changedfiles) |
|
224 | 224 | extensions.wrapfunction(dirstate.dirstate, 'rebuild', _rebuild) |
|
225 | 225 | |
|
226 | 226 | # Prevent adding files that are outside the sparse checkout |
|
227 | 227 | editfuncs = ['normal', 'add', 'normallookup', 'copy', 'remove', 'merge'] |
|
228 | 228 | hint = _('include file with `hg debugsparse --include <pattern>` or use ' + |
|
229 | 229 | '`hg add -s <file>` to include file directory while adding') |
|
230 | 230 | for func in editfuncs: |
|
231 | 231 | def _wrapper(orig, self, *args): |
|
232 | 232 | sparsematch = self._sparsematcher |
|
233 | 233 | if not sparsematch.always(): |
|
234 | 234 | for f in args: |
|
235 | 235 | if (f is not None and not sparsematch(f) and |
|
236 | 236 | f not in self): |
|
237 | 237 | raise error.Abort(_("cannot add '%s' - it is outside " |
|
238 | 238 | "the sparse checkout") % f, |
|
239 | 239 | hint=hint) |
|
240 | 240 | return orig(self, *args) |
|
241 | 241 | extensions.wrapfunction(dirstate.dirstate, func, _wrapper) |
|
242 | 242 | |
|
243 | 243 | @command('debugsparse', [ |
|
244 | 244 | ('I', 'include', False, _('include files in the sparse checkout')), |
|
245 | 245 | ('X', 'exclude', False, _('exclude files in the sparse checkout')), |
|
246 | 246 | ('d', 'delete', False, _('delete an include/exclude rule')), |
|
247 | 247 | ('f', 'force', False, _('allow changing rules even with pending changes')), |
|
248 | 248 | ('', 'enable-profile', False, _('enables the specified profile')), |
|
249 | 249 | ('', 'disable-profile', False, _('disables the specified profile')), |
|
250 | 250 | ('', 'import-rules', False, _('imports rules from a file')), |
|
251 | 251 | ('', 'clear-rules', False, _('clears local include/exclude rules')), |
|
252 | 252 | ('', 'refresh', False, _('updates the working after sparseness changes')), |
|
253 | 253 | ('', 'reset', False, _('makes the repo full again')), |
|
254 | 254 | ] + commands.templateopts, |
|
255 | 255 | _('[--OPTION] PATTERN...'), |
|
256 | 256 | helpbasic=True) |
|
257 | 257 | def debugsparse(ui, repo, *pats, **opts): |
|
258 | 258 | """make the current checkout sparse, or edit the existing checkout |
|
259 | 259 | |
|
260 | 260 | The sparse command is used to make the current checkout sparse. |
|
261 | 261 | This means files that don't meet the sparse condition will not be |
|
262 | 262 | written to disk, or show up in any working copy operations. It does |
|
263 | 263 | not affect files in history in any way. |
|
264 | 264 | |
|
265 | 265 | Passing no arguments prints the currently applied sparse rules. |
|
266 | 266 | |
|
267 | 267 | --include and --exclude are used to add and remove files from the sparse |
|
268 | 268 | checkout. The effects of adding an include or exclude rule are applied |
|
269 | 269 | immediately. If applying the new rule would cause a file with pending |
|
270 | 270 | changes to be added or removed, the command will fail. Pass --force to |
|
271 | 271 | force a rule change even with pending changes (the changes on disk will |
|
272 | 272 | be preserved). |
|
273 | 273 | |
|
274 | 274 | --delete removes an existing include/exclude rule. The effects are |
|
275 | 275 | immediate. |
|
276 | 276 | |
|
277 | 277 | --refresh refreshes the files on disk based on the sparse rules. This is |
|
278 | 278 | only necessary if .hg/sparse was changed by hand. |
|
279 | 279 | |
|
280 | 280 | --enable-profile and --disable-profile accept a path to a .hgsparse file. |
|
281 | 281 | This allows defining sparse checkouts and tracking them inside the |
|
282 | 282 | repository. This is useful for defining commonly used sparse checkouts for |
|
283 | 283 | many people to use. As the profile definition changes over time, the sparse |
|
284 | 284 | checkout will automatically be updated appropriately, depending on which |
|
285 | 285 | changeset is checked out. Changes to .hgsparse are not applied until they |
|
286 | 286 | have been committed. |
|
287 | 287 | |
|
288 | 288 | --import-rules accepts a path to a file containing rules in the .hgsparse |
|
289 | 289 | format, allowing you to add --include, --exclude and --enable-profile rules |
|
290 | 290 | in bulk. Like the --include, --exclude and --enable-profile switches, the |
|
291 | 291 | changes are applied immediately. |
|
292 | 292 | |
|
293 | 293 | --clear-rules removes all local include and exclude rules, while leaving |
|
294 | 294 | any enabled profiles in place. |
|
295 | 295 | |
|
296 | 296 | Returns 0 if editing the sparse checkout succeeds. |
|
297 | 297 | """ |
|
298 | 298 | opts = pycompat.byteskwargs(opts) |
|
299 | 299 | include = opts.get('include') |
|
300 | 300 | exclude = opts.get('exclude') |
|
301 | 301 | force = opts.get('force') |
|
302 | 302 | enableprofile = opts.get('enable_profile') |
|
303 | 303 | disableprofile = opts.get('disable_profile') |
|
304 | 304 | importrules = opts.get('import_rules') |
|
305 | 305 | clearrules = opts.get('clear_rules') |
|
306 | 306 | delete = opts.get('delete') |
|
307 | 307 | refresh = opts.get('refresh') |
|
308 | 308 | reset = opts.get('reset') |
|
309 | 309 | count = sum([include, exclude, enableprofile, disableprofile, delete, |
|
310 | 310 | importrules, refresh, clearrules, reset]) |
|
311 | 311 | if count > 1: |
|
312 | 312 | raise error.Abort(_("too many flags specified")) |
|
313 | 313 | |
|
314 | 314 | if count == 0: |
|
315 | 315 | if repo.vfs.exists('sparse'): |
|
316 | 316 | ui.status(repo.vfs.read("sparse") + "\n") |
|
317 | 317 | temporaryincludes = sparse.readtemporaryincludes(repo) |
|
318 | 318 | if temporaryincludes: |
|
319 | 319 | ui.status(_("Temporarily Included Files (for merge/rebase):\n")) |
|
320 | 320 | ui.status(("\n".join(temporaryincludes) + "\n")) |
|
321 | 321 | else: |
|
322 | 322 | ui.status(_('repo is not sparse\n')) |
|
323 | 323 | return |
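# Hypothetical illustration (not taken from the code above): the .hg/sparse
# file echoed by the no-argument branch just above usually holds profile
# references plus include/exclude sections, along these lines:
#
#   %include tools/sparse-profiles/frontend
#   [include]
#   src/frontend/
#   [exclude]
#   src/frontend/generated/
#
# The profile name and paths here are made up for the example.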
|
324 | 324 | |
|
325 | 325 | if include or exclude or delete or reset or enableprofile or disableprofile: |
|
326 | 326 | sparse.updateconfig(repo, pats, opts, include=include, exclude=exclude, |
|
327 | 327 | reset=reset, delete=delete, |
|
328 | 328 | enableprofile=enableprofile, |
|
329 | 329 | disableprofile=disableprofile, force=force) |
|
330 | 330 | |
|
331 | 331 | if importrules: |
|
332 | 332 | sparse.importfromfiles(repo, opts, pats, force=force) |
|
333 | 333 | |
|
334 | 334 | if clearrules: |
|
335 | 335 | sparse.clearrules(repo, force=force) |
|
336 | 336 | |
|
337 | 337 | if refresh: |
|
338 | 338 | try: |
|
339 | 339 | wlock = repo.wlock() |
|
340 | 340 | fcounts = map( |
|
341 | 341 | len, |
|
342 | 342 | sparse.refreshwdir(repo, repo.status(), sparse.matcher(repo), |
|
343 | 343 | force=force)) |
|
344 | 344 | sparse.printchanges(ui, opts, added=fcounts[0], dropped=fcounts[1], |
|
345 | 345 | conflicting=fcounts[2]) |
|
346 | 346 | finally: |
|
347 | 347 | wlock.release() |
@@ -1,765 +1,765 b'' | |||
|
1 | 1 | # Patch transplanting extension for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''command to transplant changesets from another branch |
|
9 | 9 | |
|
10 | 10 | This extension allows you to transplant changes to another parent revision, |
|
11 | 11 | possibly in another repository. The transplant is done using 'diff' patches. |
|
12 | 12 | |
|
13 | 13 | Transplanted patches are recorded in .hg/transplant/transplants, as a |
|
14 | 14 | map from a changeset hash to its hash in the source repository. |
|
15 | 15 | ''' |
|
16 | 16 | from __future__ import absolute_import |
|
17 | 17 | |
|
18 | 18 | import os |
|
19 | 19 | |
|
20 | 20 | from mercurial.i18n import _ |
|
21 | 21 | from mercurial import ( |
|
22 | 22 | bundlerepo, |
|
23 | 23 | cmdutil, |
|
24 | 24 | error, |
|
25 | 25 | exchange, |
|
26 | 26 | hg, |
|
27 | 27 | logcmdutil, |
|
28 | 28 | match, |
|
29 | 29 | merge, |
|
30 | 30 | node as nodemod, |
|
31 | 31 | patch, |
|
32 | 32 | pycompat, |
|
33 | 33 | registrar, |
|
34 | 34 | revlog, |
|
35 | 35 | revset, |
|
36 | 36 | scmutil, |
|
37 | 37 | smartset, |
|
38 | 38 | util, |
|
39 | 39 | vfs as vfsmod, |
|
40 | 40 | ) |
|
41 | 41 | from mercurial.utils import ( |
|
42 | 42 | procutil, |
|
43 | 43 | stringutil, |
|
44 | 44 | ) |
|
45 | 45 | |
|
46 | 46 | class TransplantError(error.Abort): |
|
47 | 47 | pass |
|
48 | 48 | |
|
49 | 49 | cmdtable = {} |
|
50 | 50 | command = registrar.command(cmdtable) |
|
51 | 51 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
52 | 52 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
53 | 53 | # be specifying the version(s) of Mercurial they are tested with, or |
|
54 | 54 | # leave the attribute unspecified. |
|
55 | 55 | testedwith = 'ships-with-hg-core' |
|
56 | 56 | |
|
57 | 57 | configtable = {} |
|
58 | 58 | configitem = registrar.configitem(configtable) |
|
59 | 59 | |
|
60 | 60 | configitem('transplant', 'filter', |
|
61 | 61 | default=None, |
|
62 | 62 | ) |
|
63 | 63 | configitem('transplant', 'log', |
|
64 | 64 | default=None, |
|
65 | 65 | ) |
|
66 | 66 | |
|
67 | 67 | class transplantentry(object): |
|
68 | 68 | def __init__(self, lnode, rnode): |
|
69 | 69 | self.lnode = lnode |
|
70 | 70 | self.rnode = rnode |
|
71 | 71 | |
|
72 | 72 | class transplants(object): |
|
73 | 73 | def __init__(self, path=None, transplantfile=None, opener=None): |
|
74 | 74 | self.path = path |
|
75 | 75 | self.transplantfile = transplantfile |
|
76 | 76 | self.opener = opener |
|
77 | 77 | |
|
78 | 78 | if not opener: |
|
79 | 79 | self.opener = vfsmod.vfs(self.path) |
|
80 | 80 | self.transplants = {} |
|
81 | 81 | self.dirty = False |
|
82 | 82 | self.read() |
|
83 | 83 | |
|
84 | 84 | def read(self): |
|
85 | 85 | abspath = os.path.join(self.path, self.transplantfile) |
|
86 | 86 | if self.transplantfile and os.path.exists(abspath): |
|
87 | 87 | for line in self.opener.read(self.transplantfile).splitlines(): |
|
88 | 88 | lnode, rnode = map(revlog.bin, line.split(':')) |
|
89 | 89 | list = self.transplants.setdefault(rnode, []) |
|
90 | 90 | list.append(transplantentry(lnode, rnode)) |
|
91 | 91 | |
|
92 | 92 | def write(self): |
|
93 | 93 | if self.dirty and self.transplantfile: |
|
94 | 94 | if not os.path.isdir(self.path): |
|
95 | 95 | os.mkdir(self.path) |
|
96 | 96 | fp = self.opener(self.transplantfile, 'w') |
|
97 | 97 | for list in self.transplants.itervalues(): |
|
98 | 98 | for t in list: |
|
99 | 99 | l, r = map(nodemod.hex, (t.lnode, t.rnode)) |
|
100 | 100 | fp.write(l + ':' + r + '\n') |
|
101 | 101 | fp.close() |
|
102 | 102 | self.dirty = False |
|
103 | 103 | |
|
104 | 104 | def get(self, rnode): |
|
105 | 105 | return self.transplants.get(rnode) or [] |
|
106 | 106 | |
|
107 | 107 | def set(self, lnode, rnode): |
|
108 | 108 | list = self.transplants.setdefault(rnode, []) |
|
109 | 109 | list.append(transplantentry(lnode, rnode)) |
|
110 | 110 | self.dirty = True |
|
111 | 111 | |
|
112 | 112 | def remove(self, transplant): |
|
113 | 113 | list = self.transplants.get(transplant.rnode) |
|
114 | 114 | if list: |
|
115 | 115 | del list[list.index(transplant)] |
|
116 | 116 | self.dirty = True |
|
117 | 117 | |
|
118 | 118 | class transplanter(object): |
|
119 | 119 | def __init__(self, ui, repo, opts): |
|
120 | 120 | self.ui = ui |
|
121 | 121 | self.path = repo.vfs.join('transplant') |
|
122 | 122 | self.opener = vfsmod.vfs(self.path) |
|
123 | 123 | self.transplants = transplants(self.path, 'transplants', |
|
124 | 124 | opener=self.opener) |
|
125 | 125 | def getcommiteditor(): |
|
126 | 126 | editform = cmdutil.mergeeditform(repo[None], 'transplant') |
|
127 | 127 | return cmdutil.getcommiteditor(editform=editform, |
|
128 | 128 | **pycompat.strkwargs(opts)) |
|
129 | 129 | self.getcommiteditor = getcommiteditor |
|
130 | 130 | |
|
131 | 131 | def applied(self, repo, node, parent): |
|
132 | 132 | '''returns True if a node is already an ancestor of parent |
|
133 | 133 | or is parent or has already been transplanted''' |
|
134 | 134 | if hasnode(repo, parent): |
|
135 | 135 | parentrev = repo.changelog.rev(parent) |
|
136 | 136 | if hasnode(repo, node): |
|
137 | 137 | rev = repo.changelog.rev(node) |
|
138 | 138 | reachable = repo.changelog.ancestors([parentrev], rev, |
|
139 | 139 | inclusive=True) |
|
140 | 140 | if rev in reachable: |
|
141 | 141 | return True |
|
142 | 142 | for t in self.transplants.get(node): |
|
143 | 143 | # it might have been stripped |
|
144 | 144 | if not hasnode(repo, t.lnode): |
|
145 | 145 | self.transplants.remove(t) |
|
146 | 146 | return False |
|
147 | 147 | lnoderev = repo.changelog.rev(t.lnode) |
|
148 | 148 | if lnoderev in repo.changelog.ancestors([parentrev], lnoderev, |
|
149 | 149 | inclusive=True): |
|
150 | 150 | return True |
|
151 | 151 | return False |
|
152 | 152 | |
|
153 | 153 | def apply(self, repo, source, revmap, merges, opts=None): |
|
154 | 154 | '''apply the revisions in revmap one by one in revision order''' |
|
155 | 155 | if opts is None: |
|
156 | 156 | opts = {} |
|
157 | 157 | revs = sorted(revmap) |
|
158 | 158 | p1 = repo.dirstate.p1() |
|
159 | 159 | pulls = [] |
|
160 | 160 | diffopts = patch.difffeatureopts(self.ui, opts) |
|
161 | 161 | diffopts.git = True |
|
162 | 162 | |
|
163 | 163 | lock = tr = None |
|
164 | 164 | try: |
|
165 | 165 | lock = repo.lock() |
|
166 | 166 | tr = repo.transaction('transplant') |
|
167 | 167 | for rev in revs: |
|
168 | 168 | node = revmap[rev] |
|
169 | 169 | revstr = '%d:%s' % (rev, nodemod.short(node)) |
|
170 | 170 | |
|
171 | 171 | if self.applied(repo, node, p1): |
|
172 | 172 | self.ui.warn(_('skipping already applied revision %s\n') % |
|
173 | 173 | revstr) |
|
174 | 174 | continue |
|
175 | 175 | |
|
176 | 176 | parents = source.changelog.parents(node) |
|
177 | 177 | if not (opts.get('filter') or opts.get('log')): |
|
178 | 178 | # If the changeset parent is the same as the |
|
179 | 179 | # wdir's parent, just pull it. |
|
180 | 180 | if parents[0] == p1: |
|
181 | 181 | pulls.append(node) |
|
182 | 182 | p1 = node |
|
183 | 183 | continue |
|
184 | 184 | if pulls: |
|
185 | 185 | if source != repo: |
|
186 | 186 | exchange.pull(repo, source.peer(), heads=pulls) |
|
187 | 187 | merge.update(repo, pulls[-1], branchmerge=False, |
|
188 | 188 | force=False) |
|
189 | 189 | p1 = repo.dirstate.p1() |
|
190 | 190 | pulls = [] |
|
191 | 191 | |
|
192 | 192 | domerge = False |
|
193 | 193 | if node in merges: |
|
194 | 194 | # pulling all the merge revs at once would mean we |
|
195 | 195 | # couldn't transplant after the latest even if |
|
196 | 196 | # transplants before them fail. |
|
197 | 197 | domerge = True |
|
198 | 198 | if not hasnode(repo, node): |
|
199 | 199 | exchange.pull(repo, source.peer(), heads=[node]) |
|
200 | 200 | |
|
201 | 201 | skipmerge = False |
|
202 | 202 | if parents[1] != revlog.nullid: |
|
203 | 203 | if not opts.get('parent'): |
|
204 | 204 | self.ui.note(_('skipping merge changeset %d:%s\n') |
|
205 | 205 | % (rev, nodemod.short(node))) |
|
206 | 206 | skipmerge = True |
|
207 | 207 | else: |
|
208 | 208 | parent = source.lookup(opts['parent']) |
|
209 | 209 | if parent not in parents: |
|
210 | 210 | raise error.Abort(_('%s is not a parent of %s') % |
|
211 | 211 | (nodemod.short(parent), |
|
212 | 212 | nodemod.short(node))) |
|
213 | 213 | else: |
|
214 | 214 | parent = parents[0] |
|
215 | 215 | |
|
216 | 216 | if skipmerge: |
|
217 | 217 | patchfile = None |
|
218 | 218 | else: |
|
219 | 219 | fd, patchfile = pycompat.mkstemp(prefix='hg-transplant-') |
|
220 | 220 | fp = os.fdopen(fd, r'wb') |
|
221 | 221 | gen = patch.diff(source, parent, node, opts=diffopts) |
|
222 | 222 | for chunk in gen: |
|
223 | 223 | fp.write(chunk) |
|
224 | 224 | fp.close() |
|
225 | 225 | |
|
226 | 226 | del revmap[rev] |
|
227 | 227 | if patchfile or domerge: |
|
228 | 228 | try: |
|
229 | 229 | try: |
|
230 | 230 | n = self.applyone(repo, node, |
|
231 | 231 | source.changelog.read(node), |
|
232 | 232 | patchfile, merge=domerge, |
|
233 | 233 | log=opts.get('log'), |
|
234 | 234 | filter=opts.get('filter')) |
|
235 | 235 | except TransplantError: |
|
236 | 236 | # Do not rollback, it is up to the user to |
|
237 | 237 | # fix the merge or cancel everything |
|
238 | 238 | tr.close() |
|
239 | 239 | raise |
|
240 | 240 | if n and domerge: |
|
241 | 241 | self.ui.status(_('%s merged at %s\n') % (revstr, |
|
242 | 242 | nodemod.short(n))) |
|
243 | 243 | elif n: |
|
244 | 244 | self.ui.status(_('%s transplanted to %s\n') |
|
245 | 245 | % (nodemod.short(node), |
|
246 | 246 | nodemod.short(n))) |
|
247 | 247 | finally: |
|
248 | 248 | if patchfile: |
|
249 | 249 | os.unlink(patchfile) |
|
250 | 250 | tr.close() |
|
251 | 251 | if pulls: |
|
252 | 252 | exchange.pull(repo, source.peer(), heads=pulls) |
|
253 | 253 | merge.update(repo, pulls[-1], branchmerge=False, force=False) |
|
254 | 254 | finally: |
|
255 | 255 | self.saveseries(revmap, merges) |
|
256 | 256 | self.transplants.write() |
|
257 | 257 | if tr: |
|
258 | 258 | tr.release() |
|
259 | 259 | if lock: |
|
260 | 260 | lock.release() |
|
261 | 261 | |
|
262 | 262 | def filter(self, filter, node, changelog, patchfile): |
|
263 | 263 | '''arbitrarily rewrite changeset before applying it''' |
|
264 | 264 | |
|
265 | 265 | self.ui.status(_('filtering %s\n') % patchfile) |
|
266 | 266 | user, date, msg = (changelog[1], changelog[2], changelog[4]) |
|
267 | 267 | fd, headerfile = pycompat.mkstemp(prefix='hg-transplant-') |
|
268 | 268 | fp = os.fdopen(fd, r'wb') |
|
269 | 269 | fp.write("# HG changeset patch\n") |
|
270 | 270 | fp.write("# User %s\n" % user) |
|
271 | 271 | fp.write("# Date %d %d\n" % date) |
|
272 | 272 | fp.write(msg + '\n') |
|
273 | 273 | fp.close() |
|
274 | 274 | |
|
275 | 275 | try: |
|
276 | 276 | self.ui.system('%s %s %s' % (filter, |
|
277 | 277 | procutil.shellquote(headerfile), |
|
278 | 278 | procutil.shellquote(patchfile)), |
|
279 | 279 | environ={'HGUSER': changelog[1], |
|
280 | 280 | 'HGREVISION': nodemod.hex(node), |
|
281 | 281 | }, |
|
282 | 282 | onerr=error.Abort, errprefix=_('filter failed'), |
|
283 | 283 | blockedtag='transplant_filter') |
|
284 | 284 | user, date, msg = self.parselog(open(headerfile, 'rb'))[1:4] |
|
285 | 285 | finally: |
|
286 | 286 | os.unlink(headerfile) |
|
287 | 287 | |
|
288 | 288 | return (user, date, msg) |
|
289 | 289 | |
|
290 | 290 | def applyone(self, repo, node, cl, patchfile, merge=False, log=False, |
|
291 | 291 | filter=None): |
|
292 | 292 | '''apply the patch in patchfile to the repository as a transplant''' |
|
293 | 293 | (manifest, user, (time, timezone), files, message) = cl[:5] |
|
294 | 294 | date = "%d %d" % (time, timezone) |
|
295 | 295 | extra = {'transplant_source': node} |
|
296 | 296 | if filter: |
|
297 | 297 | (user, date, message) = self.filter(filter, node, cl, patchfile) |
|
298 | 298 | |
|
299 | 299 | if log: |
|
300 | 300 | # we don't translate messages inserted into commits |
|
301 | 301 | message += '\n(transplanted from %s)' % nodemod.hex(node) |
|
302 | 302 | |
|
303 | 303 | self.ui.status(_('applying %s\n') % nodemod.short(node)) |
|
304 | 304 | self.ui.note('%s %s\n%s\n' % (user, date, message)) |
|
305 | 305 | |
|
306 | 306 | if not patchfile and not merge: |
|
307 | 307 | raise error.Abort(_('can only omit patchfile if merging')) |
|
308 | 308 | if patchfile: |
|
309 | 309 | try: |
|
310 | 310 | files = set() |
|
311 | 311 | patch.patch(self.ui, repo, patchfile, files=files, eolmode=None) |
|
312 | 312 | files = list(files) |
|
313 | 313 | except Exception as inst: |
|
314 | 314 | seriespath = os.path.join(self.path, 'series') |
|
315 | 315 | if os.path.exists(seriespath): |
|
316 | 316 | os.unlink(seriespath) |
|
317 | 317 | p1 = repo.dirstate.p1() |
|
318 | 318 | p2 = node |
|
319 | 319 | self.log(user, date, message, p1, p2, merge=merge) |
|
320 | 320 | self.ui.write(stringutil.forcebytestr(inst) + '\n') |
|
321 | 321 | raise TransplantError(_('fix up the working directory and run ' |
|
322 | 322 | 'hg transplant --continue')) |
|
323 | 323 | else: |
|
324 | 324 | files = None |
|
325 | 325 | if merge: |
|
326 | 326 | p1 = repo.dirstate.p1() |
|
327 | 327 | repo.setparents(p1, node) |
|
328 |     | m = match.always(

    | 328 | m = match.always()

329 | 329 | else:

330 |     | m = match.exact(

    | 330 | m = match.exact(files)
|
331 | 331 | |
|
332 | 332 | n = repo.commit(message, user, date, extra=extra, match=m, |
|
333 | 333 | editor=self.getcommiteditor()) |
|
334 | 334 | if not n: |
|
335 | 335 | self.ui.warn(_('skipping emptied changeset %s\n') % |
|
336 | 336 | nodemod.short(node)) |
|
337 | 337 | return None |
|
338 | 338 | if not merge: |
|
339 | 339 | self.transplants.set(n, node) |
|
340 | 340 | |
|
341 | 341 | return n |
|
342 | 342 | |
|
343 | 343 | def canresume(self): |
|
344 | 344 | return os.path.exists(os.path.join(self.path, 'journal')) |
|
345 | 345 | |
|
346 | 346 | def resume(self, repo, source, opts): |
|
347 | 347 | '''recover last transaction and apply remaining changesets''' |
|
348 | 348 | if os.path.exists(os.path.join(self.path, 'journal')): |
|
349 | 349 | n, node = self.recover(repo, source, opts) |
|
350 | 350 | if n: |
|
351 | 351 | self.ui.status(_('%s transplanted as %s\n') % |
|
352 | 352 | (nodemod.short(node), |
|
353 | 353 | nodemod.short(n))) |
|
354 | 354 | else: |
|
355 | 355 | self.ui.status(_('%s skipped due to empty diff\n') |
|
356 | 356 | % (nodemod.short(node),)) |
|
357 | 357 | seriespath = os.path.join(self.path, 'series') |
|
358 | 358 | if not os.path.exists(seriespath): |
|
359 | 359 | self.transplants.write() |
|
360 | 360 | return |
|
361 | 361 | nodes, merges = self.readseries() |
|
362 | 362 | revmap = {} |
|
363 | 363 | for n in nodes: |
|
364 | 364 | revmap[source.changelog.rev(n)] = n |
|
365 | 365 | os.unlink(seriespath) |
|
366 | 366 | |
|
367 | 367 | self.apply(repo, source, revmap, merges, opts) |
|
368 | 368 | |
|
369 | 369 | def recover(self, repo, source, opts): |
|
370 | 370 | '''commit working directory using journal metadata''' |
|
371 | 371 | node, user, date, message, parents = self.readlog() |
|
372 | 372 | merge = False |
|
373 | 373 | |
|
374 | 374 | if not user or not date or not message or not parents[0]: |
|
375 | 375 | raise error.Abort(_('transplant log file is corrupt')) |
|
376 | 376 | |
|
377 | 377 | parent = parents[0] |
|
378 | 378 | if len(parents) > 1: |
|
379 | 379 | if opts.get('parent'): |
|
380 | 380 | parent = source.lookup(opts['parent']) |
|
381 | 381 | if parent not in parents: |
|
382 | 382 | raise error.Abort(_('%s is not a parent of %s') % |
|
383 | 383 | (nodemod.short(parent), |
|
384 | 384 | nodemod.short(node))) |
|
385 | 385 | else: |
|
386 | 386 | merge = True |
|
387 | 387 | |
|
388 | 388 | extra = {'transplant_source': node} |
|
389 | 389 | try: |
|
390 | 390 | p1 = repo.dirstate.p1() |
|
391 | 391 | if p1 != parent: |
|
392 | 392 | raise error.Abort(_('working directory not at transplant ' |
|
393 | 393 | 'parent %s') % nodemod.hex(parent)) |
|
394 | 394 | if merge: |
|
395 | 395 | repo.setparents(p1, parents[1]) |
|
396 | 396 | modified, added, removed, deleted = repo.status()[:4] |
|
397 | 397 | if merge or modified or added or removed or deleted: |
|
398 | 398 | n = repo.commit(message, user, date, extra=extra, |
|
399 | 399 | editor=self.getcommiteditor()) |
|
400 | 400 | if not n: |
|
401 | 401 | raise error.Abort(_('commit failed')) |
|
402 | 402 | if not merge: |
|
403 | 403 | self.transplants.set(n, node) |
|
404 | 404 | else: |
|
405 | 405 | n = None |
|
406 | 406 | self.unlog() |
|
407 | 407 | |
|
408 | 408 | return n, node |
|
409 | 409 | finally: |
|
410 | 410 | # TODO: get rid of this meaningless try/finally enclosing. |
|
411 | 411 | # this is kept only to reduce changes in a patch. |
|
412 | 412 | pass |
|
413 | 413 | |
|
414 | 414 | def readseries(self): |
|
415 | 415 | nodes = [] |
|
416 | 416 | merges = [] |
|
417 | 417 | cur = nodes |
|
418 | 418 | for line in self.opener.read('series').splitlines(): |
|
419 | 419 | if line.startswith('# Merges'): |
|
420 | 420 | cur = merges |
|
421 | 421 | continue |
|
422 | 422 | cur.append(revlog.bin(line)) |
|
423 | 423 | |
|
424 | 424 | return (nodes, merges) |
|
425 | 425 | |
|
426 | 426 | def saveseries(self, revmap, merges): |
|
427 | 427 | if not revmap: |
|
428 | 428 | return |
|
429 | 429 | |
|
430 | 430 | if not os.path.isdir(self.path): |
|
431 | 431 | os.mkdir(self.path) |
|
432 | 432 | series = self.opener('series', 'w') |
|
433 | 433 | for rev in sorted(revmap): |
|
434 | 434 | series.write(nodemod.hex(revmap[rev]) + '\n') |
|
435 | 435 | if merges: |
|
436 | 436 | series.write('# Merges\n') |
|
437 | 437 | for m in merges: |
|
438 | 438 | series.write(nodemod.hex(m) + '\n') |
|
439 | 439 | series.close() |
|
440 | 440 | |
|
441 | 441 | def parselog(self, fp): |
|
442 | 442 | parents = [] |
|
443 | 443 | message = [] |
|
444 | 444 | node = revlog.nullid |
|
445 | 445 | inmsg = False |
|
446 | 446 | user = None |
|
447 | 447 | date = None |
|
448 | 448 | for line in fp.read().splitlines(): |
|
449 | 449 | if inmsg: |
|
450 | 450 | message.append(line) |
|
451 | 451 | elif line.startswith('# User '): |
|
452 | 452 | user = line[7:] |
|
453 | 453 | elif line.startswith('# Date '): |
|
454 | 454 | date = line[7:] |
|
455 | 455 | elif line.startswith('# Node ID '): |
|
456 | 456 | node = revlog.bin(line[10:]) |
|
457 | 457 | elif line.startswith('# Parent '): |
|
458 | 458 | parents.append(revlog.bin(line[9:])) |
|
459 | 459 | elif not line.startswith('# '): |
|
460 | 460 | inmsg = True |
|
461 | 461 | message.append(line) |
|
462 | 462 | if None in (user, date): |
|
463 | 463 | raise error.Abort(_("filter corrupted changeset (no user or date)")) |
|
464 | 464 | return (node, user, date, '\n'.join(message), parents) |
|
465 | 465 | |
|
466 | 466 | def log(self, user, date, message, p1, p2, merge=False): |
|
467 | 467 | '''journal changelog metadata for later recover''' |
|
468 | 468 | |
|
469 | 469 | if not os.path.isdir(self.path): |
|
470 | 470 | os.mkdir(self.path) |
|
471 | 471 | fp = self.opener('journal', 'w') |
|
472 | 472 | fp.write('# User %s\n' % user) |
|
473 | 473 | fp.write('# Date %s\n' % date) |
|
474 | 474 | fp.write('# Node ID %s\n' % nodemod.hex(p2)) |
|
475 | 475 | fp.write('# Parent ' + nodemod.hex(p1) + '\n') |
|
476 | 476 | if merge: |
|
477 | 477 | fp.write('# Parent ' + nodemod.hex(p2) + '\n') |
|
478 | 478 | fp.write(message.rstrip() + '\n') |
|
479 | 479 | fp.close() |
|
480 | 480 | |
|
481 | 481 | def readlog(self): |
|
482 | 482 | return self.parselog(self.opener('journal')) |
|
483 | 483 | |
|
484 | 484 | def unlog(self): |
|
485 | 485 | '''remove changelog journal''' |
|
486 | 486 | absdst = os.path.join(self.path, 'journal') |
|
487 | 487 | if os.path.exists(absdst): |
|
488 | 488 | os.unlink(absdst) |
|
489 | 489 | |
|
490 | 490 | def transplantfilter(self, repo, source, root): |
|
491 | 491 | def matchfn(node): |
|
492 | 492 | if self.applied(repo, node, root): |
|
493 | 493 | return False |
|
494 | 494 | if source.changelog.parents(node)[1] != revlog.nullid: |
|
495 | 495 | return False |
|
496 | 496 | extra = source.changelog.read(node)[5] |
|
497 | 497 | cnode = extra.get('transplant_source') |
|
498 | 498 | if cnode and self.applied(repo, cnode, root): |
|
499 | 499 | return False |
|
500 | 500 | return True |
|
501 | 501 | |
|
502 | 502 | return matchfn |
|
503 | 503 | |
|
504 | 504 | def hasnode(repo, node): |
|
505 | 505 | try: |
|
506 | 506 | return repo.changelog.rev(node) is not None |
|
507 | 507 | except error.StorageError: |
|
508 | 508 | return False |
|
509 | 509 | |
|
510 | 510 | def browserevs(ui, repo, nodes, opts): |
|
511 | 511 | '''interactively transplant changesets''' |
|
512 | 512 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
513 | 513 | transplants = [] |
|
514 | 514 | merges = [] |
|
515 | 515 | prompt = _('apply changeset? [ynmpcq?]:' |
|
516 | 516 | '$$ &yes, transplant this changeset' |
|
517 | 517 | '$$ &no, skip this changeset' |
|
518 | 518 | '$$ &merge at this changeset' |
|
519 | 519 | '$$ show &patch' |
|
520 | 520 | '$$ &commit selected changesets' |
|
521 | 521 | '$$ &quit and cancel transplant' |
|
522 | 522 | '$$ &? (show this help)') |
|
523 | 523 | for node in nodes: |
|
524 | 524 | displayer.show(repo[node]) |
|
525 | 525 | action = None |
|
526 | 526 | while not action: |
|
527 | 527 | choice = ui.promptchoice(prompt) |
|
528 | 528 | action = 'ynmpcq?'[choice:choice + 1] |
|
529 | 529 | if action == '?': |
|
530 | 530 | for c, t in ui.extractchoices(prompt)[1]: |
|
531 | 531 | ui.write('%s: %s\n' % (c, t)) |
|
532 | 532 | action = None |
|
533 | 533 | elif action == 'p': |
|
534 | 534 | parent = repo.changelog.parents(node)[0] |
|
535 | 535 | for chunk in patch.diff(repo, parent, node): |
|
536 | 536 | ui.write(chunk) |
|
537 | 537 | action = None |
|
538 | 538 | if action == 'y': |
|
539 | 539 | transplants.append(node) |
|
540 | 540 | elif action == 'm': |
|
541 | 541 | merges.append(node) |
|
542 | 542 | elif action == 'c': |
|
543 | 543 | break |
|
544 | 544 | elif action == 'q': |
|
545 | 545 | transplants = () |
|
546 | 546 | merges = () |
|
547 | 547 | break |
|
548 | 548 | displayer.close() |
|
549 | 549 | return (transplants, merges) |
|
550 | 550 | |
|
551 | 551 | @command('transplant', |
|
552 | 552 | [('s', 'source', '', _('transplant changesets from REPO'), _('REPO')), |
|
553 | 553 | ('b', 'branch', [], _('use this source changeset as head'), _('REV')), |
|
554 | 554 | ('a', 'all', None, _('pull all changesets up to the --branch revisions')), |
|
555 | 555 | ('p', 'prune', [], _('skip over REV'), _('REV')), |
|
556 | 556 | ('m', 'merge', [], _('merge at REV'), _('REV')), |
|
557 | 557 | ('', 'parent', '', |
|
558 | 558 | _('parent to choose when transplanting merge'), _('REV')), |
|
559 | 559 | ('e', 'edit', False, _('invoke editor on commit messages')), |
|
560 | 560 | ('', 'log', None, _('append transplant info to log message')), |
|
561 | 561 | ('c', 'continue', None, _('continue last transplant session ' |
|
562 | 562 | 'after fixing conflicts')), |
|
563 | 563 | ('', 'filter', '', |
|
564 | 564 | _('filter changesets through command'), _('CMD'))], |
|
565 | 565 | _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] ' |
|
566 | 566 | '[-m REV] [REV]...'), |
|
567 | 567 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT) |
|
568 | 568 | def transplant(ui, repo, *revs, **opts): |
|
569 | 569 | '''transplant changesets from another branch |
|
570 | 570 | |
|
571 | 571 | Selected changesets will be applied on top of the current working |
|
572 | 572 | directory with the log of the original changeset. The changesets |
|
573 | 573 | are copied and will thus appear twice in the history with different |
|
574 | 574 | identities. |
|
575 | 575 | |
|
576 | 576 | Consider using the graft command if everything is inside the same |
|
577 | 577 | repository - it will use merges and will usually give a better result. |
|
578 | 578 | Use the rebase extension if the changesets are unpublished and you want |
|
579 | 579 | to move them instead of copying them. |
|
580 | 580 | |
|
581 | 581 | If --log is specified, log messages will have a comment appended |
|
582 | 582 | of the form:: |
|
583 | 583 | |
|
584 | 584 | (transplanted from CHANGESETHASH) |
|
585 | 585 | |
|
586 | 586 | You can rewrite the changelog message with the --filter option. |
|
587 | 587 | Its argument will be invoked with the current changelog message as |
|
588 | 588 | $1 and the patch as $2. |
|
589 | 589 | |
|
590 | 590 | --source/-s specifies another repository to use for selecting changesets, |
|
591 | 591 | just as if it temporarily had been pulled. |
|
592 | 592 | If --branch/-b is specified, these revisions will be used as |
|
593 | 593 | heads when deciding which changesets to transplant, just as if only |
|
594 | 594 | these revisions had been pulled. |
|
595 | 595 | If --all/-a is specified, all the revisions up to the heads specified |
|
596 | 596 | with --branch will be transplanted. |
|
597 | 597 | |
|
598 | 598 | Example: |
|
599 | 599 | |
|
600 | 600 | - transplant all changes up to REV on top of your current revision:: |
|
601 | 601 | |
|
602 | 602 | hg transplant --branch REV --all |
|
603 | 603 | |
|
604 | 604 | You can optionally mark selected transplanted changesets as merge |
|
605 | 605 | changesets. You will not be prompted to transplant any ancestors |
|
606 | 606 | of a merged transplant, and you can merge descendants of them |
|
607 | 607 | normally instead of transplanting them. |
|
608 | 608 | |
|
609 | 609 | Merge changesets may be transplanted directly by specifying the |
|
610 | 610 | proper parent changeset by calling :hg:`transplant --parent`. |
|
611 | 611 | |
|
612 | 612 | If no merges or revisions are provided, :hg:`transplant` will |
|
613 | 613 | start an interactive changeset browser. |
|
614 | 614 | |
|
615 | 615 | If a changeset application fails, you can fix the merge by hand |
|
616 | 616 | and then resume where you left off by calling :hg:`transplant |
|
617 | 617 | --continue/-c`. |
|
618 | 618 | ''' |
|
619 | 619 | with repo.wlock(): |
|
620 | 620 | return _dotransplant(ui, repo, *revs, **opts) |
|
621 | 621 | |
|
622 | 622 | def _dotransplant(ui, repo, *revs, **opts): |
|
623 | 623 | def incwalk(repo, csets, match=util.always): |
|
624 | 624 | for node in csets: |
|
625 | 625 | if match(node): |
|
626 | 626 | yield node |
|
627 | 627 | |
|
628 | 628 | def transplantwalk(repo, dest, heads, match=util.always): |
|
629 | 629 | '''Yield all nodes that are ancestors of a head but not ancestors |
|
630 | 630 | of dest. |
|
631 | 631 | If no heads are specified, the heads of repo will be used.''' |
|
632 | 632 | if not heads: |
|
633 | 633 | heads = repo.heads() |
|
634 | 634 | ancestors = [] |
|
635 | 635 | ctx = repo[dest] |
|
636 | 636 | for head in heads: |
|
637 | 637 | ancestors.append(ctx.ancestor(repo[head]).node()) |
|
638 | 638 | for node in repo.changelog.nodesbetween(ancestors, heads)[0]: |
|
639 | 639 | if match(node): |
|
640 | 640 | yield node |
|
641 | 641 | |
|
642 | 642 | def checkopts(opts, revs): |
|
643 | 643 | if opts.get('continue'): |
|
644 | 644 | if opts.get('branch') or opts.get('all') or opts.get('merge'): |
|
645 | 645 | raise error.Abort(_('--continue is incompatible with ' |
|
646 | 646 | '--branch, --all and --merge')) |
|
647 | 647 | return |
|
648 | 648 | if not (opts.get('source') or revs or |
|
649 | 649 | opts.get('merge') or opts.get('branch')): |
|
650 | 650 | raise error.Abort(_('no source URL, branch revision, or revision ' |
|
651 | 651 | 'list provided')) |
|
652 | 652 | if opts.get('all'): |
|
653 | 653 | if not opts.get('branch'): |
|
654 | 654 | raise error.Abort(_('--all requires a branch revision')) |
|
655 | 655 | if revs: |
|
656 | 656 | raise error.Abort(_('--all is incompatible with a ' |
|
657 | 657 | 'revision list')) |
|
658 | 658 | |
|
659 | 659 | opts = pycompat.byteskwargs(opts) |
|
660 | 660 | checkopts(opts, revs) |
|
661 | 661 | |
|
662 | 662 | if not opts.get('log'): |
|
663 | 663 | # deprecated config: transplant.log |
|
664 | 664 | opts['log'] = ui.config('transplant', 'log') |
|
665 | 665 | if not opts.get('filter'): |
|
666 | 666 | # deprecated config: transplant.filter |
|
667 | 667 | opts['filter'] = ui.config('transplant', 'filter') |
|
668 | 668 | |
|
669 | 669 | tp = transplanter(ui, repo, opts) |
|
670 | 670 | |
|
671 | 671 | p1 = repo.dirstate.p1() |
|
672 | 672 | if len(repo) > 0 and p1 == revlog.nullid: |
|
673 | 673 | raise error.Abort(_('no revision checked out')) |
|
674 | 674 | if opts.get('continue'): |
|
675 | 675 | if not tp.canresume(): |
|
676 | 676 | raise error.Abort(_('no transplant to continue')) |
|
677 | 677 | else: |
|
678 | 678 | cmdutil.checkunfinished(repo) |
|
679 | 679 | cmdutil.bailifchanged(repo) |
|
680 | 680 | |
|
681 | 681 | sourcerepo = opts.get('source') |
|
682 | 682 | if sourcerepo: |
|
683 | 683 | peer = hg.peer(repo, opts, ui.expandpath(sourcerepo)) |
|
684 | 684 | heads = pycompat.maplist(peer.lookup, opts.get('branch', ())) |
|
685 | 685 | target = set(heads) |
|
686 | 686 | for r in revs: |
|
687 | 687 | try: |
|
688 | 688 | target.add(peer.lookup(r)) |
|
689 | 689 | except error.RepoError: |
|
690 | 690 | pass |
|
691 | 691 | source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer, |
|
692 | 692 | onlyheads=sorted(target), force=True) |
|
693 | 693 | else: |
|
694 | 694 | source = repo |
|
695 | 695 | heads = pycompat.maplist(source.lookup, opts.get('branch', ())) |
|
696 | 696 | cleanupfn = None |
|
697 | 697 | |
|
698 | 698 | try: |
|
699 | 699 | if opts.get('continue'): |
|
700 | 700 | tp.resume(repo, source, opts) |
|
701 | 701 | return |
|
702 | 702 | |
|
703 | 703 | tf = tp.transplantfilter(repo, source, p1) |
|
704 | 704 | if opts.get('prune'): |
|
705 | 705 | prune = set(source[r].node() |
|
706 | 706 | for r in scmutil.revrange(source, opts.get('prune'))) |
|
707 | 707 | matchfn = lambda x: tf(x) and x not in prune |
|
708 | 708 | else: |
|
709 | 709 | matchfn = tf |
|
710 | 710 | merges = pycompat.maplist(source.lookup, opts.get('merge', ())) |
|
711 | 711 | revmap = {} |
|
712 | 712 | if revs: |
|
713 | 713 | for r in scmutil.revrange(source, revs): |
|
714 | 714 | revmap[int(r)] = source[r].node() |
|
715 | 715 | elif opts.get('all') or not merges: |
|
716 | 716 | if source != repo: |
|
717 | 717 | alltransplants = incwalk(source, csets, match=matchfn) |
|
718 | 718 | else: |
|
719 | 719 | alltransplants = transplantwalk(source, p1, heads, |
|
720 | 720 | match=matchfn) |
|
721 | 721 | if opts.get('all'): |
|
722 | 722 | revs = alltransplants |
|
723 | 723 | else: |
|
724 | 724 | revs, newmerges = browserevs(ui, source, alltransplants, opts) |
|
725 | 725 | merges.extend(newmerges) |
|
726 | 726 | for r in revs: |
|
727 | 727 | revmap[source.changelog.rev(r)] = r |
|
728 | 728 | for r in merges: |
|
729 | 729 | revmap[source.changelog.rev(r)] = r |
|
730 | 730 | |
|
731 | 731 | tp.apply(repo, source, revmap, merges, opts) |
|
732 | 732 | finally: |
|
733 | 733 | if cleanupfn: |
|
734 | 734 | cleanupfn() |
|
735 | 735 | |
|
736 | 736 | revsetpredicate = registrar.revsetpredicate() |
|
737 | 737 | |
|
738 | 738 | @revsetpredicate('transplanted([set])') |
|
739 | 739 | def revsettransplanted(repo, subset, x): |
|
740 | 740 | """Transplanted changesets in set, or all transplanted changesets. |
|
741 | 741 | """ |
|
742 | 742 | if x: |
|
743 | 743 | s = revset.getset(repo, subset, x) |
|
744 | 744 | else: |
|
745 | 745 | s = subset |
|
746 | 746 | return smartset.baseset([r for r in s if |
|
747 | 747 | repo[r].extra().get('transplant_source')]) |
|
748 | 748 | |
|
749 | 749 | templatekeyword = registrar.templatekeyword() |
|
750 | 750 | |
|
751 | 751 | @templatekeyword('transplanted', requires={'ctx'}) |
|
752 | 752 | def kwtransplanted(context, mapping): |
|
753 | 753 | """String. The node identifier of the transplanted |
|
754 | 754 | changeset if any.""" |
|
755 | 755 | ctx = context.resource(mapping, 'ctx') |
|
756 | 756 | n = ctx.extra().get('transplant_source') |
|
757 | 757 | return n and nodemod.hex(n) or '' |
|
758 | 758 | |
|
759 | 759 | def extsetup(ui): |
|
760 | 760 | cmdutil.unfinishedstates.append( |
|
761 | 761 | ['transplant/journal', True, False, _('transplant in progress'), |
|
762 | 762 | _("use 'hg transplant --continue' or 'hg update' to abort")]) |
|
763 | 763 | |
|
764 | 764 | # tell hggettext to extract docstrings from these functions: |
|
765 | 765 | i18nfunctions = [revsettransplanted, kwtransplanted] |
@@ -1,1418 +1,1418 b'' | |||
|
1 | 1 | # changegroup.py - Mercurial changegroup manipulation functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | 11 | import struct |
|
12 | 12 | import weakref |
|
13 | 13 | |
|
14 | 14 | from .i18n import _ |
|
15 | 15 | from .node import ( |
|
16 | 16 | hex, |
|
17 | 17 | nullid, |
|
18 | 18 | nullrev, |
|
19 | 19 | short, |
|
20 | 20 | ) |
|
21 | 21 | |
|
22 | 22 | from . import ( |
|
23 | 23 | error, |
|
24 | 24 | match as matchmod, |
|
25 | 25 | mdiff, |
|
26 | 26 | phases, |
|
27 | 27 | pycompat, |
|
28 | 28 | repository, |
|
29 | 29 | util, |
|
30 | 30 | ) |
|
31 | 31 | |
|
32 | 32 | _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s") |
|
33 | 33 | _CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s") |
|
34 | 34 | _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH") |
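# Small sketch (illustration only, not used by this module): a cg2 delta
# header is five 20-byte nodes packed back to back -- node, p1, p2,
# deltabase, linknode (the "cs" field in the unpackers below).  cg1 drops
# deltabase, inferring it from p1 or the previous chunk's node, and cg3
# appends a big-endian 16-bit flags field.
if __debug__:
    _demohdr = _CHANGEGROUPV2_DELTA_HEADER.pack(
        b'n' * 20, b'1' * 20, b'2' * 20, b'b' * 20, b'c' * 20)
    assert _CHANGEGROUPV2_DELTA_HEADER.unpack(_demohdr)[3] == b'b' * 20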
|
35 | 35 | |
|
36 | 36 | LFS_REQUIREMENT = 'lfs' |
|
37 | 37 | |
|
38 | 38 | readexactly = util.readexactly |
|
39 | 39 | |
|
40 | 40 | def getchunk(stream): |
|
41 | 41 | """return the next chunk from stream as a string""" |
|
42 | 42 | d = readexactly(stream, 4) |
|
43 | 43 | l = struct.unpack(">l", d)[0] |
|
44 | 44 | if l <= 4: |
|
45 | 45 | if l: |
|
46 | 46 | raise error.Abort(_("invalid chunk length %d") % l) |
|
47 | 47 | return "" |
|
48 | 48 | return readexactly(stream, l - 4) |
|
49 | 49 | |
|
50 | 50 | def chunkheader(length): |
|
51 | 51 | """return a changegroup chunk header (string)""" |
|
52 | 52 | return struct.pack(">l", length + 4) |
|
53 | 53 | |
|
54 | 54 | def closechunk(): |
|
55 | 55 | """return a changegroup chunk header (string) for a zero-length chunk""" |
|
56 | 56 | return struct.pack(">l", 0) |
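# Self-contained sketch (demo only, not used elsewhere in the module) showing
# how the framing above round-trips: each chunk is a 4-byte big-endian length
# that counts itself, followed by the payload, and closechunk()'s zero length
# terminates a group.
def _demoframing(payloads):
    import io                      # local import, demo only
    buf = io.BytesIO()
    for p in payloads:
        buf.write(chunkheader(len(p)) + p)
    buf.write(closechunk())
    buf.seek(0)
    chunks = []
    chunk = getchunk(buf)
    while chunk:                   # getchunk() returns '' at the terminator
        chunks.append(chunk)
        chunk = getchunk(buf)
    return chunks                  # _demoframing([b'a', b'bc']) -> [b'a', b'bc']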
|
57 | 57 | |
|
58 | 58 | def _fileheader(path): |
|
59 | 59 | """Obtain a changegroup chunk header for a named path.""" |
|
60 | 60 | return chunkheader(len(path)) + path |
|
61 | 61 | |
|
62 | 62 | def writechunks(ui, chunks, filename, vfs=None): |
|
63 | 63 | """Write chunks to a file and return its filename. |
|
64 | 64 | |
|
65 | 65 | The stream is assumed to be a bundle file. |
|
66 | 66 | Existing files will not be overwritten. |
|
67 | 67 | If no filename is specified, a temporary file is created. |
|
68 | 68 | """ |
|
69 | 69 | fh = None |
|
70 | 70 | cleanup = None |
|
71 | 71 | try: |
|
72 | 72 | if filename: |
|
73 | 73 | if vfs: |
|
74 | 74 | fh = vfs.open(filename, "wb") |
|
75 | 75 | else: |
|
76 | 76 | # Increase default buffer size because default is usually |
|
77 | 77 | # small (4k is common on Linux). |
|
78 | 78 | fh = open(filename, "wb", 131072) |
|
79 | 79 | else: |
|
80 | 80 | fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg") |
|
81 | 81 | fh = os.fdopen(fd, r"wb") |
|
82 | 82 | cleanup = filename |
|
83 | 83 | for c in chunks: |
|
84 | 84 | fh.write(c) |
|
85 | 85 | cleanup = None |
|
86 | 86 | return filename |
|
87 | 87 | finally: |
|
88 | 88 | if fh is not None: |
|
89 | 89 | fh.close() |
|
90 | 90 | if cleanup is not None: |
|
91 | 91 | if filename and vfs: |
|
92 | 92 | vfs.unlink(cleanup) |
|
93 | 93 | else: |
|
94 | 94 | os.unlink(cleanup) |
|
95 | 95 | |
|
96 | 96 | class cg1unpacker(object): |
|
97 | 97 | """Unpacker for cg1 changegroup streams. |
|
98 | 98 | |
|
99 | 99 | A changegroup unpacker handles the framing of the revision data in |
|
100 | 100 | the wire format. Most consumers will want to use the apply() |
|
101 | 101 | method to add the changes from the changegroup to a repository. |
|
102 | 102 | |
|
103 | 103 | If you're forwarding a changegroup unmodified to another consumer, |
|
104 | 104 | use getchunks(), which returns an iterator of changegroup |
|
105 | 105 | chunks. This is mostly useful for cases where you need to know the |
|
106 | 106 | data stream has ended by observing the end of the changegroup. |
|
107 | 107 | |
|
108 | 108 | deltachunk() is useful only if you're applying delta data. Most |
|
109 | 109 | consumers should prefer apply() instead. |
|
110 | 110 | |
|
111 | 111 | A few other public methods exist. Those are used only for |
|
112 | 112 | bundlerepo and some debug commands - their use is discouraged. |
|
113 | 113 | """ |
|
114 | 114 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
115 | 115 | deltaheadersize = deltaheader.size |
|
116 | 116 | version = '01' |
|
117 | 117 | _grouplistcount = 1 # One list of files after the manifests |
|
118 | 118 | |
|
119 | 119 | def __init__(self, fh, alg, extras=None): |
|
120 | 120 | if alg is None: |
|
121 | 121 | alg = 'UN' |
|
122 | 122 | if alg not in util.compengines.supportedbundletypes: |
|
123 | 123 | raise error.Abort(_('unknown stream compression type: %s') |
|
124 | 124 | % alg) |
|
125 | 125 | if alg == 'BZ': |
|
126 | 126 | alg = '_truncatedBZ' |
|
127 | 127 | |
|
128 | 128 | compengine = util.compengines.forbundletype(alg) |
|
129 | 129 | self._stream = compengine.decompressorreader(fh) |
|
130 | 130 | self._type = alg |
|
131 | 131 | self.extras = extras or {} |
|
132 | 132 | self.callback = None |
|
133 | 133 | |
|
134 | 134 | # These methods (compressed, read, seek, tell) all appear to only |
|
135 | 135 | # be used by bundlerepo, but it's a little hard to tell. |
|
136 | 136 | def compressed(self): |
|
137 | 137 | return self._type is not None and self._type != 'UN' |
|
138 | 138 | def read(self, l): |
|
139 | 139 | return self._stream.read(l) |
|
140 | 140 | def seek(self, pos): |
|
141 | 141 | return self._stream.seek(pos) |
|
142 | 142 | def tell(self): |
|
143 | 143 | return self._stream.tell() |
|
144 | 144 | def close(self): |
|
145 | 145 | return self._stream.close() |
|
146 | 146 | |
|
147 | 147 | def _chunklength(self): |
|
148 | 148 | d = readexactly(self._stream, 4) |
|
149 | 149 | l = struct.unpack(">l", d)[0] |
|
150 | 150 | if l <= 4: |
|
151 | 151 | if l: |
|
152 | 152 | raise error.Abort(_("invalid chunk length %d") % l) |
|
153 | 153 | return 0 |
|
154 | 154 | if self.callback: |
|
155 | 155 | self.callback() |
|
156 | 156 | return l - 4 |
|
157 | 157 | |
|
158 | 158 | def changelogheader(self): |
|
159 | 159 | """v10 does not have a changelog header chunk""" |
|
160 | 160 | return {} |
|
161 | 161 | |
|
162 | 162 | def manifestheader(self): |
|
163 | 163 | """v10 does not have a manifest header chunk""" |
|
164 | 164 | return {} |
|
165 | 165 | |
|
166 | 166 | def filelogheader(self): |
|
167 | 167 | """return the header of the filelogs chunk, v10 only has the filename""" |
|
168 | 168 | l = self._chunklength() |
|
169 | 169 | if not l: |
|
170 | 170 | return {} |
|
171 | 171 | fname = readexactly(self._stream, l) |
|
172 | 172 | return {'filename': fname} |
|
173 | 173 | |
|
174 | 174 | def _deltaheader(self, headertuple, prevnode): |
|
175 | 175 | node, p1, p2, cs = headertuple |
|
176 | 176 | if prevnode is None: |
|
177 | 177 | deltabase = p1 |
|
178 | 178 | else: |
|
179 | 179 | deltabase = prevnode |
|
180 | 180 | flags = 0 |
|
181 | 181 | return node, p1, p2, deltabase, cs, flags |
|
182 | 182 | |
|
183 | 183 | def deltachunk(self, prevnode): |
|
184 | 184 | l = self._chunklength() |
|
185 | 185 | if not l: |
|
186 | 186 | return {} |
|
187 | 187 | headerdata = readexactly(self._stream, self.deltaheadersize) |
|
188 | 188 | header = self.deltaheader.unpack(headerdata) |
|
189 | 189 | delta = readexactly(self._stream, l - self.deltaheadersize) |
|
190 | 190 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) |
|
191 | 191 | return (node, p1, p2, cs, deltabase, delta, flags) |
|
192 | 192 | |
|
193 | 193 | def getchunks(self): |
|
194 | 194 | """returns all the chunks contains in the bundle |
|
195 | 195 | |
|
196 | 196 | Used when you need to forward the binary stream to a file or another |
|
197 | 197 | network API. To do so, it parses the changegroup data, otherwise it will 

198 | 198 | block in case of sshrepo because it doesn't know the end of the stream. 
|
199 | 199 | """ |
|
200 | 200 | # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog, |
|
201 | 201 | # and a list of filelogs. For changegroup 3, we expect 4 parts: |
|
202 | 202 | # changelog, manifestlog, a list of tree manifestlogs, and a list of |
|
203 | 203 | # filelogs. |
|
204 | 204 | # |
|
205 | 205 | # Changelog and manifestlog parts are terminated with empty chunks. The |
|
206 | 206 | # tree and file parts are a list of entry sections. Each entry section |
|
207 | 207 | # is a series of chunks terminating in an empty chunk. The list of these |
|
208 | 208 | # entry sections is terminated in yet another empty chunk, so we know |
|
209 | 209 | # we've reached the end of the tree/file list when we reach an empty |
|
210 | 210 | # chunk that was preceded by no non-empty chunks. 
|
211 | 211 | |
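# As a concrete illustration of the loop below, a minimal cg1 stream
# contains, in order: the changelog chunks ending in an empty chunk, the
# manifest chunks ending in an empty chunk, then one section per file
# (filename chunk, delta chunks, empty chunk), closed by a final empty
# chunk for the whole file list.  cg3 inserts one extra such list (tree
# manifests) before the filelogs, which is why its _grouplistcount is 2.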
|
212 | 212 | parts = 0 |
|
213 | 213 | while parts < 2 + self._grouplistcount: |
|
214 | 214 | noentries = True |
|
215 | 215 | while True: |
|
216 | 216 | chunk = getchunk(self) |
|
217 | 217 | if not chunk: |
|
218 | 218 | # The first two empty chunks represent the end of the |
|
219 | 219 | # changelog and the manifestlog portions. The remaining |
|
220 | 220 | # empty chunks represent either A) the end of individual |
|
221 | 221 | # tree or file entries in the file list, or B) the end of |
|
222 | 222 | # the entire list. It's the end of the entire list if there |
|
223 | 223 | # were no entries (i.e. noentries is True). |
|
224 | 224 | if parts < 2: |
|
225 | 225 | parts += 1 |
|
226 | 226 | elif noentries: |
|
227 | 227 | parts += 1 |
|
228 | 228 | break |
|
229 | 229 | noentries = False |
|
230 | 230 | yield chunkheader(len(chunk)) |
|
231 | 231 | pos = 0 |
|
232 | 232 | while pos < len(chunk): |
|
233 | 233 | next = pos + 2**20 |
|
234 | 234 | yield chunk[pos:next] |
|
235 | 235 | pos = next |
|
236 | 236 | yield closechunk() |
|
237 | 237 | |
|
238 | 238 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
239 | 239 | self.callback = prog.increment |
|
240 | 240 | # no need to check for empty manifest group here: |
|
241 | 241 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
242 | 242 | # no new manifest will be created and the manifest group will |
|
243 | 243 | # be empty during the pull |
|
244 | 244 | self.manifestheader() |
|
245 | 245 | deltas = self.deltaiter() |
|
246 | 246 | repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp) |
|
247 | 247 | prog.complete() |
|
248 | 248 | self.callback = None |
|
249 | 249 | |
|
250 | 250 | def apply(self, repo, tr, srctype, url, targetphase=phases.draft, |
|
251 | 251 | expectedtotal=None): |
|
252 | 252 | """Add the changegroup returned by source.read() to this repo. |
|
253 | 253 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
|
254 | 254 | the URL of the repo where this changegroup is coming from. |
|
255 | 255 | |
|
256 | 256 | Return an integer summarizing the change to this repo: |
|
257 | 257 | - nothing changed or no source: 0 |
|
258 | 258 | - more heads than before: 1+added heads (2..n) |
|
259 | 259 | - fewer heads than before: -1-removed heads (-2..-n) |
|
260 | 260 | - number of heads stays the same: 1 |
|
261 | 261 | """ |
|
262 | 262 | repo = repo.unfiltered() |
|
263 | 263 | def csmap(x): |
|
264 | 264 | repo.ui.debug("add changeset %s\n" % short(x)) |
|
265 | 265 | return len(cl) |
|
266 | 266 | |
|
267 | 267 | def revmap(x): |
|
268 | 268 | return cl.rev(x) |
|
269 | 269 | |
|
270 | 270 | changesets = files = revisions = 0 |
|
271 | 271 | |
|
272 | 272 | try: |
|
273 | 273 | # The transaction may already carry source information. In this |
|
274 | 274 | # case we use the top level data. We overwrite the argument |
|
275 | 275 | # because we need to use the top level value (if they exist) |
|
276 | 276 | # in this function. |
|
277 | 277 | srctype = tr.hookargs.setdefault('source', srctype) |
|
278 | 278 | tr.hookargs.setdefault('url', url) |
|
279 | 279 | repo.hook('prechangegroup', |
|
280 | 280 | throw=True, **pycompat.strkwargs(tr.hookargs)) |
|
281 | 281 | |
|
282 | 282 | # write changelog data to temp files so concurrent readers |
|
283 | 283 | # will not see an inconsistent view |
|
284 | 284 | cl = repo.changelog |
|
285 | 285 | cl.delayupdate(tr) |
|
286 | 286 | oldheads = set(cl.heads()) |
|
287 | 287 | |
|
288 | 288 | trp = weakref.proxy(tr) |
|
289 | 289 | # pull off the changeset group |
|
290 | 290 | repo.ui.status(_("adding changesets\n")) |
|
291 | 291 | clstart = len(cl) |
|
292 | 292 | progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'), |
|
293 | 293 | total=expectedtotal) |
|
294 | 294 | self.callback = progress.increment |
|
295 | 295 | |
|
296 | 296 | efiles = set() |
|
297 | 297 | def onchangelog(cl, node): |
|
298 | 298 | efiles.update(cl.readfiles(node)) |
|
299 | 299 | |
|
300 | 300 | self.changelogheader() |
|
301 | 301 | deltas = self.deltaiter() |
|
302 | 302 | cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog) |
|
303 | 303 | efiles = len(efiles) |
|
304 | 304 | |
|
305 | 305 | if not cgnodes: |
|
306 | 306 | repo.ui.develwarn('applied empty changelog from changegroup', |
|
307 | 307 | config='warn-empty-changegroup') |
|
308 | 308 | clend = len(cl) |
|
309 | 309 | changesets = clend - clstart |
|
310 | 310 | progress.complete() |
|
311 | 311 | self.callback = None |
|
312 | 312 | |
|
313 | 313 | # pull off the manifest group |
|
314 | 314 | repo.ui.status(_("adding manifests\n")) |
|
315 | 315 | # We know that we'll never have more manifests than we had |
|
316 | 316 | # changesets. |
|
317 | 317 | progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'), |
|
318 | 318 | total=changesets) |
|
319 | 319 | self._unpackmanifests(repo, revmap, trp, progress) |
|
320 | 320 | |
|
321 | 321 | needfiles = {} |
|
322 | 322 | if repo.ui.configbool('server', 'validate'): |
|
323 | 323 | cl = repo.changelog |
|
324 | 324 | ml = repo.manifestlog |
|
325 | 325 | # validate incoming csets have their manifests |
|
326 | 326 | for cset in pycompat.xrange(clstart, clend): |
|
327 | 327 | mfnode = cl.changelogrevision(cset).manifest |
|
328 | 328 | mfest = ml[mfnode].readdelta() |
|
329 | 329 | # store file cgnodes we must see |
|
330 | 330 | for f, n in mfest.iteritems(): |
|
331 | 331 | needfiles.setdefault(f, set()).add(n) |
|
332 | 332 | |
|
333 | 333 | # process the files |
|
334 | 334 | repo.ui.status(_("adding file changes\n")) |
|
335 | 335 | newrevs, newfiles = _addchangegroupfiles( |
|
336 | 336 | repo, self, revmap, trp, efiles, needfiles) |
|
337 | 337 | revisions += newrevs |
|
338 | 338 | files += newfiles |
|
339 | 339 | |
|
340 | 340 | deltaheads = 0 |
|
341 | 341 | if oldheads: |
|
342 | 342 | heads = cl.heads() |
|
343 | 343 | deltaheads = len(heads) - len(oldheads) |
|
344 | 344 | for h in heads: |
|
345 | 345 | if h not in oldheads and repo[h].closesbranch(): |
|
346 | 346 | deltaheads -= 1 |
|
347 | 347 | htext = "" |
|
348 | 348 | if deltaheads: |
|
349 | 349 | htext = _(" (%+d heads)") % deltaheads |
|
350 | 350 | |
|
351 | 351 | repo.ui.status(_("added %d changesets" |
|
352 | 352 | " with %d changes to %d files%s\n") |
|
353 | 353 | % (changesets, revisions, files, htext)) |
|
354 | 354 | repo.invalidatevolatilesets() |
|
355 | 355 | |
|
356 | 356 | if changesets > 0: |
|
357 | 357 | if 'node' not in tr.hookargs: |
|
358 | 358 | tr.hookargs['node'] = hex(cl.node(clstart)) |
|
359 | 359 | tr.hookargs['node_last'] = hex(cl.node(clend - 1)) |
|
360 | 360 | hookargs = dict(tr.hookargs) |
|
361 | 361 | else: |
|
362 | 362 | hookargs = dict(tr.hookargs) |
|
363 | 363 | hookargs['node'] = hex(cl.node(clstart)) |
|
364 | 364 | hookargs['node_last'] = hex(cl.node(clend - 1)) |
|
365 | 365 | repo.hook('pretxnchangegroup', |
|
366 | 366 | throw=True, **pycompat.strkwargs(hookargs)) |
|
367 | 367 | |
|
368 | 368 | added = [cl.node(r) for r in pycompat.xrange(clstart, clend)] |
|
369 | 369 | phaseall = None |
|
370 | 370 | if srctype in ('push', 'serve'): |
|
371 | 371 | # Old servers can not push the boundary themselves. |
|
372 | 372 | # New servers won't push the boundary if the changeset already 
|
373 | 373 | # exists locally as secret |
|
374 | 374 | # |
|
375 | 375 | # We should not use added here but the list of all changes in 
|
376 | 376 | # the bundle |
|
377 | 377 | if repo.publishing(): |
|
378 | 378 | targetphase = phaseall = phases.public |
|
379 | 379 | else: |
|
380 | 380 | # closer target phase computation |
|
381 | 381 | |
|
382 | 382 | # Those changesets have been pushed from the |
|
383 | 383 | # outside, their phases are going to be pushed |
|
384 | 384 | # alongside. Therefore `targetphase` is 
|
385 | 385 | # ignored. |
|
386 | 386 | targetphase = phaseall = phases.draft |
|
387 | 387 | if added: |
|
388 | 388 | phases.registernew(repo, tr, targetphase, added) |
|
389 | 389 | if phaseall is not None: |
|
390 | 390 | phases.advanceboundary(repo, tr, phaseall, cgnodes) |
|
391 | 391 | |
|
392 | 392 | if changesets > 0: |
|
393 | 393 | |
|
394 | 394 | def runhooks(): |
|
395 | 395 | # These hooks run when the lock releases, not when the |
|
396 | 396 | # transaction closes. So it's possible for the changelog |
|
397 | 397 | # to have changed since we last saw it. |
|
398 | 398 | if clstart >= len(repo): |
|
399 | 399 | return |
|
400 | 400 | |
|
401 | 401 | repo.hook("changegroup", **pycompat.strkwargs(hookargs)) |
|
402 | 402 | |
|
403 | 403 | for n in added: |
|
404 | 404 | args = hookargs.copy() |
|
405 | 405 | args['node'] = hex(n) |
|
406 | 406 | del args['node_last'] |
|
407 | 407 | repo.hook("incoming", **pycompat.strkwargs(args)) |
|
408 | 408 | |
|
409 | 409 | newheads = [h for h in repo.heads() |
|
410 | 410 | if h not in oldheads] |
|
411 | 411 | repo.ui.log("incoming", |
|
412 | 412 | "%d incoming changes - new heads: %s\n", |
|
413 | 413 | len(added), |
|
414 | 414 | ', '.join([hex(c[:6]) for c in newheads])) |
|
415 | 415 | |
|
416 | 416 | tr.addpostclose('changegroup-runhooks-%020i' % clstart, |
|
417 | 417 | lambda tr: repo._afterlock(runhooks)) |
|
418 | 418 | finally: |
|
419 | 419 | repo.ui.flush() |
|
420 | 420 | # never return 0 here: |
|
421 | 421 | if deltaheads < 0: |
|
422 | 422 | ret = deltaheads - 1 |
|
423 | 423 | else: |
|
424 | 424 | ret = deltaheads + 1 |
|
425 | 425 | return ret |
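# Worked examples of the mapping documented in the apply() docstring
# (descriptive note only): deltaheads == 0 (head count unchanged) yields 1,
# deltaheads == 2 yields 3 (1 + added heads), and deltaheads == -1 yields -2
# (-1 - removed heads).  The docstring's 0 case is left to callers, since
# this method itself never returns 0, per the comment above.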
|
426 | 426 | |
|
427 | 427 | def deltaiter(self): |
|
428 | 428 | """ |
|
429 | 429 | returns an iterator of the deltas in this changegroup |
|
430 | 430 | |
|
431 | 431 | Useful for passing to the underlying storage system to be stored. |
|
432 | 432 | """ |
|
433 | 433 | chain = None |
|
434 | 434 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): |
|
435 | 435 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags) |
|
436 | 436 | yield chunkdata |
|
437 | 437 | chain = chunkdata[0] |
|
438 | 438 | |
|
439 | 439 | class cg2unpacker(cg1unpacker): |
|
440 | 440 | """Unpacker for cg2 streams. |
|
441 | 441 | |
|
442 | 442 | cg2 streams add support for generaldelta, so the delta header |
|
443 | 443 | format is slightly different. All other features about the data |
|
444 | 444 | remain the same. |
|
445 | 445 | """ |
|
446 | 446 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER |
|
447 | 447 | deltaheadersize = deltaheader.size |
|
448 | 448 | version = '02' |
|
449 | 449 | |
|
450 | 450 | def _deltaheader(self, headertuple, prevnode): |
|
451 | 451 | node, p1, p2, deltabase, cs = headertuple |
|
452 | 452 | flags = 0 |
|
453 | 453 | return node, p1, p2, deltabase, cs, flags |
|
454 | 454 | |
|
455 | 455 | class cg3unpacker(cg2unpacker): |
|
456 | 456 | """Unpacker for cg3 streams. |
|
457 | 457 | |
|
458 | 458 | cg3 streams add support for exchanging treemanifests and revlog |
|
459 | 459 | flags. It adds the revlog flags to the delta header and an empty chunk |
|
460 | 460 | separating manifests and files. |
|
461 | 461 | """ |
|
462 | 462 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER |
|
463 | 463 | deltaheadersize = deltaheader.size |
|
464 | 464 | version = '03' |
|
465 | 465 | _grouplistcount = 2 # One list of manifests and one list of files |
|
466 | 466 | |
|
467 | 467 | def _deltaheader(self, headertuple, prevnode): |
|
468 | 468 | node, p1, p2, deltabase, cs, flags = headertuple |
|
469 | 469 | return node, p1, p2, deltabase, cs, flags |
|
470 | 470 | |
|
471 | 471 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
472 | 472 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog) |
|
473 | 473 | for chunkdata in iter(self.filelogheader, {}): |
|
474 | 474 | # If we get here, there are directory manifests in the changegroup |
|
475 | 475 | d = chunkdata["filename"] |
|
476 | 476 | repo.ui.debug("adding %s revisions\n" % d) |
|
477 | 477 | deltas = self.deltaiter() |
|
478 | 478 | if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp): |
|
479 | 479 | raise error.Abort(_("received dir revlog group is empty")) |
|
480 | 480 | |
|
481 | 481 | class headerlessfixup(object): |
|
482 | 482 | def __init__(self, fh, h): |
|
483 | 483 | self._h = h |
|
484 | 484 | self._fh = fh |
|
485 | 485 | def read(self, n): |
|
486 | 486 | if self._h: |
|
487 | 487 | d, self._h = self._h[:n], self._h[n:] |
|
488 | 488 | if len(d) < n: |
|
489 | 489 | d += readexactly(self._fh, n - len(d)) |
|
490 | 490 | return d |
|
491 | 491 | return readexactly(self._fh, n) |
|
492 | 492 | |
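
Illustrative sketch only: ``headerlessfixup`` re-attaches bytes that were already consumed while sniffing a stream's header, so later ``read()`` calls see the full byte sequence again (``fh`` and the 6-byte magic are assumptions here, not taken from the patch)::

    magic = readexactly(fh, 6)          # bytes consumed while sniffing the format
    fh = headerlessfixup(fh, magic)     # subsequent read()s return magic first
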
|
493 | 493 | def _revisiondeltatochunks(delta, headerfn): |
|
494 | 494 | """Serialize a revisiondelta to changegroup chunks.""" |
|
495 | 495 | |
|
496 | 496 | # The captured revision delta may be encoded as a delta against |
|
497 | 497 | # a base revision or as a full revision. The changegroup format |
|
498 | 498 | # requires that everything on the wire be deltas. So for full |
|
499 | 499 | # revisions, we need to invent a header that says to rewrite |
|
500 | 500 | # data. |
|
501 | 501 | |
|
502 | 502 | if delta.delta is not None: |
|
503 | 503 | prefix, data = b'', delta.delta |
|
504 | 504 | elif delta.basenode == nullid: |
|
505 | 505 | data = delta.revision |
|
506 | 506 | prefix = mdiff.trivialdiffheader(len(data)) |
|
507 | 507 | else: |
|
508 | 508 | data = delta.revision |
|
509 | 509 | prefix = mdiff.replacediffheader(delta.baserevisionsize, |
|
510 | 510 | len(data)) |
|
511 | 511 | |
|
512 | 512 | meta = headerfn(delta) |
|
513 | 513 | |
|
514 | 514 | yield chunkheader(len(meta) + len(prefix) + len(data)) |
|
515 | 515 | yield meta |
|
516 | 516 | if prefix: |
|
517 | 517 | yield prefix |
|
518 | 518 | yield data |
|
519 | 519 | |
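
As a rough illustration (not from the patch): the chunks yielded above are length-prefixed frames, so concatenating them gives the wire form of a single delta, assuming ``delta`` is a ``revisiondelta`` and ``headerfn`` is the packer's ``_builddeltaheader``::

    wire = b''.join(_revisiondeltatochunks(delta, headerfn))
    # wire = chunkheader(length) + delta header + optional diff prefix + payload
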
|
520 | 520 | def _sortnodesellipsis(store, nodes, cl, lookup): |
|
521 | 521 | """Sort nodes for changegroup generation.""" |
|
522 | 522 | # Ellipses serving mode. |
|
523 | 523 | # |
|
524 | 524 | # In a perfect world, we'd generate better ellipsis-ified graphs |
|
525 | 525 | # for non-changelog revlogs. In practice, we haven't started doing |
|
526 | 526 | # that yet, so the resulting DAGs for the manifestlog and filelogs |
|
527 | 527 | # are actually full of bogus parentage on all the ellipsis |
|
528 | 528 | # nodes. This has the side effect that, while the contents are |
|
529 | 529 | # correct, the individual DAGs might be completely out of whack in |
|
530 | 530 | # a case like 882681bc3166 and its ancestors (back about 10 |
|
531 | 531 | # revisions or so) in the main hg repo. |
|
532 | 532 | # |
|
533 | 533 | # The one invariant we *know* holds is that the new (potentially |
|
534 | 534 | # bogus) DAG shape will be valid if we order the nodes in the |
|
535 | 535 | # order that they're introduced in dramatis personae by the |
|
536 | 536 | # changelog, so what we do is we sort the non-changelog histories |
|
537 | 537 | # by the order in which they are used by the changelog. |
|
538 | 538 | key = lambda n: cl.rev(lookup(n)) |
|
539 | 539 | return sorted(nodes, key=key) |
|
540 | 540 | |
|
541 | 541 | def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev, |
|
542 | 542 | linknode, clrevtolocalrev, fullclnodes, |
|
543 | 543 | precomputedellipsis): |
|
544 | 544 | linkparents = precomputedellipsis[linkrev] |
|
545 | 545 | def local(clrev): |
|
546 | 546 | """Turn a changelog revnum into a local revnum. |
|
547 | 547 | |
|
548 | 548 | The ellipsis dag is stored as revnums on the changelog, |
|
549 | 549 | but when we're producing ellipsis entries for |
|
550 | 550 | non-changelog revlogs, we need to turn those numbers into |
|
551 | 551 | something local. This does that for us, and during the |
|
552 | 552 | changelog sending phase will also expand the stored |
|
553 | 553 | mappings as needed. |
|
554 | 554 | """ |
|
555 | 555 | if clrev == nullrev: |
|
556 | 556 | return nullrev |
|
557 | 557 | |
|
558 | 558 | if ischangelog: |
|
559 | 559 | return clrev |
|
560 | 560 | |
|
561 | 561 | # Walk the ellipsis-ized changelog breadth-first looking for a |
|
562 | 562 | # change that has been linked from the current revlog. |
|
563 | 563 | # |
|
564 | 564 | # For a flat manifest revlog only a single step should be necessary |
|
565 | 565 | # as all relevant changelog entries are relevant to the flat |
|
566 | 566 | # manifest. |
|
567 | 567 | # |
|
568 | 568 | # For a filelog or tree manifest dirlog however not every changelog |
|
569 | 569 | # entry will have been relevant, so we need to skip some changelog |
|
570 | 570 | # nodes even after ellipsis-izing. |
|
571 | 571 | walk = [clrev] |
|
572 | 572 | while walk: |
|
573 | 573 | p = walk[0] |
|
574 | 574 | walk = walk[1:] |
|
575 | 575 | if p in clrevtolocalrev: |
|
576 | 576 | return clrevtolocalrev[p] |
|
577 | 577 | elif p in fullclnodes: |
|
578 | 578 | walk.extend([pp for pp in cl.parentrevs(p) |
|
579 | 579 | if pp != nullrev]) |
|
580 | 580 | elif p in precomputedellipsis: |
|
581 | 581 | walk.extend([pp for pp in precomputedellipsis[p] |
|
582 | 582 | if pp != nullrev]) |
|
583 | 583 | else: |
|
584 | 584 | # In this case, we've got an ellipsis with parents |
|
585 | 585 | # outside the current bundle (likely an |
|
586 | 586 | # incremental pull). We "know" that we can use the |
|
587 | 587 | # value of this same revlog at whatever revision |
|
588 | 588 | # is pointed to by linknode. "Know" is in scare |
|
589 | 589 | # quotes because I haven't done enough examination |
|
590 | 590 | # of edge cases to convince myself this is really |
|
591 | 591 | # a fact - it works for all the (admittedly |
|
592 | 592 | # thorough) cases in our testsuite, but I would be |
|
593 | 593 | # somewhat unsurprised to find a case in the wild |
|
594 | 594 | # where this breaks down a bit. That said, I don't |
|
595 | 595 | # know if it would hurt anything. |
|
596 | 596 | for i in pycompat.xrange(rev, 0, -1): |
|
597 | 597 | if store.linkrev(i) == clrev: |
|
598 | 598 | return i |
|
599 | 599 | # We failed to resolve a parent for this node, so |
|
600 | 600 | # we crash the changegroup construction. |
|
601 | 601 | raise error.Abort( |
|
602 | 602 | 'unable to resolve parent while packing %r %r' |
|
603 | 603 | ' for changeset %r' % (store.indexfile, rev, clrev)) |
|
604 | 604 | |
|
605 | 605 | return nullrev |
|
606 | 606 | |
|
607 | 607 | if not linkparents or ( |
|
608 | 608 | store.parentrevs(rev) == (nullrev, nullrev)): |
|
609 | 609 | p1, p2 = nullrev, nullrev |
|
610 | 610 | elif len(linkparents) == 1: |
|
611 | 611 | p1, = sorted(local(p) for p in linkparents) |
|
612 | 612 | p2 = nullrev |
|
613 | 613 | else: |
|
614 | 614 | p1, p2 = sorted(local(p) for p in linkparents) |
|
615 | 615 | |
|
616 | 616 | p1node, p2node = store.node(p1), store.node(p2) |
|
617 | 617 | |
|
618 | 618 | return p1node, p2node, linknode |
|
619 | 619 | |
|
620 | 620 | def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev, |
|
621 | 621 | topic=None, |
|
622 | 622 | ellipses=False, clrevtolocalrev=None, fullclnodes=None, |
|
623 | 623 | precomputedellipsis=None): |
|
624 | 624 | """Calculate deltas for a set of revisions. |
|
625 | 625 | |
|
626 | 626 | Is a generator of ``revisiondelta`` instances. |
|
627 | 627 | |
|
628 | 628 | If topic is not None, progress detail will be generated using this |
|
629 | 629 | topic name (e.g. changesets, manifests, etc). |
|
630 | 630 | """ |
|
631 | 631 | if not nodes: |
|
632 | 632 | return |
|
633 | 633 | |
|
634 | 634 | cl = repo.changelog |
|
635 | 635 | |
|
636 | 636 | if ischangelog: |
|
637 | 637 | # `hg log` shows changesets in storage order. To preserve order |
|
638 | 638 | # across clones, send out changesets in storage order. |
|
639 | 639 | nodesorder = 'storage' |
|
640 | 640 | elif ellipses: |
|
641 | 641 | nodes = _sortnodesellipsis(store, nodes, cl, lookup) |
|
642 | 642 | nodesorder = 'nodes' |
|
643 | 643 | else: |
|
644 | 644 | nodesorder = None |
|
645 | 645 | |
|
646 | 646 | # Perform ellipses filtering and revision massaging. We do this before |
|
647 | 647 | # emitrevisions() because a) filtering out revisions creates less work |
|
648 | 648 | # for emitrevisions() b) dropping revisions would break emitrevisions()'s |
|
649 | 649 | # assumptions about delta choices and we would possibly send a delta |
|
650 | 650 | # referencing a missing base revision. |
|
651 | 651 | # |
|
652 | 652 | # Also, calling lookup() has side-effects with regards to populating |
|
653 | 653 | # data structures. If we don't call lookup() for each node or if we call |
|
654 | 654 | # lookup() after the first pass through each node, things can break - |
|
655 | 655 | # possibly intermittently depending on the python hash seed! For that |
|
656 | 656 | # reason, we store a mapping of all linknodes during the initial node |
|
657 | 657 | # pass rather than use lookup() on the output side. |
|
658 | 658 | if ellipses: |
|
659 | 659 | filtered = [] |
|
660 | 660 | adjustedparents = {} |
|
661 | 661 | linknodes = {} |
|
662 | 662 | |
|
663 | 663 | for node in nodes: |
|
664 | 664 | rev = store.rev(node) |
|
665 | 665 | linknode = lookup(node) |
|
666 | 666 | linkrev = cl.rev(linknode) |
|
667 | 667 | clrevtolocalrev[linkrev] = rev |
|
668 | 668 | |
|
669 | 669 | # If linknode is in fullclnodes, it means the corresponding |
|
670 | 670 | # changeset was a full changeset and is being sent unaltered. |
|
671 | 671 | if linknode in fullclnodes: |
|
672 | 672 | linknodes[node] = linknode |
|
673 | 673 | |
|
674 | 674 | # If the corresponding changeset wasn't in the set computed |
|
675 | 675 | # as relevant to us, it should be dropped outright. |
|
676 | 676 | elif linkrev not in precomputedellipsis: |
|
677 | 677 | continue |
|
678 | 678 | |
|
679 | 679 | else: |
|
680 | 680 | # We could probably do this later and avoid the dict |
|
681 | 681 | # holding state. But it likely doesn't matter. |
|
682 | 682 | p1node, p2node, linknode = _resolvenarrowrevisioninfo( |
|
683 | 683 | cl, store, ischangelog, rev, linkrev, linknode, |
|
684 | 684 | clrevtolocalrev, fullclnodes, precomputedellipsis) |
|
685 | 685 | |
|
686 | 686 | adjustedparents[node] = (p1node, p2node) |
|
687 | 687 | linknodes[node] = linknode |
|
688 | 688 | |
|
689 | 689 | filtered.append(node) |
|
690 | 690 | |
|
691 | 691 | nodes = filtered |
|
692 | 692 | |
|
693 | 693 | # We expect the first pass to be fast, so we only engage the progress |
|
694 | 694 | # meter for constructing the revision deltas. |
|
695 | 695 | progress = None |
|
696 | 696 | if topic is not None: |
|
697 | 697 | progress = repo.ui.makeprogress(topic, unit=_('chunks'), |
|
698 | 698 | total=len(nodes)) |
|
699 | 699 | |
|
700 | 700 | configtarget = repo.ui.config('devel', 'bundle.delta') |
|
701 | 701 | if configtarget not in ('', 'p1', 'full'): |
|
702 | 702 | msg = _("""config "devel.bundle.delta" has unknown value: %s""")
|
703 | 703 | repo.ui.warn(msg % configtarget) |
|
704 | 704 | |
|
705 | 705 | deltamode = repository.CG_DELTAMODE_STD |
|
706 | 706 | if forcedeltaparentprev: |
|
707 | 707 | deltamode = repository.CG_DELTAMODE_PREV |
|
708 | 708 | elif configtarget == 'p1': |
|
709 | 709 | deltamode = repository.CG_DELTAMODE_P1 |
|
710 | 710 | elif configtarget == 'full': |
|
711 | 711 | deltamode = repository.CG_DELTAMODE_FULL |
|
712 | 712 | |
|
713 | 713 | revisions = store.emitrevisions( |
|
714 | 714 | nodes, |
|
715 | 715 | nodesorder=nodesorder, |
|
716 | 716 | revisiondata=True, |
|
717 | 717 | assumehaveparentrevisions=not ellipses, |
|
718 | 718 | deltamode=deltamode) |
|
719 | 719 | |
|
720 | 720 | for i, revision in enumerate(revisions): |
|
721 | 721 | if progress: |
|
722 | 722 | progress.update(i + 1) |
|
723 | 723 | |
|
724 | 724 | if ellipses: |
|
725 | 725 | linknode = linknodes[revision.node] |
|
726 | 726 | |
|
727 | 727 | if revision.node in adjustedparents: |
|
728 | 728 | p1node, p2node = adjustedparents[revision.node] |
|
729 | 729 | revision.p1node = p1node |
|
730 | 730 | revision.p2node = p2node |
|
731 | 731 | revision.flags |= repository.REVISION_FLAG_ELLIPSIS |
|
732 | 732 | |
|
733 | 733 | else: |
|
734 | 734 | linknode = lookup(revision.node) |
|
735 | 735 | |
|
736 | 736 | revision.linknode = linknode |
|
737 | 737 | yield revision |
|
738 | 738 | |
|
739 | 739 | if progress: |
|
740 | 740 | progress.complete() |
|
741 | 741 | |
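
Hedged example of exercising the ``devel.bundle.delta`` knob read inside ``deltagroup()``; ``ui.setconfig`` is a standard API, but setting this outside tests or debugging sessions is my assumption, not something the patch does::

    # force full snapshots ('p1' would force parent-1 deltas) when bundling
    repo.ui.setconfig('devel', 'bundle.delta', 'full', 'example')
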
|
742 | 742 | class cgpacker(object): |
|
743 | 743 | def __init__(self, repo, oldmatcher, matcher, version, |
|
744 | 744 | builddeltaheader, manifestsend, |
|
745 | 745 | forcedeltaparentprev=False, |
|
746 | 746 | bundlecaps=None, ellipses=False, |
|
747 | 747 | shallow=False, ellipsisroots=None, fullnodes=None): |
|
748 | 748 | """Given a source repo, construct a bundler. |
|
749 | 749 | |
|
750 | 750 | oldmatcher is a matcher that matches on files the client already has. |
|
751 | 751 | These will not be included in the changegroup. |
|
752 | 752 | |
|
753 | 753 | matcher is a matcher that matches on files to include in the |
|
754 | 754 | changegroup. Used to facilitate sparse changegroups. |
|
755 | 755 | |
|
756 | 756 | forcedeltaparentprev indicates whether delta parents must be against |
|
757 | 757 | the previous revision in a delta group. This should only be used for |
|
758 | 758 | compatibility with changegroup version 1. |
|
759 | 759 | |
|
760 | 760 | builddeltaheader is a callable that constructs the header for a group |
|
761 | 761 | delta. |
|
762 | 762 | |
|
763 | 763 | manifestsend is a chunk to send after manifests have been fully emitted. |
|
764 | 764 | |
|
765 | 765 | ellipses indicates whether ellipsis serving mode is enabled. |
|
766 | 766 | |
|
767 | 767 | bundlecaps is optional and can be used to specify the set of |
|
768 | 768 | capabilities which can be used to build the bundle. While bundlecaps is |
|
769 | 769 | unused in core Mercurial, extensions rely on this feature to communicate |
|
770 | 770 | capabilities to customize the changegroup packer. |
|
771 | 771 | |
|
772 | 772 | shallow indicates whether shallow data might be sent. The packer may |
|
773 | 773 | need to pack file contents not introduced by the changes being packed. |
|
774 | 774 | |
|
775 | 775 | fullnodes is the set of changelog nodes which should not be ellipsis |
|
776 | 776 | nodes. We store this rather than the set of nodes that should be |
|
777 | 777 | ellipsis because for very large histories we expect this to be |
|
778 | 778 | significantly smaller. |
|
779 | 779 | """ |
|
780 | 780 | assert oldmatcher |
|
781 | 781 | assert matcher |
|
782 | 782 | self._oldmatcher = oldmatcher |
|
783 | 783 | self._matcher = matcher |
|
784 | 784 | |
|
785 | 785 | self.version = version |
|
786 | 786 | self._forcedeltaparentprev = forcedeltaparentprev |
|
787 | 787 | self._builddeltaheader = builddeltaheader |
|
788 | 788 | self._manifestsend = manifestsend |
|
789 | 789 | self._ellipses = ellipses |
|
790 | 790 | |
|
791 | 791 | # Set of capabilities we can use to build the bundle. |
|
792 | 792 | if bundlecaps is None: |
|
793 | 793 | bundlecaps = set() |
|
794 | 794 | self._bundlecaps = bundlecaps |
|
795 | 795 | self._isshallow = shallow |
|
796 | 796 | self._fullclnodes = fullnodes |
|
797 | 797 | |
|
798 | 798 | # Maps ellipsis revs to their roots at the changelog level. |
|
799 | 799 | self._precomputedellipsis = ellipsisroots |
|
800 | 800 | |
|
801 | 801 | self._repo = repo |
|
802 | 802 | |
|
803 | 803 | if self._repo.ui.verbose and not self._repo.ui.debugflag: |
|
804 | 804 | self._verbosenote = self._repo.ui.note |
|
805 | 805 | else: |
|
806 | 806 | self._verbosenote = lambda s: None |
|
807 | 807 | |
|
808 | 808 | def generate(self, commonrevs, clnodes, fastpathlinkrev, source, |
|
809 | 809 | changelog=True): |
|
810 | 810 | """Yield a sequence of changegroup byte chunks. |
|
811 | 811 | If changelog is False, changelog data won't be added to changegroup |
|
812 | 812 | """ |
|
813 | 813 | |
|
814 | 814 | repo = self._repo |
|
815 | 815 | cl = repo.changelog |
|
816 | 816 | |
|
817 | 817 | self._verbosenote(_('uncompressed size of bundle content:\n')) |
|
818 | 818 | size = 0 |
|
819 | 819 | |
|
820 | 820 | clstate, deltas = self._generatechangelog(cl, clnodes, |
|
821 | 821 | generate=changelog) |
|
822 | 822 | for delta in deltas: |
|
823 | 823 | for chunk in _revisiondeltatochunks(delta, |
|
824 | 824 | self._builddeltaheader): |
|
825 | 825 | size += len(chunk) |
|
826 | 826 | yield chunk |
|
827 | 827 | |
|
828 | 828 | close = closechunk() |
|
829 | 829 | size += len(close) |
|
830 | 830 | yield closechunk() |
|
831 | 831 | |
|
832 | 832 | self._verbosenote(_('%8.i (changelog)\n') % size) |
|
833 | 833 | |
|
834 | 834 | clrevorder = clstate['clrevorder'] |
|
835 | 835 | manifests = clstate['manifests'] |
|
836 | 836 | changedfiles = clstate['changedfiles'] |
|
837 | 837 | |
|
838 | 838 | # We need to make sure that the linkrev in the changegroup refers to |
|
839 | 839 | # the first changeset that introduced the manifest or file revision. |
|
840 | 840 | # The fastpath is usually safer than the slowpath, because the filelogs |
|
841 | 841 | # are walked in revlog order. |
|
842 | 842 | # |
|
843 | 843 | # When taking the slowpath when the manifest revlog uses generaldelta, |
|
844 | 844 | # the manifest may be walked in the "wrong" order. Without 'clrevorder', |
|
845 | 845 | # we would get an incorrect linkrev (see fix in cc0ff93d0c0c). |
|
846 | 846 | # |
|
847 | 847 | # When taking the fastpath, we are only vulnerable to reordering |
|
848 | 848 | # of the changelog itself. The changelog never uses generaldelta and is |
|
849 | 849 | # never reordered. To handle this case, we simply take the slowpath, |
|
850 | 850 | # which already has the 'clrevorder' logic. This was also fixed in |
|
851 | 851 | # cc0ff93d0c0c. |
|
852 | 852 | |
|
853 | 853 | # Treemanifests don't work correctly with fastpathlinkrev |
|
854 | 854 | # either, because we don't discover which directory nodes to |
|
855 | 855 | # send along with files. This could probably be fixed. |
|
856 | 856 | fastpathlinkrev = fastpathlinkrev and ( |
|
857 | 857 | 'treemanifest' not in repo.requirements) |
|
858 | 858 | |
|
859 | 859 | fnodes = {} # needed file nodes |
|
860 | 860 | |
|
861 | 861 | size = 0 |
|
862 | 862 | it = self.generatemanifests( |
|
863 | 863 | commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source, |
|
864 | 864 | clstate['clrevtomanifestrev']) |
|
865 | 865 | |
|
866 | 866 | for tree, deltas in it: |
|
867 | 867 | if tree: |
|
868 | 868 | assert self.version == b'03' |
|
869 | 869 | chunk = _fileheader(tree) |
|
870 | 870 | size += len(chunk) |
|
871 | 871 | yield chunk |
|
872 | 872 | |
|
873 | 873 | for delta in deltas: |
|
874 | 874 | chunks = _revisiondeltatochunks(delta, self._builddeltaheader) |
|
875 | 875 | for chunk in chunks: |
|
876 | 876 | size += len(chunk) |
|
877 | 877 | yield chunk |
|
878 | 878 | |
|
879 | 879 | close = closechunk() |
|
880 | 880 | size += len(close) |
|
881 | 881 | yield close |
|
882 | 882 | |
|
883 | 883 | self._verbosenote(_('%8.i (manifests)\n') % size) |
|
884 | 884 | yield self._manifestsend |
|
885 | 885 | |
|
886 | 886 | mfdicts = None |
|
887 | 887 | if self._ellipses and self._isshallow: |
|
888 | 888 | mfdicts = [(self._repo.manifestlog[n].read(), lr) |
|
889 | 889 | for (n, lr) in manifests.iteritems()] |
|
890 | 890 | |
|
891 | 891 | manifests.clear() |
|
892 | 892 | clrevs = set(cl.rev(x) for x in clnodes) |
|
893 | 893 | |
|
894 | 894 | it = self.generatefiles(changedfiles, commonrevs, |
|
895 | 895 | source, mfdicts, fastpathlinkrev, |
|
896 | 896 | fnodes, clrevs) |
|
897 | 897 | |
|
898 | 898 | for path, deltas in it: |
|
899 | 899 | h = _fileheader(path) |
|
900 | 900 | size = len(h) |
|
901 | 901 | yield h |
|
902 | 902 | |
|
903 | 903 | for delta in deltas: |
|
904 | 904 | chunks = _revisiondeltatochunks(delta, self._builddeltaheader) |
|
905 | 905 | for chunk in chunks: |
|
906 | 906 | size += len(chunk) |
|
907 | 907 | yield chunk |
|
908 | 908 | |
|
909 | 909 | close = closechunk() |
|
910 | 910 | size += len(close) |
|
911 | 911 | yield close |
|
912 | 912 | |
|
913 | 913 | self._verbosenote(_('%8.i %s\n') % (size, path)) |
|
914 | 914 | |
|
915 | 915 | yield closechunk() |
|
916 | 916 | |
|
917 | 917 | if clnodes: |
|
918 | 918 | repo.hook('outgoing', node=hex(clnodes[0]), source=source) |
|
919 | 919 | |
|
920 | 920 | def _generatechangelog(self, cl, nodes, generate=True): |
|
921 | 921 | """Generate data for changelog chunks. |
|
922 | 922 | |
|
923 | 923 | Returns a 2-tuple of a dict containing state and an iterable of |
|
924 | 924 | byte chunks. The state will not be fully populated until the |
|
925 | 925 | chunk stream has been fully consumed. |
|
926 | 926 | |
|
927 | 927 | if generate is False, the state will be fully populated and no chunk |
|
928 | 928 | stream will be yielded |
|
929 | 929 | """ |
|
930 | 930 | clrevorder = {} |
|
931 | 931 | manifests = {} |
|
932 | 932 | mfl = self._repo.manifestlog |
|
933 | 933 | changedfiles = set() |
|
934 | 934 | clrevtomanifestrev = {} |
|
935 | 935 | |
|
936 | 936 | state = { |
|
937 | 937 | 'clrevorder': clrevorder, |
|
938 | 938 | 'manifests': manifests, |
|
939 | 939 | 'changedfiles': changedfiles, |
|
940 | 940 | 'clrevtomanifestrev': clrevtomanifestrev, |
|
941 | 941 | } |
|
942 | 942 | |
|
943 | 943 | if not (generate or self._ellipses): |
|
944 | 944 | # sort the nodes in storage order |
|
945 | 945 | nodes = sorted(nodes, key=cl.rev) |
|
946 | 946 | for node in nodes: |
|
947 | 947 | c = cl.changelogrevision(node) |
|
948 | 948 | clrevorder[node] = len(clrevorder) |
|
949 | 949 | # record the first changeset introducing this manifest version |
|
950 | 950 | manifests.setdefault(c.manifest, node) |
|
951 | 951 | # Record a complete list of potentially-changed files in |
|
952 | 952 | # this manifest. |
|
953 | 953 | changedfiles.update(c.files) |
|
954 | 954 | |
|
955 | 955 | return state, () |
|
956 | 956 | |
|
957 | 957 | # Callback for the changelog, used to collect changed files and |
|
958 | 958 | # manifest nodes. |
|
959 | 959 | # Returns the linkrev node (identity in the changelog case). |
|
960 | 960 | def lookupcl(x): |
|
961 | 961 | c = cl.changelogrevision(x) |
|
962 | 962 | clrevorder[x] = len(clrevorder) |
|
963 | 963 | |
|
964 | 964 | if self._ellipses: |
|
965 | 965 | # Only update manifests if x is going to be sent. Otherwise we |
|
966 | 966 | # end up with bogus linkrevs specified for manifests and |
|
967 | 967 | # we skip some manifest nodes that we should otherwise |
|
968 | 968 | # have sent. |
|
969 | 969 | if (x in self._fullclnodes |
|
970 | 970 | or cl.rev(x) in self._precomputedellipsis): |
|
971 | 971 | |
|
972 | 972 | manifestnode = c.manifest |
|
973 | 973 | # Record the first changeset introducing this manifest |
|
974 | 974 | # version. |
|
975 | 975 | manifests.setdefault(manifestnode, x) |
|
976 | 976 | # Set this narrow-specific dict so we have the lowest |
|
977 | 977 | # manifest revnum to look up for this cl revnum. (Part of |
|
978 | 978 | # mapping changelog ellipsis parents to manifest ellipsis |
|
979 | 979 | # parents) |
|
980 | 980 | clrevtomanifestrev.setdefault( |
|
981 | 981 | cl.rev(x), mfl.rev(manifestnode)) |
|
982 | 982 | # We can't trust the changed files list in the changeset if the |
|
983 | 983 | # client requested a shallow clone. |
|
984 | 984 | if self._isshallow: |
|
985 | 985 | changedfiles.update(mfl[c.manifest].read().keys()) |
|
986 | 986 | else: |
|
987 | 987 | changedfiles.update(c.files) |
|
988 | 988 | else: |
|
989 | 989 | # record the first changeset introducing this manifest version |
|
990 | 990 | manifests.setdefault(c.manifest, x) |
|
991 | 991 | # Record a complete list of potentially-changed files in |
|
992 | 992 | # this manifest. |
|
993 | 993 | changedfiles.update(c.files) |
|
994 | 994 | |
|
995 | 995 | return x |
|
996 | 996 | |
|
997 | 997 | gen = deltagroup( |
|
998 | 998 | self._repo, cl, nodes, True, lookupcl, |
|
999 | 999 | self._forcedeltaparentprev, |
|
1000 | 1000 | ellipses=self._ellipses, |
|
1001 | 1001 | topic=_('changesets'), |
|
1002 | 1002 | clrevtolocalrev={}, |
|
1003 | 1003 | fullclnodes=self._fullclnodes, |
|
1004 | 1004 | precomputedellipsis=self._precomputedellipsis) |
|
1005 | 1005 | |
|
1006 | 1006 | return state, gen |
|
1007 | 1007 | |
|
1008 | 1008 | def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, |
|
1009 | 1009 | manifests, fnodes, source, clrevtolocalrev): |
|
1010 | 1010 | """Returns an iterator of changegroup chunks containing manifests. |
|
1011 | 1011 | |
|
1012 | 1012 | `source` is unused here, but is used by extensions like remotefilelog to |
|
1013 | 1013 | change what is sent based on pulls vs pushes, etc.
|
1014 | 1014 | """ |
|
1015 | 1015 | repo = self._repo |
|
1016 | 1016 | mfl = repo.manifestlog |
|
1017 | 1017 | tmfnodes = {'': manifests} |
|
1018 | 1018 | |
|
1019 | 1019 | # Callback for the manifest, used to collect linkrevs for filelog |
|
1020 | 1020 | # revisions. |
|
1021 | 1021 | # Returns the linkrev node (collected in lookupcl). |
|
1022 | 1022 | def makelookupmflinknode(tree, nodes): |
|
1023 | 1023 | if fastpathlinkrev: |
|
1024 | 1024 | assert not tree |
|
1025 | 1025 | return manifests.__getitem__ |
|
1026 | 1026 | |
|
1027 | 1027 | def lookupmflinknode(x): |
|
1028 | 1028 | """Callback for looking up the linknode for manifests. |
|
1029 | 1029 | |
|
1030 | 1030 | Returns the linkrev node for the specified manifest. |
|
1031 | 1031 | |
|
1032 | 1032 | SIDE EFFECT: |
|
1033 | 1033 | |
|
1034 | 1034 | 1) fclnodes gets populated with the list of relevant |
|
1035 | 1035 | file nodes if we're not using fastpathlinkrev |
|
1036 | 1036 | 2) When treemanifests are in use, collects treemanifest nodes |
|
1037 | 1037 | to send |
|
1038 | 1038 | |
|
1039 | 1039 | Note that this means manifests must be completely sent to |
|
1040 | 1040 | the client before you can trust the list of files and |
|
1041 | 1041 | treemanifests to send. |
|
1042 | 1042 | """ |
|
1043 | 1043 | clnode = nodes[x] |
|
1044 | 1044 | mdata = mfl.get(tree, x).readfast(shallow=True) |
|
1045 | 1045 | for p, n, fl in mdata.iterentries(): |
|
1046 | 1046 | if fl == 't': # subdirectory manifest |
|
1047 | 1047 | subtree = tree + p + '/' |
|
1048 | 1048 | tmfclnodes = tmfnodes.setdefault(subtree, {}) |
|
1049 | 1049 | tmfclnode = tmfclnodes.setdefault(n, clnode) |
|
1050 | 1050 | if clrevorder[clnode] < clrevorder[tmfclnode]: |
|
1051 | 1051 | tmfclnodes[n] = clnode |
|
1052 | 1052 | else: |
|
1053 | 1053 | f = tree + p |
|
1054 | 1054 | fclnodes = fnodes.setdefault(f, {}) |
|
1055 | 1055 | fclnode = fclnodes.setdefault(n, clnode) |
|
1056 | 1056 | if clrevorder[clnode] < clrevorder[fclnode]: |
|
1057 | 1057 | fclnodes[n] = clnode |
|
1058 | 1058 | return clnode |
|
1059 | 1059 | return lookupmflinknode |
|
1060 | 1060 | |
|
1061 | 1061 | while tmfnodes: |
|
1062 | 1062 | tree, nodes = tmfnodes.popitem() |
|
1063 | 1063 | |
|
1064 | 1064 | should_visit = self._matcher.visitdir(tree[:-1] or '.') |
|
1065 | 1065 | if tree and not should_visit: |
|
1066 | 1066 | continue |
|
1067 | 1067 | |
|
1068 | 1068 | store = mfl.getstorage(tree) |
|
1069 | 1069 | |
|
1070 | 1070 | if not should_visit: |
|
1071 | 1071 | # No nodes to send because this directory is out of |
|
1072 | 1072 | # the client's view of the repository (probably |
|
1073 | 1073 | # because of narrow clones). Do this even for the root |
|
1074 | 1074 | # directory (tree=='') |
|
1075 | 1075 | prunednodes = [] |
|
1076 | 1076 | else: |
|
1077 | 1077 | # Avoid sending any manifest nodes we can prove the |
|
1078 | 1078 | # client already has by checking linkrevs. See the |
|
1079 | 1079 | # related comment in generatefiles(). |
|
1080 | 1080 | prunednodes = self._prunemanifests(store, nodes, commonrevs) |
|
1081 | 1081 | |
|
1082 | 1082 | if tree and not prunednodes: |
|
1083 | 1083 | continue |
|
1084 | 1084 | |
|
1085 | 1085 | lookupfn = makelookupmflinknode(tree, nodes) |
|
1086 | 1086 | |
|
1087 | 1087 | deltas = deltagroup( |
|
1088 | 1088 | self._repo, store, prunednodes, False, lookupfn, |
|
1089 | 1089 | self._forcedeltaparentprev, |
|
1090 | 1090 | ellipses=self._ellipses, |
|
1091 | 1091 | topic=_('manifests'), |
|
1092 | 1092 | clrevtolocalrev=clrevtolocalrev, |
|
1093 | 1093 | fullclnodes=self._fullclnodes, |
|
1094 | 1094 | precomputedellipsis=self._precomputedellipsis) |
|
1095 | 1095 | |
|
1096 | 1096 | if not self._oldmatcher.visitdir(store.tree[:-1] or '.'): |
|
1097 | 1097 | yield tree, deltas |
|
1098 | 1098 | else: |
|
1099 | 1099 | # 'deltas' is a generator and we need to consume it even if |
|
1100 | 1100 | # we are not going to send it because a side-effect is that |
|
1101 | 1101 | # it updates tmfnodes (via lookupfn)
|
1102 | 1102 | for d in deltas: |
|
1103 | 1103 | pass |
|
1104 | 1104 | if not tree: |
|
1105 | 1105 | yield tree, [] |
|
1106 | 1106 | |
|
1107 | 1107 | def _prunemanifests(self, store, nodes, commonrevs): |
|
1108 | 1108 | # This is split out as a separate method to allow filtering |
|
1109 | 1109 | # commonrevs in extension code. |
|
1110 | 1110 | # |
|
1111 | 1111 | # TODO(augie): this shouldn't be required, instead we should |
|
1112 | 1112 | # make filtering of revisions to send delegated to the store |
|
1113 | 1113 | # layer. |
|
1114 | 1114 | frev, flr = store.rev, store.linkrev |
|
1115 | 1115 | return [n for n in nodes if flr(frev(n)) not in commonrevs] |
|
1116 | 1116 | |
|
1117 | 1117 | # The 'source' parameter is useful for extensions |
|
1118 | 1118 | def generatefiles(self, changedfiles, commonrevs, source, |
|
1119 | 1119 | mfdicts, fastpathlinkrev, fnodes, clrevs): |
|
1120 | 1120 | changedfiles = [f for f in changedfiles |
|
1121 | 1121 | if self._matcher(f) and not self._oldmatcher(f)] |
|
1122 | 1122 | |
|
1123 | 1123 | if not fastpathlinkrev: |
|
1124 | 1124 | def normallinknodes(unused, fname): |
|
1125 | 1125 | return fnodes.get(fname, {}) |
|
1126 | 1126 | else: |
|
1127 | 1127 | cln = self._repo.changelog.node |
|
1128 | 1128 | |
|
1129 | 1129 | def normallinknodes(store, fname): |
|
1130 | 1130 | flinkrev = store.linkrev |
|
1131 | 1131 | fnode = store.node |
|
1132 | 1132 | revs = ((r, flinkrev(r)) for r in store) |
|
1133 | 1133 | return dict((fnode(r), cln(lr)) |
|
1134 | 1134 | for r, lr in revs if lr in clrevs) |
|
1135 | 1135 | |
|
1136 | 1136 | clrevtolocalrev = {} |
|
1137 | 1137 | |
|
1138 | 1138 | if self._isshallow: |
|
1139 | 1139 | # In a shallow clone, the linknodes callback needs to also include |
|
1140 | 1140 | # those file nodes that are in the manifests we sent but weren't |
|
1141 | 1141 | # introduced by those manifests. |
|
1142 | 1142 | commonctxs = [self._repo[c] for c in commonrevs] |
|
1143 | 1143 | clrev = self._repo.changelog.rev |
|
1144 | 1144 | |
|
1145 | 1145 | def linknodes(flog, fname): |
|
1146 | 1146 | for c in commonctxs: |
|
1147 | 1147 | try: |
|
1148 | 1148 | fnode = c.filenode(fname) |
|
1149 | 1149 | clrevtolocalrev[c.rev()] = flog.rev(fnode) |
|
1150 | 1150 | except error.ManifestLookupError: |
|
1151 | 1151 | pass |
|
1152 | 1152 | links = normallinknodes(flog, fname) |
|
1153 | 1153 | if len(links) != len(mfdicts): |
|
1154 | 1154 | for mf, lr in mfdicts: |
|
1155 | 1155 | fnode = mf.get(fname, None) |
|
1156 | 1156 | if fnode in links: |
|
1157 | 1157 | links[fnode] = min(links[fnode], lr, key=clrev) |
|
1158 | 1158 | elif fnode: |
|
1159 | 1159 | links[fnode] = lr |
|
1160 | 1160 | return links |
|
1161 | 1161 | else: |
|
1162 | 1162 | linknodes = normallinknodes |
|
1163 | 1163 | |
|
1164 | 1164 | repo = self._repo |
|
1165 | 1165 | progress = repo.ui.makeprogress(_('files'), unit=_('files'), |
|
1166 | 1166 | total=len(changedfiles)) |
|
1167 | 1167 | for i, fname in enumerate(sorted(changedfiles)): |
|
1168 | 1168 | filerevlog = repo.file(fname) |
|
1169 | 1169 | if not filerevlog: |
|
1170 | 1170 | raise error.Abort(_("empty or missing file data for %s") % |
|
1171 | 1171 | fname) |
|
1172 | 1172 | |
|
1173 | 1173 | clrevtolocalrev.clear() |
|
1174 | 1174 | |
|
1175 | 1175 | linkrevnodes = linknodes(filerevlog, fname) |
|
1176 | 1176 | # Lookup for filenodes, we collected the linkrev nodes above in the |
|
1177 | 1177 | # fastpath case and with lookupmf in the slowpath case. |
|
1178 | 1178 | def lookupfilelog(x): |
|
1179 | 1179 | return linkrevnodes[x] |
|
1180 | 1180 | |
|
1181 | 1181 | frev, flr = filerevlog.rev, filerevlog.linkrev |
|
1182 | 1182 | # Skip sending any filenode we know the client already |
|
1183 | 1183 | # has. This avoids over-sending files relatively |
|
1184 | 1184 | # inexpensively, so it's not a problem if we under-filter |
|
1185 | 1185 | # here. |
|
1186 | 1186 | filenodes = [n for n in linkrevnodes |
|
1187 | 1187 | if flr(frev(n)) not in commonrevs] |
|
1188 | 1188 | |
|
1189 | 1189 | if not filenodes: |
|
1190 | 1190 | continue |
|
1191 | 1191 | |
|
1192 | 1192 | progress.update(i + 1, item=fname) |
|
1193 | 1193 | |
|
1194 | 1194 | deltas = deltagroup( |
|
1195 | 1195 | self._repo, filerevlog, filenodes, False, lookupfilelog, |
|
1196 | 1196 | self._forcedeltaparentprev, |
|
1197 | 1197 | ellipses=self._ellipses, |
|
1198 | 1198 | clrevtolocalrev=clrevtolocalrev, |
|
1199 | 1199 | fullclnodes=self._fullclnodes, |
|
1200 | 1200 | precomputedellipsis=self._precomputedellipsis) |
|
1201 | 1201 | |
|
1202 | 1202 | yield fname, deltas |
|
1203 | 1203 | |
|
1204 | 1204 | progress.complete() |
|
1205 | 1205 | |
|
1206 | 1206 | def _makecg1packer(repo, oldmatcher, matcher, bundlecaps, |
|
1207 | 1207 | ellipses=False, shallow=False, ellipsisroots=None, |
|
1208 | 1208 | fullnodes=None): |
|
1209 | 1209 | builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack( |
|
1210 | 1210 | d.node, d.p1node, d.p2node, d.linknode) |
|
1211 | 1211 | |
|
1212 | 1212 | return cgpacker(repo, oldmatcher, matcher, b'01', |
|
1213 | 1213 | builddeltaheader=builddeltaheader, |
|
1214 | 1214 | manifestsend=b'', |
|
1215 | 1215 | forcedeltaparentprev=True, |
|
1216 | 1216 | bundlecaps=bundlecaps, |
|
1217 | 1217 | ellipses=ellipses, |
|
1218 | 1218 | shallow=shallow, |
|
1219 | 1219 | ellipsisroots=ellipsisroots, |
|
1220 | 1220 | fullnodes=fullnodes) |
|
1221 | 1221 | |
|
1222 | 1222 | def _makecg2packer(repo, oldmatcher, matcher, bundlecaps, |
|
1223 | 1223 | ellipses=False, shallow=False, ellipsisroots=None, |
|
1224 | 1224 | fullnodes=None): |
|
1225 | 1225 | builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack( |
|
1226 | 1226 | d.node, d.p1node, d.p2node, d.basenode, d.linknode) |
|
1227 | 1227 | |
|
1228 | 1228 | return cgpacker(repo, oldmatcher, matcher, b'02', |
|
1229 | 1229 | builddeltaheader=builddeltaheader, |
|
1230 | 1230 | manifestsend=b'', |
|
1231 | 1231 | bundlecaps=bundlecaps, |
|
1232 | 1232 | ellipses=ellipses, |
|
1233 | 1233 | shallow=shallow, |
|
1234 | 1234 | ellipsisroots=ellipsisroots, |
|
1235 | 1235 | fullnodes=fullnodes) |
|
1236 | 1236 | |
|
1237 | 1237 | def _makecg3packer(repo, oldmatcher, matcher, bundlecaps, |
|
1238 | 1238 | ellipses=False, shallow=False, ellipsisroots=None, |
|
1239 | 1239 | fullnodes=None): |
|
1240 | 1240 | builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack( |
|
1241 | 1241 | d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags) |
|
1242 | 1242 | |
|
1243 | 1243 | return cgpacker(repo, oldmatcher, matcher, b'03', |
|
1244 | 1244 | builddeltaheader=builddeltaheader, |
|
1245 | 1245 | manifestsend=closechunk(), |
|
1246 | 1246 | bundlecaps=bundlecaps, |
|
1247 | 1247 | ellipses=ellipses, |
|
1248 | 1248 | shallow=shallow, |
|
1249 | 1249 | ellipsisroots=ellipsisroots, |
|
1250 | 1250 | fullnodes=fullnodes) |
|
1251 | 1251 | |
|
1252 | 1252 | _packermap = {'01': (_makecg1packer, cg1unpacker), |
|
1253 | 1253 | # cg2 adds support for exchanging generaldelta |
|
1254 | 1254 | '02': (_makecg2packer, cg2unpacker), |
|
1255 | 1255 | # cg3 adds support for exchanging revlog flags and treemanifests |
|
1256 | 1256 | '03': (_makecg3packer, cg3unpacker), |
|
1257 | 1257 | } |
|
1258 | 1258 | |
|
1259 | 1259 | def allsupportedversions(repo): |
|
1260 | 1260 | versions = set(_packermap.keys()) |
|
1261 | 1261 | if not (repo.ui.configbool('experimental', 'changegroup3') or |
|
1262 | 1262 | repo.ui.configbool('experimental', 'treemanifest') or |
|
1263 | 1263 | 'treemanifest' in repo.requirements): |
|
1264 | 1264 | versions.discard('03') |
|
1265 | 1265 | return versions |
|
1266 | 1266 | |
|
1267 | 1267 | # Changegroup versions that can be applied to the repo |
|
1268 | 1268 | def supportedincomingversions(repo): |
|
1269 | 1269 | return allsupportedversions(repo) |
|
1270 | 1270 | |
|
1271 | 1271 | # Changegroup versions that can be created from the repo |
|
1272 | 1272 | def supportedoutgoingversions(repo): |
|
1273 | 1273 | versions = allsupportedversions(repo) |
|
1274 | 1274 | if 'treemanifest' in repo.requirements: |
|
1275 | 1275 | # Versions 01 and 02 support only flat manifests and it's just too |
|
1276 | 1276 | # expensive to convert between the flat manifest and tree manifest on |
|
1277 | 1277 | # the fly. Since tree manifests are hashed differently, all of history |
|
1278 | 1278 | # would have to be converted. Instead, we simply don't even pretend to |
|
1279 | 1279 | # support versions 01 and 02. |
|
1280 | 1280 | versions.discard('01') |
|
1281 | 1281 | versions.discard('02') |
|
1282 | 1282 | if repository.NARROW_REQUIREMENT in repo.requirements: |
|
1283 | 1283 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
1284 | 1284 | # support that for stripping and unbundling to work. |
|
1285 | 1285 | versions.discard('01') |
|
1286 | 1286 | versions.discard('02') |
|
1287 | 1287 | if LFS_REQUIREMENT in repo.requirements: |
|
1288 | 1288 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
1289 | 1289 | # mark LFS entries with REVIDX_EXTSTORED. |
|
1290 | 1290 | versions.discard('01') |
|
1291 | 1291 | versions.discard('02') |
|
1292 | 1292 | |
|
1293 | 1293 | return versions |
|
1294 | 1294 | |
|
1295 | 1295 | def localversion(repo): |
|
1296 | 1296 | # Finds the best version to use for bundles that are meant to be used |
|
1297 | 1297 | # locally, such as those from strip and shelve, and temporary bundles. |
|
1298 | 1298 | return max(supportedoutgoingversions(repo)) |
|
1299 | 1299 | |
|
1300 | 1300 | def safeversion(repo): |
|
1301 | 1301 | # Finds the smallest version that it's safe to assume clients of the repo |
|
1302 | 1302 | # will support. For example, all hg versions that support generaldelta also |
|
1303 | 1303 | # support changegroup 02. |
|
1304 | 1304 | versions = supportedoutgoingversions(repo) |
|
1305 | 1305 | if 'generaldelta' in repo.requirements: |
|
1306 | 1306 | versions.discard('01') |
|
1307 | 1307 | assert versions |
|
1308 | 1308 | return min(versions) |
|
1309 | 1309 | |
|
1310 | 1310 | def getbundler(version, repo, bundlecaps=None, oldmatcher=None, |
|
1311 | 1311 | matcher=None, ellipses=False, shallow=False, |
|
1312 | 1312 | ellipsisroots=None, fullnodes=None): |
|
1313 | 1313 | assert version in supportedoutgoingversions(repo) |
|
1314 | 1314 | |
|
1315 | 1315 | if matcher is None: |
|
1316 |      | matcher = matchmod.always(

     | 1316 | matcher = matchmod.always()
|
1317 | 1317 | if oldmatcher is None: |
|
1318 |      | oldmatcher = matchmod.never(

     | 1318 | oldmatcher = matchmod.never()
|
1319 | 1319 | |
|
1320 | 1320 | if version == '01' and not matcher.always(): |
|
1321 | 1321 | raise error.ProgrammingError('version 01 changegroups do not support ' |
|
1322 | 1322 | 'sparse file matchers') |
|
1323 | 1323 | |
|
1324 | 1324 | if ellipses and version in (b'01', b'02'): |
|
1325 | 1325 | raise error.Abort( |
|
1326 | 1326 | _('ellipsis nodes require at least cg3 on client and server, ' |
|
1327 | 1327 | 'but negotiated version %s') % version) |
|
1328 | 1328 | |
|
1329 | 1329 | # Requested files could include files not in the local store. So |
|
1330 | 1330 | # filter those out. |
|
1331 | 1331 | matcher = repo.narrowmatch(matcher) |
|
1332 | 1332 | |
|
1333 | 1333 | fn = _packermap[version][0] |
|
1334 | 1334 | return fn(repo, oldmatcher, matcher, bundlecaps, ellipses=ellipses, |
|
1335 | 1335 | shallow=shallow, ellipsisroots=ellipsisroots, |
|
1336 | 1336 | fullnodes=fullnodes) |
|
1337 | 1337 | |
|
1338 | 1338 | def getunbundler(version, fh, alg, extras=None): |
|
1339 | 1339 | return _packermap[version][1](fh, alg, extras=extras) |
|
1340 | 1340 | |
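
A small sketch (assumptions: a local ``repo`` object and an uncompressed changegroup stream ``fh``) showing how ``_packermap`` ties the writing and reading sides together::

    version = max(supportedoutgoingversions(repo))    # e.g. '02' or '03'
    packer = getbundler(version, repo)                 # cgpacker for generation
    unpacker = getunbundler(version, fh, None)         # matching reader (None = uncompressed)
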
|
1341 | 1341 | def _changegroupinfo(repo, nodes, source): |
|
1342 | 1342 | if repo.ui.verbose or source == 'bundle': |
|
1343 | 1343 | repo.ui.status(_("%d changesets found\n") % len(nodes)) |
|
1344 | 1344 | if repo.ui.debugflag: |
|
1345 | 1345 | repo.ui.debug("list of changesets:\n") |
|
1346 | 1346 | for node in nodes: |
|
1347 | 1347 | repo.ui.debug("%s\n" % hex(node)) |
|
1348 | 1348 | |
|
1349 | 1349 | def makechangegroup(repo, outgoing, version, source, fastpath=False, |
|
1350 | 1350 | bundlecaps=None): |
|
1351 | 1351 | cgstream = makestream(repo, outgoing, version, source, |
|
1352 | 1352 | fastpath=fastpath, bundlecaps=bundlecaps) |
|
1353 | 1353 | return getunbundler(version, util.chunkbuffer(cgstream), None, |
|
1354 | 1354 | {'clcount': len(outgoing.missing) }) |
|
1355 | 1355 | |
|
1356 | 1356 | def makestream(repo, outgoing, version, source, fastpath=False, |
|
1357 | 1357 | bundlecaps=None, matcher=None): |
|
1358 | 1358 | bundler = getbundler(version, repo, bundlecaps=bundlecaps, |
|
1359 | 1359 | matcher=matcher) |
|
1360 | 1360 | |
|
1361 | 1361 | repo = repo.unfiltered() |
|
1362 | 1362 | commonrevs = outgoing.common |
|
1363 | 1363 | csets = outgoing.missing |
|
1364 | 1364 | heads = outgoing.missingheads |
|
1365 | 1365 | # We go through the fast path if we get told to, or if all (unfiltered)

1366 | 1366 | # heads have been requested (since we then know all linkrevs will

1367 | 1367 | # be pulled by the client).
|
1368 | 1368 | heads.sort() |
|
1369 | 1369 | fastpathlinkrev = fastpath or ( |
|
1370 | 1370 | repo.filtername is None and heads == sorted(repo.heads())) |
|
1371 | 1371 | |
|
1372 | 1372 | repo.hook('preoutgoing', throw=True, source=source) |
|
1373 | 1373 | _changegroupinfo(repo, csets, source) |
|
1374 | 1374 | return bundler.generate(commonrevs, csets, fastpathlinkrev, source) |
|
1375 | 1375 | |
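
For orientation only (not part of the change): a typical caller builds an ``outgoing`` set via discovery and then asks for a changegroup; ``outgoing`` is assumed here to be a ``discovery.outgoing`` instance::

    cg = makechangegroup(repo, outgoing, '02', 'push')
    # cg behaves like an unpacker wrapping the freshly generated stream
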
|
1376 | 1376 | def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles): |
|
1377 | 1377 | revisions = 0 |
|
1378 | 1378 | files = 0 |
|
1379 | 1379 | progress = repo.ui.makeprogress(_('files'), unit=_('files'), |
|
1380 | 1380 | total=expectedfiles) |
|
1381 | 1381 | for chunkdata in iter(source.filelogheader, {}): |
|
1382 | 1382 | files += 1 |
|
1383 | 1383 | f = chunkdata["filename"] |
|
1384 | 1384 | repo.ui.debug("adding %s revisions\n" % f) |
|
1385 | 1385 | progress.increment() |
|
1386 | 1386 | fl = repo.file(f) |
|
1387 | 1387 | o = len(fl) |
|
1388 | 1388 | try: |
|
1389 | 1389 | deltas = source.deltaiter() |
|
1390 | 1390 | if not fl.addgroup(deltas, revmap, trp): |
|
1391 | 1391 | raise error.Abort(_("received file revlog group is empty")) |
|
1392 | 1392 | except error.CensoredBaseError as e: |
|
1393 | 1393 | raise error.Abort(_("received delta base is censored: %s") % e) |
|
1394 | 1394 | revisions += len(fl) - o |
|
1395 | 1395 | if f in needfiles: |
|
1396 | 1396 | needs = needfiles[f] |
|
1397 | 1397 | for new in pycompat.xrange(o, len(fl)): |
|
1398 | 1398 | n = fl.node(new) |
|
1399 | 1399 | if n in needs: |
|
1400 | 1400 | needs.remove(n) |
|
1401 | 1401 | else: |
|
1402 | 1402 | raise error.Abort( |
|
1403 | 1403 | _("received spurious file revlog entry")) |
|
1404 | 1404 | if not needs: |
|
1405 | 1405 | del needfiles[f] |
|
1406 | 1406 | progress.complete() |
|
1407 | 1407 | |
|
1408 | 1408 | for f, needs in needfiles.iteritems(): |
|
1409 | 1409 | fl = repo.file(f) |
|
1410 | 1410 | for n in needs: |
|
1411 | 1411 | try: |
|
1412 | 1412 | fl.rev(n) |
|
1413 | 1413 | except error.LookupError: |
|
1414 | 1414 | raise error.Abort( |
|
1415 | 1415 | _('missing file data for %s:%s - run hg verify') % |
|
1416 | 1416 | (f, hex(n))) |
|
1417 | 1417 | |
|
1418 | 1418 | return revisions, files |
@@ -1,1508 +1,1508 b'' | |||
|
1 | 1 | # dirstate.py - working directory tracking for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import contextlib |
|
12 | 12 | import errno |
|
13 | 13 | import os |
|
14 | 14 | import stat |
|
15 | 15 | |
|
16 | 16 | from .i18n import _ |
|
17 | 17 | from .node import nullid |
|
18 | 18 | from . import ( |
|
19 | 19 | encoding, |
|
20 | 20 | error, |
|
21 | 21 | match as matchmod, |
|
22 | 22 | pathutil, |
|
23 | 23 | policy, |
|
24 | 24 | pycompat, |
|
25 | 25 | scmutil, |
|
26 | 26 | txnutil, |
|
27 | 27 | util, |
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | parsers = policy.importmod(r'parsers') |
|
31 | 31 | |
|
32 | 32 | propertycache = util.propertycache |
|
33 | 33 | filecache = scmutil.filecache |
|
34 | 34 | _rangemask = 0x7fffffff |
|
35 | 35 | |
|
36 | 36 | dirstatetuple = parsers.dirstatetuple |
|
37 | 37 | |
|
38 | 38 | class repocache(filecache): |
|
39 | 39 | """filecache for files in .hg/""" |
|
40 | 40 | def join(self, obj, fname): |
|
41 | 41 | return obj._opener.join(fname) |
|
42 | 42 | |
|
43 | 43 | class rootcache(filecache): |
|
44 | 44 | """filecache for files in the repository root""" |
|
45 | 45 | def join(self, obj, fname): |
|
46 | 46 | return obj._join(fname) |
|
47 | 47 | |
|
48 | 48 | def _getfsnow(vfs): |
|
49 | 49 | '''Get "now" timestamp on filesystem''' |
|
50 | 50 | tmpfd, tmpname = vfs.mkstemp() |
|
51 | 51 | try: |
|
52 | 52 | return os.fstat(tmpfd)[stat.ST_MTIME] |
|
53 | 53 | finally: |
|
54 | 54 | os.close(tmpfd) |
|
55 | 55 | vfs.unlink(tmpname) |
|
56 | 56 | |
|
57 | 57 | class dirstate(object): |
|
58 | 58 | |
|
59 | 59 | def __init__(self, opener, ui, root, validate, sparsematchfn): |
|
60 | 60 | '''Create a new dirstate object. |
|
61 | 61 | |
|
62 | 62 | opener is an open()-like callable that can be used to open the |
|
63 | 63 | dirstate file; root is the root of the directory tracked by |
|
64 | 64 | the dirstate. |
|
65 | 65 | ''' |
|
66 | 66 | self._opener = opener |
|
67 | 67 | self._validate = validate |
|
68 | 68 | self._root = root |
|
69 | 69 | self._sparsematchfn = sparsematchfn |
|
70 | 70 | # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is |
|
71 | 71 | # UNC path pointing to root share (issue4557) |
|
72 | 72 | self._rootdir = pathutil.normasprefix(root) |
|
73 | 73 | self._dirty = False |
|
74 | 74 | self._lastnormaltime = 0 |
|
75 | 75 | self._ui = ui |
|
76 | 76 | self._filecache = {} |
|
77 | 77 | self._parentwriters = 0 |
|
78 | 78 | self._filename = 'dirstate' |
|
79 | 79 | self._pendingfilename = '%s.pending' % self._filename |
|
80 | 80 | self._plchangecallbacks = {} |
|
81 | 81 | self._origpl = None |
|
82 | 82 | self._updatedfiles = set() |
|
83 | 83 | self._mapcls = dirstatemap |
|
84 | 84 | # Access and cache cwd early, so we don't access it for the first time |
|
85 | 85 | # after a working-copy update caused it to not exist (accessing it then |
|
86 | 86 | # raises an exception). |
|
87 | 87 | self._cwd |
|
88 | 88 | |
|
89 | 89 | @contextlib.contextmanager |
|
90 | 90 | def parentchange(self): |
|
91 | 91 | '''Context manager for handling dirstate parents. |
|
92 | 92 | |
|
93 | 93 | If an exception occurs in the scope of the context manager, |
|
94 | 94 | the incoherent dirstate won't be written when wlock is |
|
95 | 95 | released. |
|
96 | 96 | ''' |
|
97 | 97 | self._parentwriters += 1 |
|
98 | 98 | yield |
|
99 | 99 | # Typically we want the "undo" step of a context manager in a |
|
100 | 100 | # finally block so it happens even when an exception |
|
101 | 101 | # occurs. In this case, however, we only want to decrement |
|
102 | 102 | # parentwriters if the code in the with statement exits |
|
103 | 103 | # normally, so we don't have a try/finally here on purpose. |
|
104 | 104 | self._parentwriters -= 1 |
|
105 | 105 | |
|
106 | 106 | def pendingparentchange(self): |
|
107 | 107 | '''Returns true if the dirstate is in the middle of a set of changes |
|
108 | 108 | that modify the dirstate parent. |
|
109 | 109 | ''' |
|
110 | 110 | return self._parentwriters > 0 |
|
111 | 111 | |
|
112 | 112 | @propertycache |
|
113 | 113 | def _map(self): |
|
114 | 114 | """Return the dirstate contents (see documentation for dirstatemap).""" |
|
115 | 115 | self._map = self._mapcls(self._ui, self._opener, self._root) |
|
116 | 116 | return self._map |
|
117 | 117 | |
|
118 | 118 | @property |
|
119 | 119 | def _sparsematcher(self): |
|
120 | 120 | """The matcher for the sparse checkout. |
|
121 | 121 | |
|
122 | 122 | The working directory may not include every file from a manifest. The |
|
123 | 123 | matcher obtained by this property will match a path if it is to be |
|
124 | 124 | included in the working directory. |
|
125 | 125 | """ |
|
126 | 126 | # TODO there is potential to cache this property. For now, the matcher |
|
127 | 127 | # is resolved on every access. (But the called function does use a |
|
128 | 128 | # cache to keep the lookup fast.) |
|
129 | 129 | return self._sparsematchfn() |
|
130 | 130 | |
|
131 | 131 | @repocache('branch') |
|
132 | 132 | def _branch(self): |
|
133 | 133 | try: |
|
134 | 134 | return self._opener.read("branch").strip() or "default" |
|
135 | 135 | except IOError as inst: |
|
136 | 136 | if inst.errno != errno.ENOENT: |
|
137 | 137 | raise |
|
138 | 138 | return "default" |
|
139 | 139 | |
|
140 | 140 | @property |
|
141 | 141 | def _pl(self): |
|
142 | 142 | return self._map.parents() |
|
143 | 143 | |
|
144 | 144 | def hasdir(self, d): |
|
145 | 145 | return self._map.hastrackeddir(d) |
|
146 | 146 | |
|
147 | 147 | @rootcache('.hgignore') |
|
148 | 148 | def _ignore(self): |
|
149 | 149 | files = self._ignorefiles() |
|
150 | 150 | if not files: |
|
151 |     | return matchmod.never(

    | 151 | return matchmod.never()
|
152 | 152 | |
|
153 | 153 | pats = ['include:%s' % f for f in files] |
|
154 | 154 | return matchmod.match(self._root, '', [], pats, warn=self._ui.warn) |
|
155 | 155 | |
|
156 | 156 | @propertycache |
|
157 | 157 | def _slash(self): |
|
158 | 158 | return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/' |
|
159 | 159 | |
|
160 | 160 | @propertycache |
|
161 | 161 | def _checklink(self): |
|
162 | 162 | return util.checklink(self._root) |
|
163 | 163 | |
|
164 | 164 | @propertycache |
|
165 | 165 | def _checkexec(self): |
|
166 | 166 | return util.checkexec(self._root) |
|
167 | 167 | |
|
168 | 168 | @propertycache |
|
169 | 169 | def _checkcase(self): |
|
170 | 170 | return not util.fscasesensitive(self._join('.hg')) |
|
171 | 171 | |
|
172 | 172 | def _join(self, f): |
|
173 | 173 | # much faster than os.path.join() |
|
174 | 174 | # it's safe because f is always a relative path |
|
175 | 175 | return self._rootdir + f |
|
176 | 176 | |
|
177 | 177 | def flagfunc(self, buildfallback): |
|
178 | 178 | if self._checklink and self._checkexec: |
|
179 | 179 | def f(x): |
|
180 | 180 | try: |
|
181 | 181 | st = os.lstat(self._join(x)) |
|
182 | 182 | if util.statislink(st): |
|
183 | 183 | return 'l' |
|
184 | 184 | if util.statisexec(st): |
|
185 | 185 | return 'x' |
|
186 | 186 | except OSError: |
|
187 | 187 | pass |
|
188 | 188 | return '' |
|
189 | 189 | return f |
|
190 | 190 | |
|
191 | 191 | fallback = buildfallback() |
|
192 | 192 | if self._checklink: |
|
193 | 193 | def f(x): |
|
194 | 194 | if os.path.islink(self._join(x)): |
|
195 | 195 | return 'l' |
|
196 | 196 | if 'x' in fallback(x): |
|
197 | 197 | return 'x' |
|
198 | 198 | return '' |
|
199 | 199 | return f |
|
200 | 200 | if self._checkexec: |
|
201 | 201 | def f(x): |
|
202 | 202 | if 'l' in fallback(x): |
|
203 | 203 | return 'l' |
|
204 | 204 | if util.isexec(self._join(x)): |
|
205 | 205 | return 'x' |
|
206 | 206 | return '' |
|
207 | 207 | return f |
|
208 | 208 | else: |
|
209 | 209 | return fallback |
|
210 | 210 | |
|
211 | 211 | @propertycache |
|
212 | 212 | def _cwd(self): |
|
213 | 213 | # internal config: ui.forcecwd |
|
214 | 214 | forcecwd = self._ui.config('ui', 'forcecwd') |
|
215 | 215 | if forcecwd: |
|
216 | 216 | return forcecwd |
|
217 | 217 | return encoding.getcwd() |
|
218 | 218 | |
|
219 | 219 | def getcwd(self): |
|
220 | 220 | '''Return the path from which a canonical path is calculated. |
|
221 | 221 | |
|
222 | 222 | This path should be used to resolve file patterns or to convert |
|
223 | 223 | canonical paths back to file paths for display. It shouldn't be |
|
224 | 224 | used to get real file paths. Use vfs functions instead. |
|
225 | 225 | ''' |
|
226 | 226 | cwd = self._cwd |
|
227 | 227 | if cwd == self._root: |
|
228 | 228 | return '' |
|
229 | 229 | # self._root ends with a path separator if self._root is '/' or 'C:\' |
|
230 | 230 | rootsep = self._root |
|
231 | 231 | if not util.endswithsep(rootsep): |
|
232 | 232 | rootsep += pycompat.ossep |
|
233 | 233 | if cwd.startswith(rootsep): |
|
234 | 234 | return cwd[len(rootsep):] |
|
235 | 235 | else: |
|
236 | 236 | # we're outside the repo. return an absolute path. |
|
237 | 237 | return cwd |
|
238 | 238 | |
|
239 | 239 | def pathto(self, f, cwd=None): |
|
240 | 240 | if cwd is None: |
|
241 | 241 | cwd = self.getcwd() |
|
242 | 242 | path = util.pathto(self._root, cwd, f) |
|
243 | 243 | if self._slash: |
|
244 | 244 | return util.pconvert(path) |
|
245 | 245 | return path |
|
246 | 246 | |
|
247 | 247 | def __getitem__(self, key): |
|
248 | 248 | '''Return the current state of key (a filename) in the dirstate. |
|
249 | 249 | |
|
250 | 250 | States are: |
|
251 | 251 | n normal |
|
252 | 252 | m needs merging |
|
253 | 253 | r marked for removal |
|
254 | 254 | a marked for addition |
|
255 | 255 | ? not tracked |
|
256 | 256 | ''' |
|
257 | 257 | return self._map.get(key, ("?",))[0] |
|
258 | 258 | |
|
259 | 259 | def __contains__(self, key): |
|
260 | 260 | return key in self._map |
|
261 | 261 | |
|
262 | 262 | def __iter__(self): |
|
263 | 263 | return iter(sorted(self._map)) |
|
264 | 264 | |
|
265 | 265 | def items(self): |
|
266 | 266 | return self._map.iteritems() |
|
267 | 267 | |
|
268 | 268 | iteritems = items |
|
269 | 269 | |
|
270 | 270 | def parents(self): |
|
271 | 271 | return [self._validate(p) for p in self._pl] |
|
272 | 272 | |
|
273 | 273 | def p1(self): |
|
274 | 274 | return self._validate(self._pl[0]) |
|
275 | 275 | |
|
276 | 276 | def p2(self): |
|
277 | 277 | return self._validate(self._pl[1]) |
|
278 | 278 | |
|
279 | 279 | def branch(self): |
|
280 | 280 | return encoding.tolocal(self._branch) |
|
281 | 281 | |
|
282 | 282 | def setparents(self, p1, p2=nullid): |
|
283 | 283 | """Set dirstate parents to p1 and p2. |
|
284 | 284 | |
|
285 | 285 | When moving from two parents to one, 'm' merged entries a |
|
286 | 286 | adjusted to normal and previous copy records discarded and |
|
287 | 287 | returned by the call. |
|
288 | 288 | |
|
289 | 289 | See localrepo.setparents() |
|
290 | 290 | """ |
|
291 | 291 | if self._parentwriters == 0: |
|
292 | 292 | raise ValueError("cannot set dirstate parent without " |
|
293 | 293 | "calling dirstate.beginparentchange") |
|
294 | 294 | |
|
295 | 295 | self._dirty = True |
|
296 | 296 | oldp2 = self._pl[1] |
|
297 | 297 | if self._origpl is None: |
|
298 | 298 | self._origpl = self._pl |
|
299 | 299 | self._map.setparents(p1, p2) |
|
300 | 300 | copies = {} |
|
301 | 301 | if oldp2 != nullid and p2 == nullid: |
|
302 | 302 | candidatefiles = self._map.nonnormalset.union( |
|
303 | 303 | self._map.otherparentset) |
|
304 | 304 | for f in candidatefiles: |
|
305 | 305 | s = self._map.get(f) |
|
306 | 306 | if s is None: |
|
307 | 307 | continue |
|
308 | 308 | |
|
309 | 309 | # Discard 'm' markers when moving away from a merge state |
|
310 | 310 | if s[0] == 'm': |
|
311 | 311 | source = self._map.copymap.get(f) |
|
312 | 312 | if source: |
|
313 | 313 | copies[f] = source |
|
314 | 314 | self.normallookup(f) |
|
315 | 315 | # Also fix up otherparent markers |
|
316 | 316 | elif s[0] == 'n' and s[2] == -2: |
|
317 | 317 | source = self._map.copymap.get(f) |
|
318 | 318 | if source: |
|
319 | 319 | copies[f] = source |
|
320 | 320 | self.add(f) |
|
321 | 321 | return copies |
|
322 | 322 | |
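A hedged sketch of the expected calling pattern for setparents(); it assumes the parentchange() context manager from the same class (not shown in this excerpt), which increments _parentwriters so the ValueError above is not raised. `repo` and `newnode` are assumed names:

    with repo.wlock():
        with repo.dirstate.parentchange():
            # discarded copy records are handed back to the caller
            copies = repo.dirstate.setparents(newnode)
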
|
323 | 323 | def setbranch(self, branch): |
|
324 | 324 | self.__class__._branch.set(self, encoding.fromlocal(branch)) |
|
325 | 325 | f = self._opener('branch', 'w', atomictemp=True, checkambig=True) |
|
326 | 326 | try: |
|
327 | 327 | f.write(self._branch + '\n') |
|
328 | 328 | f.close() |
|
329 | 329 | |
|
330 | 330 | # make sure filecache has the correct stat info for _branch after |
|
331 | 331 | # replacing the underlying file |
|
332 | 332 | ce = self._filecache['_branch'] |
|
333 | 333 | if ce: |
|
334 | 334 | ce.refresh() |
|
335 | 335 | except: # re-raises |
|
336 | 336 | f.discard() |
|
337 | 337 | raise |
|
338 | 338 | |
|
339 | 339 | def invalidate(self): |
|
340 | 340 | '''Causes the next access to reread the dirstate. |
|
341 | 341 | |
|
342 | 342 | This is different from localrepo.invalidatedirstate() because it always |
|
343 | 343 | rereads the dirstate. Use localrepo.invalidatedirstate() if you want to |
|
344 | 344 | check whether the dirstate has changed before rereading it.''' |
|
345 | 345 | |
|
346 | 346 | for a in (r"_map", r"_branch", r"_ignore"): |
|
347 | 347 | if a in self.__dict__: |
|
348 | 348 | delattr(self, a) |
|
349 | 349 | self._lastnormaltime = 0 |
|
350 | 350 | self._dirty = False |
|
351 | 351 | self._updatedfiles.clear() |
|
352 | 352 | self._parentwriters = 0 |
|
353 | 353 | self._origpl = None |
|
354 | 354 | |
|
355 | 355 | def copy(self, source, dest): |
|
356 | 356 | """Mark dest as a copy of source. Unmark dest if source is None.""" |
|
357 | 357 | if source == dest: |
|
358 | 358 | return |
|
359 | 359 | self._dirty = True |
|
360 | 360 | if source is not None: |
|
361 | 361 | self._map.copymap[dest] = source |
|
362 | 362 | self._updatedfiles.add(source) |
|
363 | 363 | self._updatedfiles.add(dest) |
|
364 | 364 | elif self._map.copymap.pop(dest, None): |
|
365 | 365 | self._updatedfiles.add(dest) |
|
366 | 366 | |
|
367 | 367 | def copied(self, file): |
|
368 | 368 | return self._map.copymap.get(file, None) |
|
369 | 369 | |
|
370 | 370 | def copies(self): |
|
371 | 371 | return self._map.copymap |
|
372 | 372 | |
|
373 | 373 | def _addpath(self, f, state, mode, size, mtime): |
|
374 | 374 | oldstate = self[f] |
|
375 | 375 | if state == 'a' or oldstate == 'r': |
|
376 | 376 | scmutil.checkfilename(f) |
|
377 | 377 | if self._map.hastrackeddir(f): |
|
378 | 378 | raise error.Abort(_('directory %r already in dirstate') % |
|
379 | 379 | pycompat.bytestr(f)) |
|
380 | 380 | # shadows |
|
381 | 381 | for d in util.finddirs(f): |
|
382 | 382 | if self._map.hastrackeddir(d): |
|
383 | 383 | break |
|
384 | 384 | entry = self._map.get(d) |
|
385 | 385 | if entry is not None and entry[0] != 'r': |
|
386 | 386 | raise error.Abort( |
|
387 | 387 | _('file %r in dirstate clashes with %r') % |
|
388 | 388 | (pycompat.bytestr(d), pycompat.bytestr(f))) |
|
389 | 389 | self._dirty = True |
|
390 | 390 | self._updatedfiles.add(f) |
|
391 | 391 | self._map.addfile(f, oldstate, state, mode, size, mtime) |
|
392 | 392 | |
|
393 | 393 | def normal(self, f): |
|
394 | 394 | '''Mark a file normal and clean.''' |
|
395 | 395 | s = os.lstat(self._join(f)) |
|
396 | 396 | mtime = s[stat.ST_MTIME] |
|
397 | 397 | self._addpath(f, 'n', s.st_mode, |
|
398 | 398 | s.st_size & _rangemask, mtime & _rangemask) |
|
399 | 399 | self._map.copymap.pop(f, None) |
|
400 | 400 | if f in self._map.nonnormalset: |
|
401 | 401 | self._map.nonnormalset.remove(f) |
|
402 | 402 | if mtime > self._lastnormaltime: |
|
403 | 403 | # Remember the most recent modification timeslot for status(), |
|
404 | 404 | # to make sure we won't miss future size-preserving file content |
|
405 | 405 | # modifications that happen within the same timeslot. |
|
406 | 406 | self._lastnormaltime = mtime |
|
407 | 407 | |
|
408 | 408 | def normallookup(self, f): |
|
409 | 409 | '''Mark a file normal, but possibly dirty.''' |
|
410 | 410 | if self._pl[1] != nullid: |
|
411 | 411 | # if there is a merge going on and the file was either |
|
412 | 412 | # in state 'm' (-1) or coming from other parent (-2) before |
|
413 | 413 | # being removed, restore that state. |
|
414 | 414 | entry = self._map.get(f) |
|
415 | 415 | if entry is not None: |
|
416 | 416 | if entry[0] == 'r' and entry[2] in (-1, -2): |
|
417 | 417 | source = self._map.copymap.get(f) |
|
418 | 418 | if entry[2] == -1: |
|
419 | 419 | self.merge(f) |
|
420 | 420 | elif entry[2] == -2: |
|
421 | 421 | self.otherparent(f) |
|
422 | 422 | if source: |
|
423 | 423 | self.copy(source, f) |
|
424 | 424 | return |
|
425 | 425 | if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: |
|
426 | 426 | return |
|
427 | 427 | self._addpath(f, 'n', 0, -1, -1) |
|
428 | 428 | self._map.copymap.pop(f, None) |
|
429 | 429 | |
|
430 | 430 | def otherparent(self, f): |
|
431 | 431 | '''Mark as coming from the other parent, always dirty.''' |
|
432 | 432 | if self._pl[1] == nullid: |
|
433 | 433 | raise error.Abort(_("setting %r to other parent " |
|
434 | 434 | "only allowed in merges") % f) |
|
435 | 435 | if f in self and self[f] == 'n': |
|
436 | 436 | # merge-like |
|
437 | 437 | self._addpath(f, 'm', 0, -2, -1) |
|
438 | 438 | else: |
|
439 | 439 | # add-like |
|
440 | 440 | self._addpath(f, 'n', 0, -2, -1) |
|
441 | 441 | self._map.copymap.pop(f, None) |
|
442 | 442 | |
|
443 | 443 | def add(self, f): |
|
444 | 444 | '''Mark a file added.''' |
|
445 | 445 | self._addpath(f, 'a', 0, -1, -1) |
|
446 | 446 | self._map.copymap.pop(f, None) |
|
447 | 447 | |
|
448 | 448 | def remove(self, f): |
|
449 | 449 | '''Mark a file removed.''' |
|
450 | 450 | self._dirty = True |
|
451 | 451 | oldstate = self[f] |
|
452 | 452 | size = 0 |
|
453 | 453 | if self._pl[1] != nullid: |
|
454 | 454 | entry = self._map.get(f) |
|
455 | 455 | if entry is not None: |
|
456 | 456 | # backup the previous state |
|
457 | 457 | if entry[0] == 'm': # merge |
|
458 | 458 | size = -1 |
|
459 | 459 | elif entry[0] == 'n' and entry[2] == -2: # other parent |
|
460 | 460 | size = -2 |
|
461 | 461 | self._map.otherparentset.add(f) |
|
462 | 462 | self._updatedfiles.add(f) |
|
463 | 463 | self._map.removefile(f, oldstate, size) |
|
464 | 464 | if size == 0: |
|
465 | 465 | self._map.copymap.pop(f, None) |
|
466 | 466 | |
|
467 | 467 | def merge(self, f): |
|
468 | 468 | '''Mark a file merged.''' |
|
469 | 469 | if self._pl[1] == nullid: |
|
470 | 470 | return self.normallookup(f) |
|
471 | 471 | return self.otherparent(f) |
|
472 | 472 | |
|
473 | 473 | def drop(self, f): |
|
474 | 474 | '''Drop a file from the dirstate''' |
|
475 | 475 | oldstate = self[f] |
|
476 | 476 | if self._map.dropfile(f, oldstate): |
|
477 | 477 | self._dirty = True |
|
478 | 478 | self._updatedfiles.add(f) |
|
479 | 479 | self._map.copymap.pop(f, None) |
|
480 | 480 | |
|
481 | 481 | def _discoverpath(self, path, normed, ignoremissing, exists, storemap): |
|
482 | 482 | if exists is None: |
|
483 | 483 | exists = os.path.lexists(os.path.join(self._root, path)) |
|
484 | 484 | if not exists: |
|
485 | 485 | # Maybe a path component exists |
|
486 | 486 | if not ignoremissing and '/' in path: |
|
487 | 487 | d, f = path.rsplit('/', 1) |
|
488 | 488 | d = self._normalize(d, False, ignoremissing, None) |
|
489 | 489 | folded = d + "/" + f |
|
490 | 490 | else: |
|
491 | 491 | # No path components, preserve original case |
|
492 | 492 | folded = path |
|
493 | 493 | else: |
|
494 | 494 | # recursively normalize leading directory components |
|
495 | 495 | # against dirstate |
|
496 | 496 | if '/' in normed: |
|
497 | 497 | d, f = normed.rsplit('/', 1) |
|
498 | 498 | d = self._normalize(d, False, ignoremissing, True) |
|
499 | 499 | r = self._root + "/" + d |
|
500 | 500 | folded = d + "/" + util.fspath(f, r) |
|
501 | 501 | else: |
|
502 | 502 | folded = util.fspath(normed, self._root) |
|
503 | 503 | storemap[normed] = folded |
|
504 | 504 | |
|
505 | 505 | return folded |
|
506 | 506 | |
|
507 | 507 | def _normalizefile(self, path, isknown, ignoremissing=False, exists=None): |
|
508 | 508 | normed = util.normcase(path) |
|
509 | 509 | folded = self._map.filefoldmap.get(normed, None) |
|
510 | 510 | if folded is None: |
|
511 | 511 | if isknown: |
|
512 | 512 | folded = path |
|
513 | 513 | else: |
|
514 | 514 | folded = self._discoverpath(path, normed, ignoremissing, exists, |
|
515 | 515 | self._map.filefoldmap) |
|
516 | 516 | return folded |
|
517 | 517 | |
|
518 | 518 | def _normalize(self, path, isknown, ignoremissing=False, exists=None): |
|
519 | 519 | normed = util.normcase(path) |
|
520 | 520 | folded = self._map.filefoldmap.get(normed, None) |
|
521 | 521 | if folded is None: |
|
522 | 522 | folded = self._map.dirfoldmap.get(normed, None) |
|
523 | 523 | if folded is None: |
|
524 | 524 | if isknown: |
|
525 | 525 | folded = path |
|
526 | 526 | else: |
|
527 | 527 | # store discovered result in dirfoldmap so that future |
|
528 | 528 | # normalizefile calls don't start matching directories |
|
529 | 529 | folded = self._discoverpath(path, normed, ignoremissing, exists, |
|
530 | 530 | self._map.dirfoldmap) |
|
531 | 531 | return folded |
|
532 | 532 | |
|
533 | 533 | def normalize(self, path, isknown=False, ignoremissing=False): |
|
534 | 534 | ''' |
|
535 | 535 | normalize the case of a pathname when on a casefolding filesystem |
|
536 | 536 | |
|
537 | 537 | isknown specifies whether the filename came from walking the |
|
538 | 538 | disk, to avoid extra filesystem access. |
|
539 | 539 | |
|
540 | 540 | If ignoremissing is True, missing paths are returned
|
541 | 541 | unchanged. Otherwise, we try harder to normalize possibly |
|
542 | 542 | existing path components. |
|
543 | 543 | |
|
544 | 544 | The normalized case is determined based on the following precedence: |
|
545 | 545 | |
|
546 | 546 | - version of name already stored in the dirstate |
|
547 | 547 | - version of name stored on disk |
|
548 | 548 | - version provided via command arguments |
|
549 | 549 | ''' |
|
550 | 550 | |
|
551 | 551 | if self._checkcase: |
|
552 | 552 | return self._normalize(path, isknown, ignoremissing) |
|
553 | 553 | return path |
|
554 | 554 | |
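For example, on a case-insensitive filesystem where the dirstate tracks 'Makefile', normalize() folds a differently-cased command-line argument back to the tracked spelling (the `repo` name is assumed for illustration):

    # folds the argument to the case stored in the dirstate / on disk
    repo.dirstate.normalize('makefile')   # -> 'Makefile' on such a setup
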
|
555 | 555 | def clear(self): |
|
556 | 556 | self._map.clear() |
|
557 | 557 | self._lastnormaltime = 0 |
|
558 | 558 | self._updatedfiles.clear() |
|
559 | 559 | self._dirty = True |
|
560 | 560 | |
|
561 | 561 | def rebuild(self, parent, allfiles, changedfiles=None): |
|
562 | 562 | if changedfiles is None: |
|
563 | 563 | # Rebuild entire dirstate |
|
564 | 564 | changedfiles = allfiles |
|
565 | 565 | lastnormaltime = self._lastnormaltime |
|
566 | 566 | self.clear() |
|
567 | 567 | self._lastnormaltime = lastnormaltime |
|
568 | 568 | |
|
569 | 569 | if self._origpl is None: |
|
570 | 570 | self._origpl = self._pl |
|
571 | 571 | self._map.setparents(parent, nullid) |
|
572 | 572 | for f in changedfiles: |
|
573 | 573 | if f in allfiles: |
|
574 | 574 | self.normallookup(f) |
|
575 | 575 | else: |
|
576 | 576 | self.drop(f) |
|
577 | 577 | |
|
578 | 578 | self._dirty = True |
|
579 | 579 | |
|
580 | 580 | def identity(self): |
|
581 | 581 | '''Return the identity of the dirstate itself, to detect changes in storage

582 | 582 |

583 | 583 | If the identity of the previous dirstate is equal to this one, writing

584 | 584 | out changes based on the former dirstate can keep consistency.
|
585 | 585 | ''' |
|
586 | 586 | return self._map.identity |
|
587 | 587 | |
|
588 | 588 | def write(self, tr): |
|
589 | 589 | if not self._dirty: |
|
590 | 590 | return |
|
591 | 591 | |
|
592 | 592 | filename = self._filename |
|
593 | 593 | if tr: |
|
594 | 594 | # 'dirstate.write()' is not only for writing in-memory |
|
595 | 595 | # changes out, but also for dropping ambiguous timestamps;

596 | 596 | # delayed writing would re-raise the "ambiguous timestamp issue".
|
597 | 597 | # See also the wiki page below for detail: |
|
598 | 598 | # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan |
|
599 | 599 | |
|
600 | 600 | # emulate dropping timestamp in 'parsers.pack_dirstate' |
|
601 | 601 | now = _getfsnow(self._opener) |
|
602 | 602 | self._map.clearambiguoustimes(self._updatedfiles, now) |
|
603 | 603 | |
|
604 | 604 | # emulate that all 'dirstate.normal' results are written out |
|
605 | 605 | self._lastnormaltime = 0 |
|
606 | 606 | self._updatedfiles.clear() |
|
607 | 607 | |
|
608 | 608 | # delay writing in-memory changes out |
|
609 | 609 | tr.addfilegenerator('dirstate', (self._filename,), |
|
610 | 610 | self._writedirstate, location='plain') |
|
611 | 611 | return |
|
612 | 612 | |
|
613 | 613 | st = self._opener(filename, "w", atomictemp=True, checkambig=True) |
|
614 | 614 | self._writedirstate(st) |
|
615 | 615 | |
|
616 | 616 | def addparentchangecallback(self, category, callback): |
|
617 | 617 | """add a callback to be called when the wd parents are changed |
|
618 | 618 | |
|
619 | 619 | Callback will be called with the following arguments: |
|
620 | 620 | dirstate, (oldp1, oldp2), (newp1, newp2) |
|
621 | 621 | |
|
622 | 622 | Category is a unique identifier to allow overwriting an old callback |
|
623 | 623 | with a newer callback. |
|
624 | 624 | """ |
|
625 | 625 | self._plchangecallbacks[category] = callback |
|
626 | 626 | |
|
627 | 627 | def _writedirstate(self, st): |
|
628 | 628 | # notify callbacks about parents change |
|
629 | 629 | if self._origpl is not None and self._origpl != self._pl: |
|
630 | 630 | for c, callback in sorted(self._plchangecallbacks.iteritems()): |
|
631 | 631 | callback(self, self._origpl, self._pl) |
|
632 | 632 | self._origpl = None |
|
633 | 633 | # use the modification time of the newly created temporary file as the |
|
634 | 634 | # filesystem's notion of 'now' |
|
635 | 635 | now = util.fstat(st)[stat.ST_MTIME] & _rangemask |
|
636 | 636 | |
|
637 | 637 | # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping

638 | 638 | # the timestamp of each entry in the dirstate, because of 'now > mtime'
|
639 | 639 | delaywrite = self._ui.configint('debug', 'dirstate.delaywrite') |
|
640 | 640 | if delaywrite > 0: |
|
641 | 641 | # do we have any files to delay for? |
|
642 | 642 | for f, e in self._map.iteritems(): |
|
643 | 643 | if e[0] == 'n' and e[3] == now: |
|
644 | 644 | import time # to avoid useless import |
|
645 | 645 | # rather than sleep n seconds, sleep until the next |
|
646 | 646 | # multiple of n seconds |
|
647 | 647 | clock = time.time() |
|
648 | 648 | start = int(clock) - (int(clock) % delaywrite) |
|
649 | 649 | end = start + delaywrite |
|
650 | 650 | time.sleep(end - clock) |
|
651 | 651 | now = end # trust our estimate that the end is near now |
|
652 | 652 | break |
|
653 | 653 | |
|
654 | 654 | self._map.write(st, now) |
|
655 | 655 | self._lastnormaltime = 0 |
|
656 | 656 | self._dirty = False |
|
657 | 657 | |
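Worked example of the delay computation in the loop above, with assumed values for clock and delaywrite:

    clock = 12.4          # time.time()
    delaywrite = 2        # debug.dirstate.delaywrite
    start = int(clock) - (int(clock) % delaywrite)   # 12
    end = start + delaywrite                         # 14
    # sleep 1.6s so that 'now' rounds up to the next multiple of 2 seconds
    sleep_for = end - clock
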
|
658 | 658 | def _dirignore(self, f): |
|
659 | 659 | if f == '.': |
|
660 | 660 | return False |
|
661 | 661 | if self._ignore(f): |
|
662 | 662 | return True |
|
663 | 663 | for p in util.finddirs(f): |
|
664 | 664 | if self._ignore(p): |
|
665 | 665 | return True |
|
666 | 666 | return False |
|
667 | 667 | |
|
668 | 668 | def _ignorefiles(self): |
|
669 | 669 | files = [] |
|
670 | 670 | if os.path.exists(self._join('.hgignore')): |
|
671 | 671 | files.append(self._join('.hgignore')) |
|
672 | 672 | for name, path in self._ui.configitems("ui"): |
|
673 | 673 | if name == 'ignore' or name.startswith('ignore.'): |
|
674 | 674 | # we need to use os.path.join here rather than self._join |
|
675 | 675 | # because path is arbitrary and user-specified |
|
676 | 676 | files.append(os.path.join(self._rootdir, util.expandpath(path))) |
|
677 | 677 | return files |
|
678 | 678 | |
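The `ui.ignore` keys read above correspond to hgrc entries such as the following (the paths are examples only; `~` is expanded by util.expandpath):

    [ui]
    ignore = ~/.hgignore-global
    ignore.team = /etc/mercurial/ignore-team
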
|
679 | 679 | def _ignorefileandline(self, f): |
|
680 | 680 | files = collections.deque(self._ignorefiles()) |
|
681 | 681 | visited = set() |
|
682 | 682 | while files: |
|
683 | 683 | i = files.popleft() |
|
684 | 684 | patterns = matchmod.readpatternfile(i, self._ui.warn, |
|
685 | 685 | sourceinfo=True) |
|
686 | 686 | for pattern, lineno, line in patterns: |
|
687 | 687 | kind, p = matchmod._patsplit(pattern, 'glob') |
|
688 | 688 | if kind == "subinclude": |
|
689 | 689 | if p not in visited: |
|
690 | 690 | files.append(p) |
|
691 | 691 | continue |
|
692 | 692 | m = matchmod.match(self._root, '', [], [pattern], |
|
693 | 693 | warn=self._ui.warn) |
|
694 | 694 | if m(f): |
|
695 | 695 | return (i, lineno, line) |
|
696 | 696 | visited.add(i) |
|
697 | 697 | return (None, -1, "") |
|
698 | 698 | |
|
699 | 699 | def _walkexplicit(self, match, subrepos): |
|
700 | 700 | '''Get stat data about the files explicitly specified by match. |
|
701 | 701 | |
|
702 | 702 | Return a triple (results, dirsfound, dirsnotfound). |
|
703 | 703 | - results is a mapping from filename to stat result. It also contains |
|
704 | 704 | listings mapping subrepos and .hg to None. |
|
705 | 705 | - dirsfound is a list of files found to be directories. |
|
706 | 706 | - dirsnotfound is a list of files that the dirstate thinks are |
|
707 | 707 | directories and that were not found.''' |
|
708 | 708 | |
|
709 | 709 | def badtype(mode): |
|
710 | 710 | kind = _('unknown') |
|
711 | 711 | if stat.S_ISCHR(mode): |
|
712 | 712 | kind = _('character device') |
|
713 | 713 | elif stat.S_ISBLK(mode): |
|
714 | 714 | kind = _('block device') |
|
715 | 715 | elif stat.S_ISFIFO(mode): |
|
716 | 716 | kind = _('fifo') |
|
717 | 717 | elif stat.S_ISSOCK(mode): |
|
718 | 718 | kind = _('socket') |
|
719 | 719 | elif stat.S_ISDIR(mode): |
|
720 | 720 | kind = _('directory') |
|
721 | 721 | return _('unsupported file type (type is %s)') % kind |
|
722 | 722 | |
|
723 | 723 | matchedir = match.explicitdir |
|
724 | 724 | badfn = match.bad |
|
725 | 725 | dmap = self._map |
|
726 | 726 | lstat = os.lstat |
|
727 | 727 | getkind = stat.S_IFMT |
|
728 | 728 | dirkind = stat.S_IFDIR |
|
729 | 729 | regkind = stat.S_IFREG |
|
730 | 730 | lnkkind = stat.S_IFLNK |
|
731 | 731 | join = self._join |
|
732 | 732 | dirsfound = [] |
|
733 | 733 | foundadd = dirsfound.append |
|
734 | 734 | dirsnotfound = [] |
|
735 | 735 | notfoundadd = dirsnotfound.append |
|
736 | 736 | |
|
737 | 737 | if not match.isexact() and self._checkcase: |
|
738 | 738 | normalize = self._normalize |
|
739 | 739 | else: |
|
740 | 740 | normalize = None |
|
741 | 741 | |
|
742 | 742 | files = sorted(match.files()) |
|
743 | 743 | subrepos.sort() |
|
744 | 744 | i, j = 0, 0 |
|
745 | 745 | while i < len(files) and j < len(subrepos): |
|
746 | 746 | subpath = subrepos[j] + "/" |
|
747 | 747 | if files[i] < subpath: |
|
748 | 748 | i += 1 |
|
749 | 749 | continue |
|
750 | 750 | while i < len(files) and files[i].startswith(subpath): |
|
751 | 751 | del files[i] |
|
752 | 752 | j += 1 |
|
753 | 753 | |
|
754 | 754 | if not files or '.' in files: |
|
755 | 755 | files = ['.'] |
|
756 | 756 | results = dict.fromkeys(subrepos) |
|
757 | 757 | results['.hg'] = None |
|
758 | 758 | |
|
759 | 759 | for ff in files: |
|
760 | 760 | # constructing the foldmap is expensive, so don't do it for the |
|
761 | 761 | # common case where files is ['.'] |
|
762 | 762 | if normalize and ff != '.': |
|
763 | 763 | nf = normalize(ff, False, True) |
|
764 | 764 | else: |
|
765 | 765 | nf = ff |
|
766 | 766 | if nf in results: |
|
767 | 767 | continue |
|
768 | 768 | |
|
769 | 769 | try: |
|
770 | 770 | st = lstat(join(nf)) |
|
771 | 771 | kind = getkind(st.st_mode) |
|
772 | 772 | if kind == dirkind: |
|
773 | 773 | if nf in dmap: |
|
774 | 774 | # file replaced by dir on disk but still in dirstate |
|
775 | 775 | results[nf] = None |
|
776 | 776 | if matchedir: |
|
777 | 777 | matchedir(nf) |
|
778 | 778 | foundadd((nf, ff)) |
|
779 | 779 | elif kind == regkind or kind == lnkkind: |
|
780 | 780 | results[nf] = st |
|
781 | 781 | else: |
|
782 | 782 | badfn(ff, badtype(kind)) |
|
783 | 783 | if nf in dmap: |
|
784 | 784 | results[nf] = None |
|
785 | 785 | except OSError as inst: # nf not found on disk - it is dirstate only |
|
786 | 786 | if nf in dmap: # does it exactly match a missing file? |
|
787 | 787 | results[nf] = None |
|
788 | 788 | else: # does it match a missing directory? |
|
789 | 789 | if self._map.hasdir(nf): |
|
790 | 790 | if matchedir: |
|
791 | 791 | matchedir(nf) |
|
792 | 792 | notfoundadd(nf) |
|
793 | 793 | else: |
|
794 | 794 | badfn(ff, encoding.strtolocal(inst.strerror)) |
|
795 | 795 | |
|
796 | 796 | # match.files() may contain explicitly-specified paths that shouldn't |
|
797 | 797 | # be taken; drop them from the list of files found. dirsfound/notfound |
|
798 | 798 | # aren't filtered here because they will be tested later. |
|
799 | 799 | if match.anypats(): |
|
800 | 800 | for f in list(results): |
|
801 | 801 | if f == '.hg' or f in subrepos: |
|
802 | 802 | # keep sentinel to disable further out-of-repo walks |
|
803 | 803 | continue |
|
804 | 804 | if not match(f): |
|
805 | 805 | del results[f] |
|
806 | 806 | |
|
807 | 807 | # Case insensitive filesystems cannot rely on lstat() failing to detect |
|
808 | 808 | # a case-only rename. Prune the stat object for any file that does not |
|
809 | 809 | # match the case in the filesystem, if there are multiple files that |
|
810 | 810 | # normalize to the same path. |
|
811 | 811 | if match.isexact() and self._checkcase: |
|
812 | 812 | normed = {} |
|
813 | 813 | |
|
814 | 814 | for f, st in results.iteritems(): |
|
815 | 815 | if st is None: |
|
816 | 816 | continue |
|
817 | 817 | |
|
818 | 818 | nc = util.normcase(f) |
|
819 | 819 | paths = normed.get(nc) |
|
820 | 820 | |
|
821 | 821 | if paths is None: |
|
822 | 822 | paths = set() |
|
823 | 823 | normed[nc] = paths |
|
824 | 824 | |
|
825 | 825 | paths.add(f) |
|
826 | 826 | |
|
827 | 827 | for norm, paths in normed.iteritems(): |
|
828 | 828 | if len(paths) > 1: |
|
829 | 829 | for path in paths: |
|
830 | 830 | folded = self._discoverpath(path, norm, True, None, |
|
831 | 831 | self._map.dirfoldmap) |
|
832 | 832 | if path != folded: |
|
833 | 833 | results[path] = None |
|
834 | 834 | |
|
835 | 835 | return results, dirsfound, dirsnotfound |
|
836 | 836 | |
|
837 | 837 | def walk(self, match, subrepos, unknown, ignored, full=True): |
|
838 | 838 | ''' |
|
839 | 839 | Walk recursively through the directory tree, finding all files |
|
840 | 840 | matched by match. |
|
841 | 841 | |
|
842 | 842 | If full is False, maybe skip some known-clean files. |
|
843 | 843 | |
|
844 | 844 | Return a dict mapping filename to stat-like object (either |
|
845 | 845 | mercurial.osutil.stat instance or return value of os.stat()). |
|
846 | 846 | |
|
847 | 847 | ''' |
|
848 | 848 | # full is a flag that extensions that hook into walk can use -- this |
|
849 | 849 | # implementation doesn't use it at all. This satisfies the contract |
|
850 | 850 | # because we only guarantee a "maybe". |
|
851 | 851 | |
|
852 | 852 | if ignored: |
|
853 | 853 | ignore = util.never |
|
854 | 854 | dirignore = util.never |
|
855 | 855 | elif unknown: |
|
856 | 856 | ignore = self._ignore |
|
857 | 857 | dirignore = self._dirignore |
|
858 | 858 | else: |
|
859 | 859 | # if not unknown and not ignored, drop dir recursion and step 2 |
|
860 | 860 | ignore = util.always |
|
861 | 861 | dirignore = util.always |
|
862 | 862 | |
|
863 | 863 | matchfn = match.matchfn |
|
864 | 864 | matchalways = match.always() |
|
865 | 865 | matchtdir = match.traversedir |
|
866 | 866 | dmap = self._map |
|
867 | 867 | listdir = util.listdir |
|
868 | 868 | lstat = os.lstat |
|
869 | 869 | dirkind = stat.S_IFDIR |
|
870 | 870 | regkind = stat.S_IFREG |
|
871 | 871 | lnkkind = stat.S_IFLNK |
|
872 | 872 | join = self._join |
|
873 | 873 | |
|
874 | 874 | exact = skipstep3 = False |
|
875 | 875 | if match.isexact(): # match.exact |
|
876 | 876 | exact = True |
|
877 | 877 | dirignore = util.always # skip step 2 |
|
878 | 878 | elif match.prefix(): # match.match, no patterns |
|
879 | 879 | skipstep3 = True |
|
880 | 880 | |
|
881 | 881 | if not exact and self._checkcase: |
|
882 | 882 | normalize = self._normalize |
|
883 | 883 | normalizefile = self._normalizefile |
|
884 | 884 | skipstep3 = False |
|
885 | 885 | else: |
|
886 | 886 | normalize = self._normalize |
|
887 | 887 | normalizefile = None |
|
888 | 888 | |
|
889 | 889 | # step 1: find all explicit files |
|
890 | 890 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
891 | 891 | |
|
892 | 892 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
893 | 893 | work = [d for d in work if not dirignore(d[0])] |
|
894 | 894 | |
|
895 | 895 | # step 2: visit subdirectories |
|
896 | 896 | def traverse(work, alreadynormed): |
|
897 | 897 | wadd = work.append |
|
898 | 898 | while work: |
|
899 | 899 | nd = work.pop() |
|
900 | 900 | visitentries = match.visitchildrenset(nd) |
|
901 | 901 | if not visitentries: |
|
902 | 902 | continue |
|
903 | 903 | if visitentries == 'this' or visitentries == 'all': |
|
904 | 904 | visitentries = None |
|
905 | 905 | skip = None |
|
906 | 906 | if nd == '.': |
|
907 | 907 | nd = '' |
|
908 | 908 | else: |
|
909 | 909 | skip = '.hg' |
|
910 | 910 | try: |
|
911 | 911 | entries = listdir(join(nd), stat=True, skip=skip) |
|
912 | 912 | except OSError as inst: |
|
913 | 913 | if inst.errno in (errno.EACCES, errno.ENOENT): |
|
914 | 914 | match.bad(self.pathto(nd), |
|
915 | 915 | encoding.strtolocal(inst.strerror)) |
|
916 | 916 | continue |
|
917 | 917 | raise |
|
918 | 918 | for f, kind, st in entries: |
|
919 | 919 | # Some matchers may return files in the visitentries set, |
|
920 | 920 | # instead of 'this', if the matcher explicitly mentions them |
|
921 | 921 | # and is not an exactmatcher. This is acceptable; we do not |
|
922 | 922 | # make any hard assumptions about file-or-directory below |
|
923 | 923 | # based on the presence of `f` in visitentries. If |
|
924 | 924 | # visitchildrenset returned a set, we can always skip the |
|
925 | 925 | # entries *not* in the set it provided regardless of whether |
|
926 | 926 | # they're actually a file or a directory. |
|
927 | 927 | if visitentries and f not in visitentries: |
|
928 | 928 | continue |
|
929 | 929 | if normalizefile: |
|
930 | 930 | # even though f might be a directory, we're only |
|
931 | 931 | # interested in comparing it to files currently in the |
|
932 | 932 | # dmap -- therefore normalizefile is enough |
|
933 | 933 | nf = normalizefile(nd and (nd + "/" + f) or f, True, |
|
934 | 934 | True) |
|
935 | 935 | else: |
|
936 | 936 | nf = nd and (nd + "/" + f) or f |
|
937 | 937 | if nf not in results: |
|
938 | 938 | if kind == dirkind: |
|
939 | 939 | if not ignore(nf): |
|
940 | 940 | if matchtdir: |
|
941 | 941 | matchtdir(nf) |
|
942 | 942 | wadd(nf) |
|
943 | 943 | if nf in dmap and (matchalways or matchfn(nf)): |
|
944 | 944 | results[nf] = None |
|
945 | 945 | elif kind == regkind or kind == lnkkind: |
|
946 | 946 | if nf in dmap: |
|
947 | 947 | if matchalways or matchfn(nf): |
|
948 | 948 | results[nf] = st |
|
949 | 949 | elif ((matchalways or matchfn(nf)) |
|
950 | 950 | and not ignore(nf)): |
|
951 | 951 | # unknown file -- normalize if necessary |
|
952 | 952 | if not alreadynormed: |
|
953 | 953 | nf = normalize(nf, False, True) |
|
954 | 954 | results[nf] = st |
|
955 | 955 | elif nf in dmap and (matchalways or matchfn(nf)): |
|
956 | 956 | results[nf] = None |
|
957 | 957 | |
|
958 | 958 | for nd, d in work: |
|
959 | 959 | # alreadynormed means that traverse() doesn't have to do any
|
960 | 960 | # expensive directory normalization |
|
961 | 961 | alreadynormed = not normalize or nd == d |
|
962 | 962 | traverse([d], alreadynormed) |
|
963 | 963 | |
|
964 | 964 | for s in subrepos: |
|
965 | 965 | del results[s] |
|
966 | 966 | del results['.hg'] |
|
967 | 967 | |
|
968 | 968 | # step 3: visit remaining files from dmap |
|
969 | 969 | if not skipstep3 and not exact: |
|
970 | 970 | # If a dmap file is not in results yet, it was either |
|
971 | 971 | # a) not matching matchfn, b) ignored, c) missing, or d) under a
|
972 | 972 | # symlink directory. |
|
973 | 973 | if not results and matchalways: |
|
974 | 974 | visit = [f for f in dmap] |
|
975 | 975 | else: |
|
976 | 976 | visit = [f for f in dmap if f not in results and matchfn(f)] |
|
977 | 977 | visit.sort() |
|
978 | 978 | |
|
979 | 979 | if unknown: |
|
980 | 980 | # unknown == True means we walked all dirs under the roots |
|
981 | 981 | # that weren't ignored, and everything that matched was stat'ed
|
982 | 982 | # and is already in results. |
|
983 | 983 | # The rest must thus be ignored or under a symlink. |
|
984 | 984 | audit_path = pathutil.pathauditor(self._root, cached=True) |
|
985 | 985 | |
|
986 | 986 | for nf in iter(visit): |
|
987 | 987 | # If a stat for the same file was already added with a |
|
988 | 988 | # different case, don't add one for this, since that would |
|
989 | 989 | # make it appear as if the file exists under both names |
|
990 | 990 | # on disk. |
|
991 | 991 | if (normalizefile and |
|
992 | 992 | normalizefile(nf, True, True) in results): |
|
993 | 993 | results[nf] = None |
|
994 | 994 | # Report ignored items in the dmap as long as they are not |
|
995 | 995 | # under a symlink directory. |
|
996 | 996 | elif audit_path.check(nf): |
|
997 | 997 | try: |
|
998 | 998 | results[nf] = lstat(join(nf)) |
|
999 | 999 | # file was just ignored, no links, and exists |
|
1000 | 1000 | except OSError: |
|
1001 | 1001 | # file doesn't exist |
|
1002 | 1002 | results[nf] = None |
|
1003 | 1003 | else: |
|
1004 | 1004 | # It's either missing or under a symlink directory |
|
1005 | 1005 | # which we in this case report as missing |
|
1006 | 1006 | results[nf] = None |
|
1007 | 1007 | else: |
|
1008 | 1008 | # We may not have walked the full directory tree above, |
|
1009 | 1009 | # so stat and check everything we missed. |
|
1010 | 1010 | iv = iter(visit) |
|
1011 | 1011 | for st in util.statfiles([join(i) for i in visit]): |
|
1012 | 1012 | results[next(iv)] = st |
|
1013 | 1013 | return results |
|
1014 | 1014 | |
|
1015 | 1015 | def status(self, match, subrepos, ignored, clean, unknown): |
|
1016 | 1016 | '''Determine the status of the working copy relative to the |
|
1017 | 1017 | dirstate and return a pair of (unsure, status), where status is of type |
|
1018 | 1018 | scmutil.status and: |
|
1019 | 1019 | |
|
1020 | 1020 | unsure: |
|
1021 | 1021 | files that might have been modified since the dirstate was |
|
1022 | 1022 | written, but need to be read to be sure (size is the same |
|
1023 | 1023 | but mtime differs) |
|
1024 | 1024 | status.modified: |
|
1025 | 1025 | files that have definitely been modified since the dirstate |
|
1026 | 1026 | was written (different size or mode) |
|
1027 | 1027 | status.clean: |
|
1028 | 1028 | files that have definitely not been modified since the |
|
1029 | 1029 | dirstate was written |
|
1030 | 1030 | ''' |
|
1031 | 1031 | listignored, listclean, listunknown = ignored, clean, unknown |
|
1032 | 1032 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
1033 | 1033 | removed, deleted, clean = [], [], [] |
|
1034 | 1034 | |
|
1035 | 1035 | dmap = self._map |
|
1036 | 1036 | dmap.preload() |
|
1037 | 1037 | dcontains = dmap.__contains__ |
|
1038 | 1038 | dget = dmap.__getitem__ |
|
1039 | 1039 | ladd = lookup.append # aka "unsure" |
|
1040 | 1040 | madd = modified.append |
|
1041 | 1041 | aadd = added.append |
|
1042 | 1042 | uadd = unknown.append |
|
1043 | 1043 | iadd = ignored.append |
|
1044 | 1044 | radd = removed.append |
|
1045 | 1045 | dadd = deleted.append |
|
1046 | 1046 | cadd = clean.append |
|
1047 | 1047 | mexact = match.exact |
|
1048 | 1048 | dirignore = self._dirignore |
|
1049 | 1049 | checkexec = self._checkexec |
|
1050 | 1050 | copymap = self._map.copymap |
|
1051 | 1051 | lastnormaltime = self._lastnormaltime |
|
1052 | 1052 | |
|
1053 | 1053 | # We need to do full walks when either |
|
1054 | 1054 | # - we're listing all clean files, or |
|
1055 | 1055 | # - match.traversedir does something, because match.traversedir should |
|
1056 | 1056 | # be called for every dir in the working dir |
|
1057 | 1057 | full = listclean or match.traversedir is not None |
|
1058 | 1058 | for fn, st in self.walk(match, subrepos, listunknown, listignored, |
|
1059 | 1059 | full=full).iteritems(): |
|
1060 | 1060 | if not dcontains(fn): |
|
1061 | 1061 | if (listignored or mexact(fn)) and dirignore(fn): |
|
1062 | 1062 | if listignored: |
|
1063 | 1063 | iadd(fn) |
|
1064 | 1064 | else: |
|
1065 | 1065 | uadd(fn) |
|
1066 | 1066 | continue |
|
1067 | 1067 | |
|
1068 | 1068 | # This is equivalent to 'state, mode, size, time = dmap[fn]' but not |
|
1069 | 1069 | # written like that for performance reasons. dmap[fn] is not a |
|
1070 | 1070 | # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE |
|
1071 | 1071 | # opcode has fast paths when the value to be unpacked is a tuple or |
|
1072 | 1072 | # a list, but falls back to creating a full-fledged iterator in |
|
1073 | 1073 | # general. That is much slower than simply accessing and storing the |
|
1074 | 1074 | # tuple members one by one. |
|
1075 | 1075 | t = dget(fn) |
|
1076 | 1076 | state = t[0] |
|
1077 | 1077 | mode = t[1] |
|
1078 | 1078 | size = t[2] |
|
1079 | 1079 | time = t[3] |
|
1080 | 1080 | |
|
1081 | 1081 | if not st and state in "nma": |
|
1082 | 1082 | dadd(fn) |
|
1083 | 1083 | elif state == 'n': |
|
1084 | 1084 | if (size >= 0 and |
|
1085 | 1085 | ((size != st.st_size and size != st.st_size & _rangemask) |
|
1086 | 1086 | or ((mode ^ st.st_mode) & 0o100 and checkexec)) |
|
1087 | 1087 | or size == -2 # other parent |
|
1088 | 1088 | or fn in copymap): |
|
1089 | 1089 | madd(fn) |
|
1090 | 1090 | elif (time != st[stat.ST_MTIME] |
|
1091 | 1091 | and time != st[stat.ST_MTIME] & _rangemask): |
|
1092 | 1092 | ladd(fn) |
|
1093 | 1093 | elif st[stat.ST_MTIME] == lastnormaltime: |
|
1094 | 1094 | # fn may have just been marked as normal and it may have |
|
1095 | 1095 | # changed in the same second without changing its size. |
|
1096 | 1096 | # This can happen if we quickly do multiple commits. |
|
1097 | 1097 | # Force lookup, so we don't miss such a racy file change. |
|
1098 | 1098 | ladd(fn) |
|
1099 | 1099 | elif listclean: |
|
1100 | 1100 | cadd(fn) |
|
1101 | 1101 | elif state == 'm': |
|
1102 | 1102 | madd(fn) |
|
1103 | 1103 | elif state == 'a': |
|
1104 | 1104 | aadd(fn) |
|
1105 | 1105 | elif state == 'r': |
|
1106 | 1106 | radd(fn) |
|
1107 | 1107 | |
|
1108 | 1108 | return (lookup, scmutil.status(modified, added, removed, deleted, |
|
1109 | 1109 | unknown, ignored, clean)) |
|
1110 | 1110 | |
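A hedged sketch of calling this status() directly (in practice it is usually reached through repo.status(); the matchmod.always() matcher and the `repo` name are assumptions for illustration):

    from mercurial import match as matchmod

    unsure, st = repo.dirstate.status(matchmod.always(repo.root, ''),
                                      subrepos=[], ignored=False,
                                      clean=False, unknown=True)
    # 'unsure' files need their contents read to decide; 'st' is scmutil.status
    print(st.modified, st.added, st.removed, st.deleted, st.unknown)
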
|
1111 | 1111 | def matches(self, match): |
|
1112 | 1112 | ''' |
|
1113 | 1113 | return files in the dirstate (in whatever state) filtered by match |
|
1114 | 1114 | ''' |
|
1115 | 1115 | dmap = self._map |
|
1116 | 1116 | if match.always(): |
|
1117 | 1117 | return dmap.keys() |
|
1118 | 1118 | files = match.files() |
|
1119 | 1119 | if match.isexact(): |
|
1120 | 1120 | # fast path -- filter the other way around, since typically files is |
|
1121 | 1121 | # much smaller than dmap |
|
1122 | 1122 | return [f for f in files if f in dmap] |
|
1123 | 1123 | if match.prefix() and all(fn in dmap for fn in files): |
|
1124 | 1124 | # fast path -- all the values are known to be files, so just return |
|
1125 | 1125 | # that |
|
1126 | 1126 | return list(files) |
|
1127 | 1127 | return [f for f in dmap if match(f)] |
|
1128 | 1128 | |
|
1129 | 1129 | def _actualfilename(self, tr): |
|
1130 | 1130 | if tr: |
|
1131 | 1131 | return self._pendingfilename |
|
1132 | 1132 | else: |
|
1133 | 1133 | return self._filename |
|
1134 | 1134 | |
|
1135 | 1135 | def savebackup(self, tr, backupname): |
|
1136 | 1136 | '''Save current dirstate into backup file''' |
|
1137 | 1137 | filename = self._actualfilename(tr) |
|
1138 | 1138 | assert backupname != filename |
|
1139 | 1139 | |
|
1140 | 1140 | # use '_writedirstate' instead of 'write' to make sure changes get written,

1141 | 1141 | # because the latter skips writing out while a transaction is running.

1142 | 1142 | # the output file will be used to create a backup of the dirstate here.
|
1143 | 1143 | if self._dirty or not self._opener.exists(filename): |
|
1144 | 1144 | self._writedirstate(self._opener(filename, "w", atomictemp=True, |
|
1145 | 1145 | checkambig=True)) |
|
1146 | 1146 | |
|
1147 | 1147 | if tr: |
|
1148 | 1148 | # ensure that subsequent tr.writepending returns True for |
|
1149 | 1149 | # changes written out above, even if dirstate is never |
|
1150 | 1150 | # changed after this |
|
1151 | 1151 | tr.addfilegenerator('dirstate', (self._filename,), |
|
1152 | 1152 | self._writedirstate, location='plain') |
|
1153 | 1153 | |
|
1154 | 1154 | # ensure that pending file written above is unlinked at |
|
1155 | 1155 | # failure, even if tr.writepending isn't invoked until the |
|
1156 | 1156 | # end of this transaction |
|
1157 | 1157 | tr.registertmp(filename, location='plain') |
|
1158 | 1158 | |
|
1159 | 1159 | self._opener.tryunlink(backupname) |
|
1160 | 1160 | # hardlink backup is okay because _writedirstate is always called |
|
1161 | 1161 | # with an "atomictemp=True" file. |
|
1162 | 1162 | util.copyfile(self._opener.join(filename), |
|
1163 | 1163 | self._opener.join(backupname), hardlink=True) |
|
1164 | 1164 | |
|
1165 | 1165 | def restorebackup(self, tr, backupname): |
|
1166 | 1166 | '''Restore dirstate by backup file''' |
|
1167 | 1167 | # this "invalidate()" prevents "wlock.release()" from writing |
|
1168 | 1168 | # dirstate changes out after restoring from the backup file
|
1169 | 1169 | self.invalidate() |
|
1170 | 1170 | filename = self._actualfilename(tr) |
|
1171 | 1171 | o = self._opener |
|
1172 | 1172 | if util.samefile(o.join(backupname), o.join(filename)): |
|
1173 | 1173 | o.unlink(backupname) |
|
1174 | 1174 | else: |
|
1175 | 1175 | o.rename(backupname, filename, checkambig=True) |
|
1176 | 1176 | |
|
1177 | 1177 | def clearbackup(self, tr, backupname): |
|
1178 | 1178 | '''Clear backup file''' |
|
1179 | 1179 | self._opener.unlink(backupname) |
|
1180 | 1180 | |
|
1181 | 1181 | class dirstatemap(object): |
|
1182 | 1182 | """Map encapsulating the dirstate's contents. |
|
1183 | 1183 | |
|
1184 | 1184 | The dirstate contains the following state: |
|
1185 | 1185 | |
|
1186 | 1186 | - `identity` is the identity of the dirstate file, which can be used to |
|
1187 | 1187 | detect when changes have occurred to the dirstate file. |
|
1188 | 1188 | |
|
1189 | 1189 | - `parents` is a pair containing the parents of the working copy. The |
|
1190 | 1190 | parents are updated by calling `setparents`. |
|
1191 | 1191 | |
|
1192 | 1192 | - the state map maps filenames to tuples of (state, mode, size, mtime), |
|
1193 | 1193 | where state is a single character representing 'normal', 'added', |
|
1194 | 1194 | 'removed', or 'merged'. It is read by treating the dirstate as a |
|
1195 | 1195 | dict. File state is updated by calling the `addfile`, `removefile` and |
|
1196 | 1196 | `dropfile` methods. |
|
1197 | 1197 | |
|
1198 | 1198 | - `copymap` maps destination filenames to their source filename. |
|
1199 | 1199 | |
|
1200 | 1200 | The dirstate also provides the following views onto the state: |
|
1201 | 1201 | |
|
1202 | 1202 | - `nonnormalset` is a set of the filenames that have state other |
|
1203 | 1203 | than 'normal', or are normal but have an mtime of -1 ('normallookup'). |
|
1204 | 1204 | |
|
1205 | 1205 | - `otherparentset` is a set of the filenames that are marked as coming |
|
1206 | 1206 | from the second parent when the dirstate is currently being merged. |
|
1207 | 1207 | |
|
1208 | 1208 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized |
|
1209 | 1209 | form that they appear as in the dirstate. |
|
1210 | 1210 | |
|
1211 | 1211 | - `dirfoldmap` is a dict mapping normalized directory names to the |
|
1212 | 1212 | denormalized form that they appear as in the dirstate. |
|
1213 | 1213 | """ |
|
1214 | 1214 | |
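For orientation, a small sketch of reading the raw map described above; the `repo` name and the paths are assumptions, and each entry unpacks as (state, mode, size, mtime):

    dmap = repo.dirstate._map                 # a dirstatemap instance
    entry = dmap.get('some/tracked/file')
    if entry is not None:
        state, mode, size, mtime = entry
    source = dmap.copymap.get('copied/file')  # None if not recorded as a copy
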
|
1215 | 1215 | def __init__(self, ui, opener, root): |
|
1216 | 1216 | self._ui = ui |
|
1217 | 1217 | self._opener = opener |
|
1218 | 1218 | self._root = root |
|
1219 | 1219 | self._filename = 'dirstate' |
|
1220 | 1220 | |
|
1221 | 1221 | self._parents = None |
|
1222 | 1222 | self._dirtyparents = False |
|
1223 | 1223 | |
|
1224 | 1224 | # for consistent view between _pl() and _read() invocations |
|
1225 | 1225 | self._pendingmode = None |
|
1226 | 1226 | |
|
1227 | 1227 | @propertycache |
|
1228 | 1228 | def _map(self): |
|
1229 | 1229 | self._map = {} |
|
1230 | 1230 | self.read() |
|
1231 | 1231 | return self._map |
|
1232 | 1232 | |
|
1233 | 1233 | @propertycache |
|
1234 | 1234 | def copymap(self): |
|
1235 | 1235 | self.copymap = {} |
|
1236 | 1236 | self._map |
|
1237 | 1237 | return self.copymap |
|
1238 | 1238 | |
|
1239 | 1239 | def clear(self): |
|
1240 | 1240 | self._map.clear() |
|
1241 | 1241 | self.copymap.clear() |
|
1242 | 1242 | self.setparents(nullid, nullid) |
|
1243 | 1243 | util.clearcachedproperty(self, "_dirs") |
|
1244 | 1244 | util.clearcachedproperty(self, "_alldirs") |
|
1245 | 1245 | util.clearcachedproperty(self, "filefoldmap") |
|
1246 | 1246 | util.clearcachedproperty(self, "dirfoldmap") |
|
1247 | 1247 | util.clearcachedproperty(self, "nonnormalset") |
|
1248 | 1248 | util.clearcachedproperty(self, "otherparentset") |
|
1249 | 1249 | |
|
1250 | 1250 | def items(self): |
|
1251 | 1251 | return self._map.iteritems() |
|
1252 | 1252 | |
|
1253 | 1253 | # forward for python2,3 compat |
|
1254 | 1254 | iteritems = items |
|
1255 | 1255 | |
|
1256 | 1256 | def __len__(self): |
|
1257 | 1257 | return len(self._map) |
|
1258 | 1258 | |
|
1259 | 1259 | def __iter__(self): |
|
1260 | 1260 | return iter(self._map) |
|
1261 | 1261 | |
|
1262 | 1262 | def get(self, key, default=None): |
|
1263 | 1263 | return self._map.get(key, default) |
|
1264 | 1264 | |
|
1265 | 1265 | def __contains__(self, key): |
|
1266 | 1266 | return key in self._map |
|
1267 | 1267 | |
|
1268 | 1268 | def __getitem__(self, key): |
|
1269 | 1269 | return self._map[key] |
|
1270 | 1270 | |
|
1271 | 1271 | def keys(self): |
|
1272 | 1272 | return self._map.keys() |
|
1273 | 1273 | |
|
1274 | 1274 | def preload(self): |
|
1275 | 1275 | """Loads the underlying data, if it's not already loaded""" |
|
1276 | 1276 | self._map |
|
1277 | 1277 | |
|
1278 | 1278 | def addfile(self, f, oldstate, state, mode, size, mtime): |
|
1279 | 1279 | """Add a tracked file to the dirstate.""" |
|
1280 | 1280 | if oldstate in "?r" and r"_dirs" in self.__dict__: |
|
1281 | 1281 | self._dirs.addpath(f) |
|
1282 | 1282 | if oldstate == "?" and r"_alldirs" in self.__dict__: |
|
1283 | 1283 | self._alldirs.addpath(f) |
|
1284 | 1284 | self._map[f] = dirstatetuple(state, mode, size, mtime) |
|
1285 | 1285 | if state != 'n' or mtime == -1: |
|
1286 | 1286 | self.nonnormalset.add(f) |
|
1287 | 1287 | if size == -2: |
|
1288 | 1288 | self.otherparentset.add(f) |
|
1289 | 1289 | |
|
1290 | 1290 | def removefile(self, f, oldstate, size): |
|
1291 | 1291 | """ |
|
1292 | 1292 | Mark a file as removed in the dirstate. |
|
1293 | 1293 | |
|
1294 | 1294 | The `size` parameter is used to store sentinel values that indicate |
|
1295 | 1295 | the file's previous state. In the future, we should refactor this |
|
1296 | 1296 | to be more explicit about what that state is. |
|
1297 | 1297 | """ |
|
1298 | 1298 | if oldstate not in "?r" and r"_dirs" in self.__dict__: |
|
1299 | 1299 | self._dirs.delpath(f) |
|
1300 | 1300 | if oldstate == "?" and r"_alldirs" in self.__dict__: |
|
1301 | 1301 | self._alldirs.addpath(f) |
|
1302 | 1302 | if r"filefoldmap" in self.__dict__: |
|
1303 | 1303 | normed = util.normcase(f) |
|
1304 | 1304 | self.filefoldmap.pop(normed, None) |
|
1305 | 1305 | self._map[f] = dirstatetuple('r', 0, size, 0) |
|
1306 | 1306 | self.nonnormalset.add(f) |
|
1307 | 1307 | |
|
1308 | 1308 | def dropfile(self, f, oldstate): |
|
1309 | 1309 | """ |
|
1310 | 1310 | Remove a file from the dirstate. Returns True if the file was |
|
1311 | 1311 | previously recorded. |
|
1312 | 1312 | """ |
|
1313 | 1313 | exists = self._map.pop(f, None) is not None |
|
1314 | 1314 | if exists: |
|
1315 | 1315 | if oldstate != "r" and r"_dirs" in self.__dict__: |
|
1316 | 1316 | self._dirs.delpath(f) |
|
1317 | 1317 | if r"_alldirs" in self.__dict__: |
|
1318 | 1318 | self._alldirs.delpath(f) |
|
1319 | 1319 | if r"filefoldmap" in self.__dict__: |
|
1320 | 1320 | normed = util.normcase(f) |
|
1321 | 1321 | self.filefoldmap.pop(normed, None) |
|
1322 | 1322 | self.nonnormalset.discard(f) |
|
1323 | 1323 | return exists |
|
1324 | 1324 | |
|
1325 | 1325 | def clearambiguoustimes(self, files, now): |
|
1326 | 1326 | for f in files: |
|
1327 | 1327 | e = self.get(f) |
|
1328 | 1328 | if e is not None and e[0] == 'n' and e[3] == now: |
|
1329 | 1329 | self._map[f] = dirstatetuple(e[0], e[1], e[2], -1) |
|
1330 | 1330 | self.nonnormalset.add(f) |
|
1331 | 1331 | |
|
1332 | 1332 | def nonnormalentries(self): |
|
1333 | 1333 | '''Compute the nonnormal dirstate entries from the dmap''' |
|
1334 | 1334 | try: |
|
1335 | 1335 | return parsers.nonnormalotherparententries(self._map) |
|
1336 | 1336 | except AttributeError: |
|
1337 | 1337 | nonnorm = set() |
|
1338 | 1338 | otherparent = set() |
|
1339 | 1339 | for fname, e in self._map.iteritems(): |
|
1340 | 1340 | if e[0] != 'n' or e[3] == -1: |
|
1341 | 1341 | nonnorm.add(fname) |
|
1342 | 1342 | if e[0] == 'n' and e[2] == -2: |
|
1343 | 1343 | otherparent.add(fname) |
|
1344 | 1344 | return nonnorm, otherparent |
|
1345 | 1345 | |
|
1346 | 1346 | @propertycache |
|
1347 | 1347 | def filefoldmap(self): |
|
1348 | 1348 | """Returns a dictionary mapping normalized case paths to their |
|
1349 | 1349 | non-normalized versions. |
|
1350 | 1350 | """ |
|
1351 | 1351 | try: |
|
1352 | 1352 | makefilefoldmap = parsers.make_file_foldmap |
|
1353 | 1353 | except AttributeError: |
|
1354 | 1354 | pass |
|
1355 | 1355 | else: |
|
1356 | 1356 | return makefilefoldmap(self._map, util.normcasespec, |
|
1357 | 1357 | util.normcasefallback) |
|
1358 | 1358 | |
|
1359 | 1359 | f = {} |
|
1360 | 1360 | normcase = util.normcase |
|
1361 | 1361 | for name, s in self._map.iteritems(): |
|
1362 | 1362 | if s[0] != 'r': |
|
1363 | 1363 | f[normcase(name)] = name |
|
1364 | 1364 | f['.'] = '.' # prevents useless util.fspath() invocation |
|
1365 | 1365 | return f |
|
1366 | 1366 | |
|
1367 | 1367 | def hastrackeddir(self, d): |
|
1368 | 1368 | """ |
|
1369 | 1369 | Returns True if the dirstate contains a tracked (not removed) file |
|
1370 | 1370 | in this directory. |
|
1371 | 1371 | """ |
|
1372 | 1372 | return d in self._dirs |
|
1373 | 1373 | |
|
1374 | 1374 | def hasdir(self, d): |
|
1375 | 1375 | """ |
|
1376 | 1376 | Returns True if the dirstate contains a file (tracked or removed) |
|
1377 | 1377 | in this directory. |
|
1378 | 1378 | """ |
|
1379 | 1379 | return d in self._alldirs |
|
1380 | 1380 | |
|
1381 | 1381 | @propertycache |
|
1382 | 1382 | def _dirs(self): |
|
1383 | 1383 | return util.dirs(self._map, 'r') |
|
1384 | 1384 | |
|
1385 | 1385 | @propertycache |
|
1386 | 1386 | def _alldirs(self): |
|
1387 | 1387 | return util.dirs(self._map) |
|
1388 | 1388 | |
|
1389 | 1389 | def _opendirstatefile(self): |
|
1390 | 1390 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) |
|
1391 | 1391 | if self._pendingmode is not None and self._pendingmode != mode: |
|
1392 | 1392 | fp.close() |
|
1393 | 1393 | raise error.Abort(_('working directory state may be ' |
|
1394 | 1394 | 'changed parallelly')) |
|
1395 | 1395 | self._pendingmode = mode |
|
1396 | 1396 | return fp |
|
1397 | 1397 | |
|
1398 | 1398 | def parents(self): |
|
1399 | 1399 | if not self._parents: |
|
1400 | 1400 | try: |
|
1401 | 1401 | fp = self._opendirstatefile() |
|
1402 | 1402 | st = fp.read(40) |
|
1403 | 1403 | fp.close() |
|
1404 | 1404 | except IOError as err: |
|
1405 | 1405 | if err.errno != errno.ENOENT: |
|
1406 | 1406 | raise |
|
1407 | 1407 | # File doesn't exist, so the current state is empty |
|
1408 | 1408 | st = '' |
|
1409 | 1409 | |
|
1410 | 1410 | l = len(st) |
|
1411 | 1411 | if l == 40: |
|
1412 | 1412 | self._parents = (st[:20], st[20:40]) |
|
1413 | 1413 | elif l == 0: |
|
1414 | 1414 | self._parents = (nullid, nullid) |
|
1415 | 1415 | else: |
|
1416 | 1416 | raise error.Abort(_('working directory state appears ' |
|
1417 | 1417 | 'damaged!')) |
|
1418 | 1418 | |
|
1419 | 1419 | return self._parents |
|
1420 | 1420 | |
|
1421 | 1421 | def setparents(self, p1, p2): |
|
1422 | 1422 | self._parents = (p1, p2) |
|
1423 | 1423 | self._dirtyparents = True |
|
1424 | 1424 | |
|
1425 | 1425 | def read(self): |
|
1426 | 1426 | # ignore HG_PENDING because identity is used only for writing |
|
1427 | 1427 | self.identity = util.filestat.frompath( |
|
1428 | 1428 | self._opener.join(self._filename)) |
|
1429 | 1429 | |
|
1430 | 1430 | try: |
|
1431 | 1431 | fp = self._opendirstatefile() |
|
1432 | 1432 | try: |
|
1433 | 1433 | st = fp.read() |
|
1434 | 1434 | finally: |
|
1435 | 1435 | fp.close() |
|
1436 | 1436 | except IOError as err: |
|
1437 | 1437 | if err.errno != errno.ENOENT: |
|
1438 | 1438 | raise |
|
1439 | 1439 | return |
|
1440 | 1440 | if not st: |
|
1441 | 1441 | return |
|
1442 | 1442 | |
|
1443 | 1443 | if util.safehasattr(parsers, 'dict_new_presized'): |
|
1444 | 1444 | # Make an estimate of the number of files in the dirstate based on |
|
1445 | 1445 | # its size. From a linear regression on a set of real-world repos, |
|
1446 | 1446 | # all over 10,000 files, the size of a dirstate entry is 85 |
|
1447 | 1447 | # bytes. The cost of resizing is significantly higher than the cost |
|
1448 | 1448 | # of filling in a larger presized dict, so subtract 20% from the |
|
1449 | 1449 | # size. |
|
1450 | 1450 | # |
|
1451 | 1451 | # This heuristic is imperfect in many ways, so in a future dirstate |
|
1452 | 1452 | # format update it makes sense to just record the number of entries |
|
1453 | 1453 | # on write. |
|
1454 | 1454 | self._map = parsers.dict_new_presized(len(st) // 71) |
|
1455 | 1455 | |
|
1456 | 1456 | # Python's garbage collector triggers a GC each time a certain number |
|
1457 | 1457 | # of container objects (the number being defined by |
|
1458 | 1458 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple |
|
1459 | 1459 | # for each file in the dirstate. The C version then immediately marks |
|
1460 | 1460 | # them as not to be tracked by the collector. However, this has no |
|
1461 | 1461 | # effect on when GCs are triggered, only on what objects the GC looks |
|
1462 | 1462 | # into. This means that O(number of files) GCs are unavoidable. |
|
1463 | 1463 | # Depending on when in the process's lifetime the dirstate is parsed, |
|
1464 | 1464 | # this can get very expensive. As a workaround, disable GC while |
|
1465 | 1465 | # parsing the dirstate. |
|
1466 | 1466 | # |
|
1467 | 1467 | # (we cannot decorate the function directly since it is in a C module) |
|
1468 | 1468 | parse_dirstate = util.nogc(parsers.parse_dirstate) |
|
1469 | 1469 | p = parse_dirstate(self._map, self.copymap, st) |
|
1470 | 1470 | if not self._dirtyparents: |
|
1471 | 1471 | self.setparents(*p) |
|
1472 | 1472 | |
|
1473 | 1473 | # Avoid excess attribute lookups by fast pathing certain checks |
|
1474 | 1474 | self.__contains__ = self._map.__contains__ |
|
1475 | 1475 | self.__getitem__ = self._map.__getitem__ |
|
1476 | 1476 | self.get = self._map.get |
|
1477 | 1477 | |
|
1478 | 1478 | def write(self, st, now): |
|
1479 | 1479 | st.write(parsers.pack_dirstate(self._map, self.copymap, |
|
1480 | 1480 | self.parents(), now)) |
|
1481 | 1481 | st.close() |
|
1482 | 1482 | self._dirtyparents = False |
|
1483 | 1483 | self.nonnormalset, self.otherparentset = self.nonnormalentries() |
|
1484 | 1484 | |
|
1485 | 1485 | @propertycache |
|
1486 | 1486 | def nonnormalset(self): |
|
1487 | 1487 | nonnorm, otherparents = self.nonnormalentries() |
|
1488 | 1488 | self.otherparentset = otherparents |
|
1489 | 1489 | return nonnorm |
|
1490 | 1490 | |
|
1491 | 1491 | @propertycache |
|
1492 | 1492 | def otherparentset(self): |
|
1493 | 1493 | nonnorm, otherparents = self.nonnormalentries() |
|
1494 | 1494 | self.nonnormalset = nonnorm |
|
1495 | 1495 | return otherparents |
|
1496 | 1496 | |
|
1497 | 1497 | @propertycache |
|
1498 | 1498 | def identity(self): |
|
1499 | 1499 | self._map |
|
1500 | 1500 | return self.identity |
|
1501 | 1501 | |
|
1502 | 1502 | @propertycache |
|
1503 | 1503 | def dirfoldmap(self): |
|
1504 | 1504 | f = {} |
|
1505 | 1505 | normcase = util.normcase |
|
1506 | 1506 | for name in self._dirs: |
|
1507 | 1507 | f[normcase(name)] = name |
|
1508 | 1508 | return f |
@@ -1,560 +1,559 b'' | |||
|
1 | 1 | # fileset.py - file set queries for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import re |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from . import ( |
|
15 | 15 | error, |
|
16 | 16 | filesetlang, |
|
17 | 17 | match as matchmod, |
|
18 | 18 | merge, |
|
19 | 19 | pycompat, |
|
20 | 20 | registrar, |
|
21 | 21 | scmutil, |
|
22 | 22 | util, |
|
23 | 23 | ) |
|
24 | 24 | from .utils import ( |
|
25 | 25 | stringutil, |
|
26 | 26 | ) |
|
27 | 27 | |
|
28 | 28 | # common weight constants |
|
29 | 29 | _WEIGHT_CHECK_FILENAME = filesetlang.WEIGHT_CHECK_FILENAME |
|
30 | 30 | _WEIGHT_READ_CONTENTS = filesetlang.WEIGHT_READ_CONTENTS |
|
31 | 31 | _WEIGHT_STATUS = filesetlang.WEIGHT_STATUS |
|
32 | 32 | _WEIGHT_STATUS_THOROUGH = filesetlang.WEIGHT_STATUS_THOROUGH |
|
33 | 33 | |
|
34 | 34 | # helpers for processing parsed tree |
|
35 | 35 | getsymbol = filesetlang.getsymbol |
|
36 | 36 | getstring = filesetlang.getstring |
|
37 | 37 | _getkindpat = filesetlang.getkindpat |
|
38 | 38 | getpattern = filesetlang.getpattern |
|
39 | 39 | getargs = filesetlang.getargs |
|
40 | 40 | |
|
41 | 41 | def getmatch(mctx, x): |
|
42 | 42 | if not x: |
|
43 | 43 | raise error.ParseError(_("missing argument")) |
|
44 | 44 | return methods[x[0]](mctx, *x[1:]) |
|
45 | 45 | |
|
46 | 46 | def getmatchwithstatus(mctx, x, hint): |
|
47 | 47 | keys = set(getstring(hint, 'status hint must be a string').split()) |
|
48 | 48 | return getmatch(mctx.withstatus(keys), x) |
|
49 | 49 | |
|
50 | 50 | def stringmatch(mctx, x): |
|
51 | 51 | return mctx.matcher([x]) |
|
52 | 52 | |
|
53 | 53 | def kindpatmatch(mctx, x, y): |
|
54 | 54 | return stringmatch(mctx, _getkindpat(x, y, matchmod.allpatternkinds, |
|
55 | 55 | _("pattern must be a string"))) |
|
56 | 56 | |
|
57 | 57 | def patternsmatch(mctx, *xs): |
|
58 | 58 | allkinds = matchmod.allpatternkinds |
|
59 | 59 | patterns = [getpattern(x, allkinds, _("pattern must be a string")) |
|
60 | 60 | for x in xs] |
|
61 | 61 | return mctx.matcher(patterns) |
|
62 | 62 | |
|
63 | 63 | def andmatch(mctx, x, y): |
|
64 | 64 | xm = getmatch(mctx, x) |
|
65 | 65 | ym = getmatch(mctx.narrowed(xm), y) |
|
66 | 66 | return matchmod.intersectmatchers(xm, ym) |
|
67 | 67 | |
|
68 | 68 | def ormatch(mctx, *xs): |
|
69 | 69 | ms = [getmatch(mctx, x) for x in xs] |
|
70 | 70 | return matchmod.unionmatcher(ms) |
|
71 | 71 | |
|
72 | 72 | def notmatch(mctx, x): |
|
73 | 73 | m = getmatch(mctx, x) |
|
74 | 74 | return mctx.predicate(lambda f: not m(f), predrepr=('<not %r>', m)) |
|
75 | 75 | |
|
76 | 76 | def minusmatch(mctx, x, y): |
|
77 | 77 | xm = getmatch(mctx, x) |
|
78 | 78 | ym = getmatch(mctx.narrowed(xm), y) |
|
79 | 79 | return matchmod.differencematcher(xm, ym) |
|
80 | 80 | |
|
81 | 81 | def listmatch(mctx, *xs): |
|
82 | 82 | raise error.ParseError(_("can't use a list in this context"), |
|
83 | 83 | hint=_('see \'hg help "filesets.x or y"\'')) |
|
84 | 84 | |
|
85 | 85 | def func(mctx, a, b): |
|
86 | 86 | funcname = getsymbol(a) |
|
87 | 87 | if funcname in symbols: |
|
88 | 88 | return symbols[funcname](mctx, b) |
|
89 | 89 | |
|
90 | 90 | keep = lambda fn: getattr(fn, '__doc__', None) is not None |
|
91 | 91 | |
|
92 | 92 | syms = [s for (s, fn) in symbols.items() if keep(fn)] |
|
93 | 93 | raise error.UnknownIdentifier(funcname, syms) |
|
94 | 94 | |
|
95 | 95 | # symbols are callable like: |
|
96 | 96 | # fun(mctx, x) |
|
97 | 97 | # with: |
|
98 | 98 | # mctx - current matchctx instance |
|
99 | 99 | # x - argument in tree form |
|
100 | 100 | symbols = filesetlang.symbols |
|
101 | 101 | |
|
102 | 102 | predicate = registrar.filesetpredicate(symbols) |
|
103 | 103 | |
|
104 | 104 | @predicate('modified()', callstatus=True, weight=_WEIGHT_STATUS) |
|
105 | 105 | def modified(mctx, x): |
|
106 | 106 | """File that is modified according to :hg:`status`. |
|
107 | 107 | """ |
|
108 | 108 | # i18n: "modified" is a keyword |
|
109 | 109 | getargs(x, 0, 0, _("modified takes no arguments")) |
|
110 | 110 | s = set(mctx.status().modified) |
|
111 | 111 | return mctx.predicate(s.__contains__, predrepr='modified') |
|
112 | 112 | |
|
113 | 113 | @predicate('added()', callstatus=True, weight=_WEIGHT_STATUS) |
|
114 | 114 | def added(mctx, x): |
|
115 | 115 | """File that is added according to :hg:`status`. |
|
116 | 116 | """ |
|
117 | 117 | # i18n: "added" is a keyword |
|
118 | 118 | getargs(x, 0, 0, _("added takes no arguments")) |
|
119 | 119 | s = set(mctx.status().added) |
|
120 | 120 | return mctx.predicate(s.__contains__, predrepr='added') |
|
121 | 121 | |
|
122 | 122 | @predicate('removed()', callstatus=True, weight=_WEIGHT_STATUS) |
|
123 | 123 | def removed(mctx, x): |
|
124 | 124 | """File that is removed according to :hg:`status`. |
|
125 | 125 | """ |
|
126 | 126 | # i18n: "removed" is a keyword |
|
127 | 127 | getargs(x, 0, 0, _("removed takes no arguments")) |
|
128 | 128 | s = set(mctx.status().removed) |
|
129 | 129 | return mctx.predicate(s.__contains__, predrepr='removed') |
|
130 | 130 | |
|
131 | 131 | @predicate('deleted()', callstatus=True, weight=_WEIGHT_STATUS) |
|
132 | 132 | def deleted(mctx, x): |
|
133 | 133 | """Alias for ``missing()``. |
|
134 | 134 | """ |
|
135 | 135 | # i18n: "deleted" is a keyword |
|
136 | 136 | getargs(x, 0, 0, _("deleted takes no arguments")) |
|
137 | 137 | s = set(mctx.status().deleted) |
|
138 | 138 | return mctx.predicate(s.__contains__, predrepr='deleted') |
|
139 | 139 | |
|
140 | 140 | @predicate('missing()', callstatus=True, weight=_WEIGHT_STATUS) |
|
141 | 141 | def missing(mctx, x): |
|
142 | 142 | """File that is missing according to :hg:`status`. |
|
143 | 143 | """ |
|
144 | 144 | # i18n: "missing" is a keyword |
|
145 | 145 | getargs(x, 0, 0, _("missing takes no arguments")) |
|
146 | 146 | s = set(mctx.status().deleted) |
|
147 | 147 | return mctx.predicate(s.__contains__, predrepr='deleted') |
|
148 | 148 | |
|
149 | 149 | @predicate('unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH) |
|
150 | 150 | def unknown(mctx, x): |
|
151 | 151 | """File that is unknown according to :hg:`status`.""" |
|
152 | 152 | # i18n: "unknown" is a keyword |
|
153 | 153 | getargs(x, 0, 0, _("unknown takes no arguments")) |
|
154 | 154 | s = set(mctx.status().unknown) |
|
155 | 155 | return mctx.predicate(s.__contains__, predrepr='unknown') |
|
156 | 156 | |
|
157 | 157 | @predicate('ignored()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH) |
|
158 | 158 | def ignored(mctx, x): |
|
159 | 159 | """File that is ignored according to :hg:`status`.""" |
|
160 | 160 | # i18n: "ignored" is a keyword |
|
161 | 161 | getargs(x, 0, 0, _("ignored takes no arguments")) |
|
162 | 162 | s = set(mctx.status().ignored) |
|
163 | 163 | return mctx.predicate(s.__contains__, predrepr='ignored') |
|
164 | 164 | |
|
165 | 165 | @predicate('clean()', callstatus=True, weight=_WEIGHT_STATUS) |
|
166 | 166 | def clean(mctx, x): |
|
167 | 167 | """File that is clean according to :hg:`status`. |
|
168 | 168 | """ |
|
169 | 169 | # i18n: "clean" is a keyword |
|
170 | 170 | getargs(x, 0, 0, _("clean takes no arguments")) |
|
171 | 171 | s = set(mctx.status().clean) |
|
172 | 172 | return mctx.predicate(s.__contains__, predrepr='clean') |
|
173 | 173 | |
|
174 | 174 | @predicate('tracked()') |
|
175 | 175 | def tracked(mctx, x): |
|
176 | 176 | """File that is under Mercurial control.""" |
|
177 | 177 | # i18n: "tracked" is a keyword |
|
178 | 178 | getargs(x, 0, 0, _("tracked takes no arguments")) |
|
179 | 179 | return mctx.predicate(mctx.ctx.__contains__, predrepr='tracked') |
|
180 | 180 | |
|
181 | 181 | @predicate('binary()', weight=_WEIGHT_READ_CONTENTS) |
|
182 | 182 | def binary(mctx, x): |
|
183 | 183 | """File that appears to be binary (contains NUL bytes). |
|
184 | 184 | """ |
|
185 | 185 | # i18n: "binary" is a keyword |
|
186 | 186 | getargs(x, 0, 0, _("binary takes no arguments")) |
|
187 | 187 | return mctx.fpredicate(lambda fctx: fctx.isbinary(), |
|
188 | 188 | predrepr='binary', cache=True) |
|
189 | 189 | |
|
190 | 190 | @predicate('exec()') |
|
191 | 191 | def exec_(mctx, x): |
|
192 | 192 | """File that is marked as executable. |
|
193 | 193 | """ |
|
194 | 194 | # i18n: "exec" is a keyword |
|
195 | 195 | getargs(x, 0, 0, _("exec takes no arguments")) |
|
196 | 196 | ctx = mctx.ctx |
|
197 | 197 | return mctx.predicate(lambda f: ctx.flags(f) == 'x', predrepr='exec') |
|
198 | 198 | |
|
199 | 199 | @predicate('symlink()') |
|
200 | 200 | def symlink(mctx, x): |
|
201 | 201 | """File that is marked as a symlink. |
|
202 | 202 | """ |
|
203 | 203 | # i18n: "symlink" is a keyword |
|
204 | 204 | getargs(x, 0, 0, _("symlink takes no arguments")) |
|
205 | 205 | ctx = mctx.ctx |
|
206 | 206 | return mctx.predicate(lambda f: ctx.flags(f) == 'l', predrepr='symlink') |
|
207 | 207 | |
|
208 | 208 | @predicate('resolved()', weight=_WEIGHT_STATUS) |
|
209 | 209 | def resolved(mctx, x): |
|
210 | 210 | """File that is marked resolved according to :hg:`resolve -l`. |
|
211 | 211 | """ |
|
212 | 212 | # i18n: "resolved" is a keyword |
|
213 | 213 | getargs(x, 0, 0, _("resolved takes no arguments")) |
|
214 | 214 | if mctx.ctx.rev() is not None: |
|
215 | 215 | return mctx.never() |
|
216 | 216 | ms = merge.mergestate.read(mctx.ctx.repo()) |
|
217 | 217 | return mctx.predicate(lambda f: f in ms and ms[f] == 'r', |
|
218 | 218 | predrepr='resolved') |
|
219 | 219 | |
|
220 | 220 | @predicate('unresolved()', weight=_WEIGHT_STATUS) |
|
221 | 221 | def unresolved(mctx, x): |
|
222 | 222 | """File that is marked unresolved according to :hg:`resolve -l`. |
|
223 | 223 | """ |
|
224 | 224 | # i18n: "unresolved" is a keyword |
|
225 | 225 | getargs(x, 0, 0, _("unresolved takes no arguments")) |
|
226 | 226 | if mctx.ctx.rev() is not None: |
|
227 | 227 | return mctx.never() |
|
228 | 228 | ms = merge.mergestate.read(mctx.ctx.repo()) |
|
229 | 229 | return mctx.predicate(lambda f: f in ms and ms[f] == 'u', |
|
230 | 230 | predrepr='unresolved') |
|
231 | 231 | |
|
232 | 232 | @predicate('hgignore()', weight=_WEIGHT_STATUS) |
|
233 | 233 | def hgignore(mctx, x): |
|
234 | 234 | """File that matches the active .hgignore pattern. |
|
235 | 235 | """ |
|
236 | 236 | # i18n: "hgignore" is a keyword |
|
237 | 237 | getargs(x, 0, 0, _("hgignore takes no arguments")) |
|
238 | 238 | return mctx.ctx.repo().dirstate._ignore |
|
239 | 239 | |
|
240 | 240 | @predicate('portable()', weight=_WEIGHT_CHECK_FILENAME) |
|
241 | 241 | def portable(mctx, x): |
|
242 | 242 | """File that has a portable name. (This doesn't include filenames with case |
|
243 | 243 | collisions.) |
|
244 | 244 | """ |
|
245 | 245 | # i18n: "portable" is a keyword |
|
246 | 246 | getargs(x, 0, 0, _("portable takes no arguments")) |
|
247 | 247 | return mctx.predicate(lambda f: util.checkwinfilename(f) is None, |
|
248 | 248 | predrepr='portable') |
|
249 | 249 | |
|
250 | 250 | @predicate('grep(regex)', weight=_WEIGHT_READ_CONTENTS) |
|
251 | 251 | def grep(mctx, x): |
|
252 | 252 | """File contains the given regular expression. |
|
253 | 253 | """ |
|
254 | 254 | try: |
|
255 | 255 | # i18n: "grep" is a keyword |
|
256 | 256 | r = re.compile(getstring(x, _("grep requires a pattern"))) |
|
257 | 257 | except re.error as e: |
|
258 | 258 | raise error.ParseError(_('invalid match pattern: %s') % |
|
259 | 259 | stringutil.forcebytestr(e)) |
|
260 | 260 | return mctx.fpredicate(lambda fctx: r.search(fctx.data()), |
|
261 | 261 | predrepr=('grep(%r)', r.pattern), cache=True) |
|
262 | 262 | |
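As a usage note (illustrative only; the pattern and glob are made up), ``grep()`` is normally reached through the fileset syntax on the command line, for example::

    hg files "set:grep('TODO') and **.py"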
|
263 | 263 | def _sizetomax(s): |
|
264 | 264 | try: |
|
265 | 265 | s = s.strip().lower() |
|
266 | 266 | for k, v in util._sizeunits: |
|
267 | 267 | if s.endswith(k): |
|
268 | 268 | # max(4k) = 5k - 1, max(4.5k) = 4.6k - 1 |
|
269 | 269 | n = s[:-len(k)] |
|
270 | 270 | inc = 1.0 |
|
271 | 271 | if "." in n: |
|
272 | 272 | inc /= 10 ** len(n.split(".")[1]) |
|
273 | 273 | return int((float(n) + inc) * v) - 1 |
|
274 | 274 | # no extension, this is a precise value |
|
275 | 275 | return int(s) |
|
276 | 276 | except ValueError: |
|
277 | 277 | raise error.ParseError(_("couldn't parse size: %s") % s) |
|
278 | 278 | |
|
279 | 279 | def sizematcher(expr): |
|
280 | 280 | """Return a function(size) -> bool from the ``size()`` expression""" |
|
281 | 281 | expr = expr.strip() |
|
282 | 282 | if '-' in expr: # do we have a range? |
|
283 | 283 | a, b = expr.split('-', 1) |
|
284 | 284 | a = util.sizetoint(a) |
|
285 | 285 | b = util.sizetoint(b) |
|
286 | 286 | return lambda x: x >= a and x <= b |
|
287 | 287 | elif expr.startswith("<="): |
|
288 | 288 | a = util.sizetoint(expr[2:]) |
|
289 | 289 | return lambda x: x <= a |
|
290 | 290 | elif expr.startswith("<"): |
|
291 | 291 | a = util.sizetoint(expr[1:]) |
|
292 | 292 | return lambda x: x < a |
|
293 | 293 | elif expr.startswith(">="): |
|
294 | 294 | a = util.sizetoint(expr[2:]) |
|
295 | 295 | return lambda x: x >= a |
|
296 | 296 | elif expr.startswith(">"): |
|
297 | 297 | a = util.sizetoint(expr[1:]) |
|
298 | 298 | return lambda x: x > a |
|
299 | 299 | else: |
|
300 | 300 | a = util.sizetoint(expr) |
|
301 | 301 | b = _sizetomax(expr) |
|
302 | 302 | return lambda x: x >= a and x <= b |
|
303 | 303 | |
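A small illustrative sketch (not part of the module) of the matchers ``sizematcher`` returns, assuming the function is importable as ``mercurial.fileset.sizematcher``::

    from mercurial import fileset

    m = fileset.sizematcher(b'1k')       # bare value: 1024 <= size <= 2047, per _sizetomax
    assert m(1024) and m(2047) and not m(2048)
    m = fileset.sizematcher(b'>= .5MB')  # comparison: size >= 524288
    assert m(524288) and not m(524287)
    m = fileset.sizematcher(b'4k - 1MB') # range: 4096 <= size <= 1048576
    assert m(4096) and m(1048576) and not m(4095)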
|
304 | 304 | @predicate('size(expression)', weight=_WEIGHT_STATUS) |
|
305 | 305 | def size(mctx, x): |
|
306 | 306 | """File size matches the given expression. Examples: |
|
307 | 307 | |
|
308 | 308 | - size('1k') - files from 1024 to 2047 bytes |
|
309 | 309 | - size('< 20k') - files less than 20480 bytes |
|
310 | 310 | - size('>= .5MB') - files at least 524288 bytes |
|
311 | 311 | - size('4k - 1MB') - files from 4096 bytes to 1048576 bytes |
|
312 | 312 | """ |
|
313 | 313 | # i18n: "size" is a keyword |
|
314 | 314 | expr = getstring(x, _("size requires an expression")) |
|
315 | 315 | m = sizematcher(expr) |
|
316 | 316 | return mctx.fpredicate(lambda fctx: m(fctx.size()), |
|
317 | 317 | predrepr=('size(%r)', expr), cache=True) |
|
318 | 318 | |
|
319 | 319 | @predicate('encoding(name)', weight=_WEIGHT_READ_CONTENTS) |
|
320 | 320 | def encoding(mctx, x): |
|
321 | 321 | """File can be successfully decoded with the given character |
|
322 | 322 | encoding. May not be useful for encodings other than ASCII and |
|
323 | 323 | UTF-8. |
|
324 | 324 | """ |
|
325 | 325 | |
|
326 | 326 | # i18n: "encoding" is a keyword |
|
327 | 327 | enc = getstring(x, _("encoding requires an encoding name")) |
|
328 | 328 | |
|
329 | 329 | def encp(fctx): |
|
330 | 330 | d = fctx.data() |
|
331 | 331 | try: |
|
332 | 332 | d.decode(pycompat.sysstr(enc)) |
|
333 | 333 | return True |
|
334 | 334 | except LookupError: |
|
335 | 335 | raise error.Abort(_("unknown encoding '%s'") % enc) |
|
336 | 336 | except UnicodeDecodeError: |
|
337 | 337 | return False |
|
338 | 338 | |
|
339 | 339 | return mctx.fpredicate(encp, predrepr=('encoding(%r)', enc), cache=True) |
|
340 | 340 | |
|
341 | 341 | @predicate('eol(style)', weight=_WEIGHT_READ_CONTENTS) |
|
342 | 342 | def eol(mctx, x): |
|
343 | 343 | """File contains newlines of the given style (dos, unix, mac). Binary |
|
344 | 344 | files are excluded; files with mixed line endings match multiple |
|
345 | 345 | styles. |
|
346 | 346 | """ |
|
347 | 347 | |
|
348 | 348 | # i18n: "eol" is a keyword |
|
349 | 349 | enc = getstring(x, _("eol requires a style name")) |
|
350 | 350 | |
|
351 | 351 | def eolp(fctx): |
|
352 | 352 | if fctx.isbinary(): |
|
353 | 353 | return False |
|
354 | 354 | d = fctx.data() |
|
355 | 355 | if (enc == 'dos' or enc == 'win') and '\r\n' in d: |
|
356 | 356 | return True |
|
357 | 357 | elif enc == 'unix' and re.search('(?<!\r)\n', d): |
|
358 | 358 | return True |
|
359 | 359 | elif enc == 'mac' and re.search('\r(?!\n)', d): |
|
360 | 360 | return True |
|
361 | 361 | return False |
|
362 | 362 | return mctx.fpredicate(eolp, predrepr=('eol(%r)', enc), cache=True) |
|
363 | 363 | |
|
364 | 364 | @predicate('copied()') |
|
365 | 365 | def copied(mctx, x): |
|
366 | 366 | """File that is recorded as being copied. |
|
367 | 367 | """ |
|
368 | 368 | # i18n: "copied" is a keyword |
|
369 | 369 | getargs(x, 0, 0, _("copied takes no arguments")) |
|
370 | 370 | def copiedp(fctx): |
|
371 | 371 | p = fctx.parents() |
|
372 | 372 | return p and p[0].path() != fctx.path() |
|
373 | 373 | return mctx.fpredicate(copiedp, predrepr='copied', cache=True) |
|
374 | 374 | |
|
375 | 375 | @predicate('revs(revs, pattern)', weight=_WEIGHT_STATUS) |
|
376 | 376 | def revs(mctx, x): |
|
377 | 377 | """Evaluate set in the specified revisions. If the revset matches |

378 | 378 | multiple revs, this will return files matching the pattern in any of those revisions. |
|
379 | 379 | """ |
|
380 | 380 | # i18n: "revs" is a keyword |
|
381 | 381 | r, x = getargs(x, 2, 2, _("revs takes two arguments")) |
|
382 | 382 | # i18n: "revs" is a keyword |
|
383 | 383 | revspec = getstring(r, _("first argument to revs must be a revision")) |
|
384 | 384 | repo = mctx.ctx.repo() |
|
385 | 385 | revs = scmutil.revrange(repo, [revspec]) |
|
386 | 386 | |
|
387 | 387 | matchers = [] |
|
388 | 388 | for r in revs: |
|
389 | 389 | ctx = repo[r] |
|
390 | 390 | mc = mctx.switch(ctx.p1(), ctx) |
|
391 | 391 | matchers.append(getmatch(mc, x)) |
|
392 | 392 | if not matchers: |
|
393 | 393 | return mctx.never() |
|
394 | 394 | if len(matchers) == 1: |
|
395 | 395 | return matchers[0] |
|
396 | 396 | return matchmod.unionmatcher(matchers) |
|
397 | 397 | |
|
398 | 398 | @predicate('status(base, rev, pattern)', weight=_WEIGHT_STATUS) |
|
399 | 399 | def status(mctx, x): |
|
400 | 400 | """Evaluate predicate using status change between ``base`` and |
|
401 | 401 | ``rev``. Examples: |
|
402 | 402 | |
|
403 | 403 | - ``status(3, 7, added())`` - matches files added from "3" to "7" |
|
404 | 404 | """ |
|
405 | 405 | repo = mctx.ctx.repo() |
|
406 | 406 | # i18n: "status" is a keyword |
|
407 | 407 | b, r, x = getargs(x, 3, 3, _("status takes three arguments")) |
|
408 | 408 | # i18n: "status" is a keyword |
|
409 | 409 | baseerr = _("first argument to status must be a revision") |
|
410 | 410 | baserevspec = getstring(b, baseerr) |
|
411 | 411 | if not baserevspec: |
|
412 | 412 | raise error.ParseError(baseerr) |
|
413 | 413 | reverr = _("second argument to status must be a revision") |
|
414 | 414 | revspec = getstring(r, reverr) |
|
415 | 415 | if not revspec: |
|
416 | 416 | raise error.ParseError(reverr) |
|
417 | 417 | basectx, ctx = scmutil.revpair(repo, [baserevspec, revspec]) |
|
418 | 418 | mc = mctx.switch(basectx, ctx) |
|
419 | 419 | return getmatch(mc, x) |
|
420 | 420 | |
|
421 | 421 | @predicate('subrepo([pattern])') |
|
422 | 422 | def subrepo(mctx, x): |
|
423 | 423 | """Subrepositories whose paths match the given pattern. |
|
424 | 424 | """ |
|
425 | 425 | # i18n: "subrepo" is a keyword |
|
426 | 426 | getargs(x, 0, 1, _("subrepo takes at most one argument")) |
|
427 | 427 | ctx = mctx.ctx |
|
428 | 428 | sstate = ctx.substate |
|
429 | 429 | if x: |
|
430 | 430 | pat = getpattern(x, matchmod.allpatternkinds, |
|
431 | 431 | # i18n: "subrepo" is a keyword |
|
432 | 432 | _("subrepo requires a pattern or no arguments")) |
|
433 | 433 | fast = not matchmod.patkind(pat) |
|
434 | 434 | if fast: |
|
435 | 435 | def m(s): |
|
436 | 436 | return (s == pat) |
|
437 | 437 | else: |
|
438 | 438 | m = matchmod.match(ctx.repo().root, '', [pat], ctx=ctx) |
|
439 | 439 | return mctx.predicate(lambda f: f in sstate and m(f), |
|
440 | 440 | predrepr=('subrepo(%r)', pat)) |
|
441 | 441 | else: |
|
442 | 442 | return mctx.predicate(sstate.__contains__, predrepr='subrepo') |
|
443 | 443 | |
|
444 | 444 | methods = { |
|
445 | 445 | 'withstatus': getmatchwithstatus, |
|
446 | 446 | 'string': stringmatch, |
|
447 | 447 | 'symbol': stringmatch, |
|
448 | 448 | 'kindpat': kindpatmatch, |
|
449 | 449 | 'patterns': patternsmatch, |
|
450 | 450 | 'and': andmatch, |
|
451 | 451 | 'or': ormatch, |
|
452 | 452 | 'minus': minusmatch, |
|
453 | 453 | 'list': listmatch, |
|
454 | 454 | 'not': notmatch, |
|
455 | 455 | 'func': func, |
|
456 | 456 | } |
|
457 | 457 | |
|
458 | 458 | class matchctx(object): |
|
459 | 459 | def __init__(self, basectx, ctx, badfn=None): |
|
460 | 460 | self._basectx = basectx |
|
461 | 461 | self.ctx = ctx |
|
462 | 462 | self._badfn = badfn |
|
463 | 463 | self._match = None |
|
464 | 464 | self._status = None |
|
465 | 465 | |
|
466 | 466 | def narrowed(self, match): |
|
467 | 467 | """Create matchctx for a sub-tree narrowed by the given matcher""" |
|
468 | 468 | mctx = matchctx(self._basectx, self.ctx, self._badfn) |
|
469 | 469 | mctx._match = match |
|
470 | 470 | # leave the wider status, which we don't need to care about |
|
471 | 471 | mctx._status = self._status |
|
472 | 472 | return mctx |
|
473 | 473 | |
|
474 | 474 | def switch(self, basectx, ctx): |
|
475 | 475 | mctx = matchctx(basectx, ctx, self._badfn) |
|
476 | 476 | mctx._match = self._match |
|
477 | 477 | return mctx |
|
478 | 478 | |
|
479 | 479 | def withstatus(self, keys): |
|
480 | 480 | """Create matchctx which has precomputed status specified by the keys""" |
|
481 | 481 | mctx = matchctx(self._basectx, self.ctx, self._badfn) |
|
482 | 482 | mctx._match = self._match |
|
483 | 483 | mctx._buildstatus(keys) |
|
484 | 484 | return mctx |
|
485 | 485 | |
|
486 | 486 | def _buildstatus(self, keys): |
|
487 | 487 | self._status = self._basectx.status(self.ctx, self._match, |
|
488 | 488 | listignored='ignored' in keys, |
|
489 | 489 | listclean='clean' in keys, |
|
490 | 490 | listunknown='unknown' in keys) |
|
491 | 491 | |
|
492 | 492 | def status(self): |
|
493 | 493 | return self._status |
|
494 | 494 | |
|
495 | 495 | def matcher(self, patterns): |
|
496 | 496 | return self.ctx.match(patterns, badfn=self._badfn) |
|
497 | 497 | |
|
498 | 498 | def predicate(self, predfn, predrepr=None, cache=False): |
|
499 | 499 | """Create a matcher to select files by predfn(filename)""" |
|
500 | 500 | if cache: |
|
501 | 501 | predfn = util.cachefunc(predfn) |
|
502 | 502 | return matchmod.predicatematcher(predfn, predrepr=predrepr, |
|
503 | 503 | badfn=self._badfn) |
|
504 | 504 | |
|
505 | 505 | def fpredicate(self, predfn, predrepr=None, cache=False): |
|
506 | 506 | """Create a matcher to select files by predfn(fctx) at the current |
|
507 | 507 | revision |
|
508 | 508 | |
|
509 | 509 | Missing files are ignored. |
|
510 | 510 | """ |
|
511 | 511 | ctx = self.ctx |
|
512 | 512 | if ctx.rev() is None: |
|
513 | 513 | def fctxpredfn(f): |
|
514 | 514 | try: |
|
515 | 515 | fctx = ctx[f] |
|
516 | 516 | except error.LookupError: |
|
517 | 517 | return False |
|
518 | 518 | try: |
|
519 | 519 | fctx.audit() |
|
520 | 520 | except error.Abort: |
|
521 | 521 | return False |
|
522 | 522 | try: |
|
523 | 523 | return predfn(fctx) |
|
524 | 524 | except (IOError, OSError) as e: |
|
525 | 525 | # open()-ing a directory fails with EACCES on Windows |
|
526 | 526 | if e.errno in (errno.ENOENT, errno.EACCES, errno.ENOTDIR, |
|
527 | 527 | errno.EISDIR): |
|
528 | 528 | return False |
|
529 | 529 | raise |
|
530 | 530 | else: |
|
531 | 531 | def fctxpredfn(f): |
|
532 | 532 | try: |
|
533 | 533 | fctx = ctx[f] |
|
534 | 534 | except error.LookupError: |
|
535 | 535 | return False |
|
536 | 536 | return predfn(fctx) |
|
537 | 537 | return self.predicate(fctxpredfn, predrepr=predrepr, cache=cache) |
|
538 | 538 | |
|
539 | 539 | def never(self): |
|
540 | 540 | """Create a matcher to select nothing""" |
|
541 | repo = self.ctx.repo() | |
|
542 | return matchmod.never(repo.root, repo.getcwd(), badfn=self._badfn) | |
|
541 | return matchmod.never(badfn=self._badfn) | |
|
543 | 542 | |
|
544 | 543 | def match(ctx, expr, badfn=None): |
|
545 | 544 | """Create a matcher for a single fileset expression""" |
|
546 | 545 | tree = filesetlang.parse(expr) |
|
547 | 546 | tree = filesetlang.analyze(tree) |
|
548 | 547 | tree = filesetlang.optimize(tree) |
|
549 | 548 | mctx = matchctx(ctx.p1(), ctx, badfn=badfn) |
|
550 | 549 | return getmatch(mctx, tree) |
|
551 | 550 | |
|
552 | 551 | |
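A minimal sketch of driving ``match()`` from a repository object (the repository path and the expression are illustrative; assumes a Mercurial installation is importable)::

    from mercurial import hg, ui as uimod, fileset

    repo = hg.repository(uimod.ui.load(), b'.')
    ctx = repo[b'.']  # working-directory parent
    m = fileset.match(ctx, b"size('>1k') and not binary()")
    big_text_files = [f for f in ctx.manifest() if m(f)]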
|
553 | 552 | def loadpredicate(ui, extname, registrarobj): |
|
554 | 553 | """Load fileset predicates from specified registrarobj |
|
555 | 554 | """ |
|
556 | 555 | for name, func in registrarobj._table.iteritems(): |
|
557 | 556 | symbols[name] = func |
|
558 | 557 | |
|
559 | 558 | # tell hggettext to extract docstrings from these functions: |
|
560 | 559 | i18nfunctions = symbols.values() |
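``loadpredicate()`` above is the hook through which extensions contribute predicates. A hedged sketch of such an extension (the predicate name and module are invented for illustration; the registrar pattern mirrors the built-ins earlier in this file)::

    # illustrativeext.py - sketch only, not part of Mercurial
    from mercurial import filesetlang, registrar
    from mercurial.i18n import _

    filesetpredicate = registrar.filesetpredicate()

    @filesetpredicate(b'emptyfile()')
    def emptyfile(mctx, x):
        """File that is tracked and has zero length."""
        filesetlang.getargs(x, 0, 0, _(b"emptyfile takes no arguments"))
        return mctx.fpredicate(lambda fctx: fctx.size() == 0,
                               predrepr=b'emptyfile', cache=True)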
@@ -1,813 +1,813 b'' | |||
|
1 | 1 | # hgweb/webutil.py - utility library for the web interface. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
4 | 4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import copy |
|
12 | 12 | import difflib |
|
13 | 13 | import os |
|
14 | 14 | import re |
|
15 | 15 | |
|
16 | 16 | from ..i18n import _ |
|
17 | 17 | from ..node import hex, nullid, short |
|
18 | 18 | |
|
19 | 19 | from .common import ( |
|
20 | 20 | ErrorResponse, |
|
21 | 21 | HTTP_BAD_REQUEST, |
|
22 | 22 | HTTP_NOT_FOUND, |
|
23 | 23 | paritygen, |
|
24 | 24 | ) |
|
25 | 25 | |
|
26 | 26 | from .. import ( |
|
27 | 27 | context, |
|
28 | 28 | diffutil, |
|
29 | 29 | error, |
|
30 | 30 | match, |
|
31 | 31 | mdiff, |
|
32 | 32 | obsutil, |
|
33 | 33 | patch, |
|
34 | 34 | pathutil, |
|
35 | 35 | pycompat, |
|
36 | 36 | scmutil, |
|
37 | 37 | templatefilters, |
|
38 | 38 | templatekw, |
|
39 | 39 | templateutil, |
|
40 | 40 | ui as uimod, |
|
41 | 41 | util, |
|
42 | 42 | ) |
|
43 | 43 | |
|
44 | 44 | from ..utils import ( |
|
45 | 45 | stringutil, |
|
46 | 46 | ) |
|
47 | 47 | |
|
48 | 48 | archivespecs = util.sortdict(( |
|
49 | 49 | ('zip', ('application/zip', 'zip', '.zip', None)), |
|
50 | 50 | ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)), |
|
51 | 51 | ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)), |
|
52 | 52 | )) |
|
53 | 53 | |
|
54 | 54 | def archivelist(ui, nodeid, url=None): |
|
55 | 55 | allowed = ui.configlist('web', 'allow-archive', untrusted=True) |
|
56 | 56 | archives = [] |
|
57 | 57 | |
|
58 | 58 | for typ, spec in archivespecs.iteritems(): |
|
59 | 59 | if typ in allowed or ui.configbool('web', 'allow' + typ, |
|
60 | 60 | untrusted=True): |
|
61 | 61 | archives.append({ |
|
62 | 62 | 'type': typ, |
|
63 | 63 | 'extension': spec[2], |
|
64 | 64 | 'node': nodeid, |
|
65 | 65 | 'url': url, |
|
66 | 66 | }) |
|
67 | 67 | |
|
68 | 68 | return templateutil.mappinglist(archives) |
|
69 | 69 | |
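``archivelist()`` only advertises download formats the server allows. An example hgrc snippet enabling all three types listed in ``archivespecs`` (the values follow from the keys above)::

    [web]
    allow-archive = zip gz bz2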
|
70 | 70 | def up(p): |
|
71 | 71 | if p[0:1] != "/": |
|
72 | 72 | p = "/" + p |
|
73 | 73 | if p[-1:] == "/": |
|
74 | 74 | p = p[:-1] |
|
75 | 75 | up = os.path.dirname(p) |
|
76 | 76 | if up == "/": |
|
77 | 77 | return "/" |
|
78 | 78 | return up + "/" |
|
79 | 79 | |
|
80 | 80 | def _navseq(step, firststep=None): |
|
81 | 81 | if firststep: |
|
82 | 82 | yield firststep |
|
83 | 83 | if firststep >= 20 and firststep <= 40: |
|
84 | 84 | firststep = 50 |
|
85 | 85 | yield firststep |
|
86 | 86 | assert step > 0 |
|
87 | 87 | assert firststep > 0 |
|
88 | 88 | while step <= firststep: |
|
89 | 89 | step *= 10 |
|
90 | 90 | while True: |
|
91 | 91 | yield 1 * step |
|
92 | 92 | yield 3 * step |
|
93 | 93 | step *= 10 |
|
94 | 94 | |
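To make the navigation-step sequence concrete, an illustrative trace of the generator above (the values follow directly from the code)::

    import itertools
    list(itertools.islice(_navseq(1, 10), 6))
    # -> [10, 100, 300, 1000, 3000, 10000]: the first step, then 1x and 3x
    #    of each power of ten larger than it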
|
95 | 95 | class revnav(object): |
|
96 | 96 | |
|
97 | 97 | def __init__(self, repo): |
|
98 | 98 | """Navigation generation object |
|
99 | 99 | |
|
100 | 100 | :repo: repo object we generate nav for |
|
101 | 101 | """ |
|
102 | 102 | # used for hex generation |
|
103 | 103 | self._revlog = repo.changelog |
|
104 | 104 | |
|
105 | 105 | def __nonzero__(self): |
|
106 | 106 | """return True if any revision to navigate over""" |
|
107 | 107 | return self._first() is not None |
|
108 | 108 | |
|
109 | 109 | __bool__ = __nonzero__ |
|
110 | 110 | |
|
111 | 111 | def _first(self): |
|
112 | 112 | """return the minimum non-filtered changeset or None""" |
|
113 | 113 | try: |
|
114 | 114 | return next(iter(self._revlog)) |
|
115 | 115 | except StopIteration: |
|
116 | 116 | return None |
|
117 | 117 | |
|
118 | 118 | def hex(self, rev): |
|
119 | 119 | return hex(self._revlog.node(rev)) |
|
120 | 120 | |
|
121 | 121 | def gen(self, pos, pagelen, limit): |
|
122 | 122 | """computes label and revision id for navigation link |
|
123 | 123 | |
|
124 | 124 | :pos: is the revision relative to which we generate navigation. |
|
125 | 125 | :pagelen: the size of each navigation page |
|
126 | 126 | :limit: how far shall we link |
|
127 | 127 | |
|
128 | 128 | The return is: |
|
129 | 129 | - a single element mappinglist |
|
130 | 130 | - containing a dictionary with a `before` and `after` key |
|
131 | 131 | - values are dictionaries with `label` and `node` keys |
|
132 | 132 | """ |
|
133 | 133 | if not self: |
|
134 | 134 | # empty repo |
|
135 | 135 | return templateutil.mappinglist([ |
|
136 | 136 | {'before': templateutil.mappinglist([]), |
|
137 | 137 | 'after': templateutil.mappinglist([])}, |
|
138 | 138 | ]) |
|
139 | 139 | |
|
140 | 140 | targets = [] |
|
141 | 141 | for f in _navseq(1, pagelen): |
|
142 | 142 | if f > limit: |
|
143 | 143 | break |
|
144 | 144 | targets.append(pos + f) |
|
145 | 145 | targets.append(pos - f) |
|
146 | 146 | targets.sort() |
|
147 | 147 | |
|
148 | 148 | first = self._first() |
|
149 | 149 | navbefore = [{'label': '(%i)' % first, 'node': self.hex(first)}] |
|
150 | 150 | navafter = [] |
|
151 | 151 | for rev in targets: |
|
152 | 152 | if rev not in self._revlog: |
|
153 | 153 | continue |
|
154 | 154 | if pos < rev < limit: |
|
155 | 155 | navafter.append({'label': '+%d' % abs(rev - pos), |
|
156 | 156 | 'node': self.hex(rev)}) |
|
157 | 157 | if 0 < rev < pos: |
|
158 | 158 | navbefore.append({'label': '-%d' % abs(rev - pos), |
|
159 | 159 | 'node': self.hex(rev)}) |
|
160 | 160 | |
|
161 | 161 | navafter.append({'label': 'tip', 'node': 'tip'}) |
|
162 | 162 | |
|
163 | 163 | # TODO: maybe this can be a scalar object supporting tomap() |
|
164 | 164 | return templateutil.mappinglist([ |
|
165 | 165 | {'before': templateutil.mappinglist(navbefore), |
|
166 | 166 | 'after': templateutil.mappinglist(navafter)}, |
|
167 | 167 | ]) |
|
168 | 168 | |
|
169 | 169 | class filerevnav(revnav): |
|
170 | 170 | |
|
171 | 171 | def __init__(self, repo, path): |
|
172 | 172 | """Navigation generation object |
|
173 | 173 | |
|
174 | 174 | :repo: repo object we generate nav for |
|
175 | 175 | :path: path of the file we generate nav for |
|
176 | 176 | """ |
|
177 | 177 | # used for iteration |
|
178 | 178 | self._changelog = repo.unfiltered().changelog |
|
179 | 179 | # used for hex generation |
|
180 | 180 | self._revlog = repo.file(path) |
|
181 | 181 | |
|
182 | 182 | def hex(self, rev): |
|
183 | 183 | return hex(self._changelog.node(self._revlog.linkrev(rev))) |
|
184 | 184 | |
|
185 | 185 | # TODO: maybe this can be a wrapper class for changectx/filectx list, which |
|
186 | 186 | # yields {'ctx': ctx} |
|
187 | 187 | def _ctxsgen(context, ctxs): |
|
188 | 188 | for s in ctxs: |
|
189 | 189 | d = { |
|
190 | 190 | 'node': s.hex(), |
|
191 | 191 | 'rev': s.rev(), |
|
192 | 192 | 'user': s.user(), |
|
193 | 193 | 'date': s.date(), |
|
194 | 194 | 'description': s.description(), |
|
195 | 195 | 'branch': s.branch(), |
|
196 | 196 | } |
|
197 | 197 | if util.safehasattr(s, 'path'): |
|
198 | 198 | d['file'] = s.path() |
|
199 | 199 | yield d |
|
200 | 200 | |
|
201 | 201 | def _siblings(siblings=None, hiderev=None): |
|
202 | 202 | if siblings is None: |
|
203 | 203 | siblings = [] |
|
204 | 204 | siblings = [s for s in siblings if s.node() != nullid] |
|
205 | 205 | if len(siblings) == 1 and siblings[0].rev() == hiderev: |
|
206 | 206 | siblings = [] |
|
207 | 207 | return templateutil.mappinggenerator(_ctxsgen, args=(siblings,)) |
|
208 | 208 | |
|
209 | 209 | def difffeatureopts(req, ui, section): |
|
210 | 210 | diffopts = diffutil.difffeatureopts(ui, untrusted=True, |
|
211 | 211 | section=section, whitespace=True) |
|
212 | 212 | |
|
213 | 213 | for k in ('ignorews', 'ignorewsamount', 'ignorewseol', 'ignoreblanklines'): |
|
214 | 214 | v = req.qsparams.get(k) |
|
215 | 215 | if v is not None: |
|
216 | 216 | v = stringutil.parsebool(v) |
|
217 | 217 | setattr(diffopts, k, v if v is not None else True) |
|
218 | 218 | |
|
219 | 219 | return diffopts |
|
220 | 220 | |
|
221 | 221 | def annotate(req, fctx, ui): |
|
222 | 222 | diffopts = difffeatureopts(req, ui, 'annotate') |
|
223 | 223 | return fctx.annotate(follow=True, diffopts=diffopts) |
|
224 | 224 | |
|
225 | 225 | def parents(ctx, hide=None): |
|
226 | 226 | if isinstance(ctx, context.basefilectx): |
|
227 | 227 | introrev = ctx.introrev() |
|
228 | 228 | if ctx.changectx().rev() != introrev: |
|
229 | 229 | return _siblings([ctx.repo()[introrev]], hide) |
|
230 | 230 | return _siblings(ctx.parents(), hide) |
|
231 | 231 | |
|
232 | 232 | def children(ctx, hide=None): |
|
233 | 233 | return _siblings(ctx.children(), hide) |
|
234 | 234 | |
|
235 | 235 | def renamelink(fctx): |
|
236 | 236 | r = fctx.renamed() |
|
237 | 237 | if r: |
|
238 | 238 | return templateutil.mappinglist([{'file': r[0], 'node': hex(r[1])}]) |
|
239 | 239 | return templateutil.mappinglist([]) |
|
240 | 240 | |
|
241 | 241 | def nodetagsdict(repo, node): |
|
242 | 242 | return templateutil.hybridlist(repo.nodetags(node), name='name') |
|
243 | 243 | |
|
244 | 244 | def nodebookmarksdict(repo, node): |
|
245 | 245 | return templateutil.hybridlist(repo.nodebookmarks(node), name='name') |
|
246 | 246 | |
|
247 | 247 | def nodebranchdict(repo, ctx): |
|
248 | 248 | branches = [] |
|
249 | 249 | branch = ctx.branch() |
|
250 | 250 | # If this is an empty repo, ctx.node() == nullid, |
|
251 | 251 | # ctx.branch() == 'default'. |
|
252 | 252 | try: |
|
253 | 253 | branchnode = repo.branchtip(branch) |
|
254 | 254 | except error.RepoLookupError: |
|
255 | 255 | branchnode = None |
|
256 | 256 | if branchnode == ctx.node(): |
|
257 | 257 | branches.append(branch) |
|
258 | 258 | return templateutil.hybridlist(branches, name='name') |
|
259 | 259 | |
|
260 | 260 | def nodeinbranch(repo, ctx): |
|
261 | 261 | branches = [] |
|
262 | 262 | branch = ctx.branch() |
|
263 | 263 | try: |
|
264 | 264 | branchnode = repo.branchtip(branch) |
|
265 | 265 | except error.RepoLookupError: |
|
266 | 266 | branchnode = None |
|
267 | 267 | if branch != 'default' and branchnode != ctx.node(): |
|
268 | 268 | branches.append(branch) |
|
269 | 269 | return templateutil.hybridlist(branches, name='name') |
|
270 | 270 | |
|
271 | 271 | def nodebranchnodefault(ctx): |
|
272 | 272 | branches = [] |
|
273 | 273 | branch = ctx.branch() |
|
274 | 274 | if branch != 'default': |
|
275 | 275 | branches.append(branch) |
|
276 | 276 | return templateutil.hybridlist(branches, name='name') |
|
277 | 277 | |
|
278 | 278 | def _nodenamesgen(context, f, node, name): |
|
279 | 279 | for t in f(node): |
|
280 | 280 | yield {name: t} |
|
281 | 281 | |
|
282 | 282 | def showtag(repo, t1, node=nullid): |
|
283 | 283 | args = (repo.nodetags, node, 'tag') |
|
284 | 284 | return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1) |
|
285 | 285 | |
|
286 | 286 | def showbookmark(repo, t1, node=nullid): |
|
287 | 287 | args = (repo.nodebookmarks, node, 'bookmark') |
|
288 | 288 | return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1) |
|
289 | 289 | |
|
290 | 290 | def branchentries(repo, stripecount, limit=0): |
|
291 | 291 | tips = [] |
|
292 | 292 | heads = repo.heads() |
|
293 | 293 | parity = paritygen(stripecount) |
|
294 | 294 | sortkey = lambda item: (not item[1], item[0].rev()) |
|
295 | 295 | |
|
296 | 296 | def entries(context): |
|
297 | 297 | count = 0 |
|
298 | 298 | if not tips: |
|
299 | 299 | for tag, hs, tip, closed in repo.branchmap().iterbranches(): |
|
300 | 300 | tips.append((repo[tip], closed)) |
|
301 | 301 | for ctx, closed in sorted(tips, key=sortkey, reverse=True): |
|
302 | 302 | if limit > 0 and count >= limit: |
|
303 | 303 | return |
|
304 | 304 | count += 1 |
|
305 | 305 | if closed: |
|
306 | 306 | status = 'closed' |
|
307 | 307 | elif ctx.node() not in heads: |
|
308 | 308 | status = 'inactive' |
|
309 | 309 | else: |
|
310 | 310 | status = 'open' |
|
311 | 311 | yield { |
|
312 | 312 | 'parity': next(parity), |
|
313 | 313 | 'branch': ctx.branch(), |
|
314 | 314 | 'status': status, |
|
315 | 315 | 'node': ctx.hex(), |
|
316 | 316 | 'date': ctx.date() |
|
317 | 317 | } |
|
318 | 318 | |
|
319 | 319 | return templateutil.mappinggenerator(entries) |
|
320 | 320 | |
|
321 | 321 | def cleanpath(repo, path): |
|
322 | 322 | path = path.lstrip('/') |
|
323 | 323 | auditor = pathutil.pathauditor(repo.root, realfs=False) |
|
324 | 324 | return pathutil.canonpath(repo.root, '', path, auditor=auditor) |
|
325 | 325 | |
|
326 | 326 | def changectx(repo, req): |
|
327 | 327 | changeid = "tip" |
|
328 | 328 | if 'node' in req.qsparams: |
|
329 | 329 | changeid = req.qsparams['node'] |
|
330 | 330 | ipos = changeid.find(':') |
|
331 | 331 | if ipos != -1: |
|
332 | 332 | changeid = changeid[(ipos + 1):] |
|
333 | 333 | |
|
334 | 334 | return scmutil.revsymbol(repo, changeid) |
|
335 | 335 | |
|
336 | 336 | def basechangectx(repo, req): |
|
337 | 337 | if 'node' in req.qsparams: |
|
338 | 338 | changeid = req.qsparams['node'] |
|
339 | 339 | ipos = changeid.find(':') |
|
340 | 340 | if ipos != -1: |
|
341 | 341 | changeid = changeid[:ipos] |
|
342 | 342 | return scmutil.revsymbol(repo, changeid) |
|
343 | 343 | |
|
344 | 344 | return None |
|
345 | 345 | |
|
346 | 346 | def filectx(repo, req): |
|
347 | 347 | if 'file' not in req.qsparams: |
|
348 | 348 | raise ErrorResponse(HTTP_NOT_FOUND, 'file not given') |
|
349 | 349 | path = cleanpath(repo, req.qsparams['file']) |
|
350 | 350 | if 'node' in req.qsparams: |
|
351 | 351 | changeid = req.qsparams['node'] |
|
352 | 352 | elif 'filenode' in req.qsparams: |
|
353 | 353 | changeid = req.qsparams['filenode'] |
|
354 | 354 | else: |
|
355 | 355 | raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given') |
|
356 | 356 | try: |
|
357 | 357 | fctx = scmutil.revsymbol(repo, changeid)[path] |
|
358 | 358 | except error.RepoError: |
|
359 | 359 | fctx = repo.filectx(path, fileid=changeid) |
|
360 | 360 | |
|
361 | 361 | return fctx |
|
362 | 362 | |
|
363 | 363 | def linerange(req): |
|
364 | 364 | linerange = req.qsparams.getall('linerange') |
|
365 | 365 | if not linerange: |
|
366 | 366 | return None |
|
367 | 367 | if len(linerange) > 1: |
|
368 | 368 | raise ErrorResponse(HTTP_BAD_REQUEST, |
|
369 | 369 | 'redundant linerange parameter') |
|
370 | 370 | try: |
|
371 | 371 | fromline, toline = map(int, linerange[0].split(':', 1)) |
|
372 | 372 | except ValueError: |
|
373 | 373 | raise ErrorResponse(HTTP_BAD_REQUEST, |
|
374 | 374 | 'invalid linerange parameter') |
|
375 | 375 | try: |
|
376 | 376 | return util.processlinerange(fromline, toline) |
|
377 | 377 | except error.ParseError as exc: |
|
378 | 378 | raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc)) |
|
379 | 379 | |
|
380 | 380 | def formatlinerange(fromline, toline): |
|
381 | 381 | return '%d:%d' % (fromline + 1, toline) |
|
382 | 382 | |
|
383 | 383 | def _succsandmarkersgen(context, mapping): |
|
384 | 384 | repo = context.resource(mapping, 'repo') |
|
385 | 385 | itemmappings = templatekw.showsuccsandmarkers(context, mapping) |
|
386 | 386 | for item in itemmappings.tovalue(context, mapping): |
|
387 | 387 | item['successors'] = _siblings(repo[successor] |
|
388 | 388 | for successor in item['successors']) |
|
389 | 389 | yield item |
|
390 | 390 | |
|
391 | 391 | def succsandmarkers(context, mapping): |
|
392 | 392 | return templateutil.mappinggenerator(_succsandmarkersgen, args=(mapping,)) |
|
393 | 393 | |
|
394 | 394 | # teach templater succsandmarkers is switched to (context, mapping) API |
|
395 | 395 | succsandmarkers._requires = {'repo', 'ctx'} |
|
396 | 396 | |
|
397 | 397 | def _whyunstablegen(context, mapping): |
|
398 | 398 | repo = context.resource(mapping, 'repo') |
|
399 | 399 | ctx = context.resource(mapping, 'ctx') |
|
400 | 400 | |
|
401 | 401 | entries = obsutil.whyunstable(repo, ctx) |
|
402 | 402 | for entry in entries: |
|
403 | 403 | if entry.get('divergentnodes'): |
|
404 | 404 | entry['divergentnodes'] = _siblings(entry['divergentnodes']) |
|
405 | 405 | yield entry |
|
406 | 406 | |
|
407 | 407 | def whyunstable(context, mapping): |
|
408 | 408 | return templateutil.mappinggenerator(_whyunstablegen, args=(mapping,)) |
|
409 | 409 | |
|
410 | 410 | whyunstable._requires = {'repo', 'ctx'} |
|
411 | 411 | |
|
412 | 412 | # helper to mark a function as a new-style template keyword; can be removed |
|
413 | 413 | # once old-style function gets unsupported and new-style becomes the default |
|
414 | 414 | def _kwfunc(f): |
|
415 | 415 | f._requires = () |
|
416 | 416 | return f |
|
417 | 417 | |
|
418 | 418 | def commonentry(repo, ctx): |
|
419 | 419 | node = scmutil.binnode(ctx) |
|
420 | 420 | return { |
|
421 | 421 | # TODO: perhaps ctx.changectx() should be assigned if ctx is a |
|
422 | 422 | # filectx, but I'm not entirely sure if that would always work because |
|
423 | 423 | # fctx.parents() != fctx.changectx.parents() for example. |
|
424 | 424 | 'ctx': ctx, |
|
425 | 425 | 'rev': ctx.rev(), |
|
426 | 426 | 'node': hex(node), |
|
427 | 427 | 'author': ctx.user(), |
|
428 | 428 | 'desc': ctx.description(), |
|
429 | 429 | 'date': ctx.date(), |
|
430 | 430 | 'extra': ctx.extra(), |
|
431 | 431 | 'phase': ctx.phasestr(), |
|
432 | 432 | 'obsolete': ctx.obsolete(), |
|
433 | 433 | 'succsandmarkers': succsandmarkers, |
|
434 | 434 | 'instabilities': templateutil.hybridlist(ctx.instabilities(), |
|
435 | 435 | name='instability'), |
|
436 | 436 | 'whyunstable': whyunstable, |
|
437 | 437 | 'branch': nodebranchnodefault(ctx), |
|
438 | 438 | 'inbranch': nodeinbranch(repo, ctx), |
|
439 | 439 | 'branches': nodebranchdict(repo, ctx), |
|
440 | 440 | 'tags': nodetagsdict(repo, node), |
|
441 | 441 | 'bookmarks': nodebookmarksdict(repo, node), |
|
442 | 442 | 'parent': _kwfunc(lambda context, mapping: parents(ctx)), |
|
443 | 443 | 'child': _kwfunc(lambda context, mapping: children(ctx)), |
|
444 | 444 | } |
|
445 | 445 | |
|
446 | 446 | def changelistentry(web, ctx): |
|
447 | 447 | '''Obtain a dictionary to be used for entries in a changelist. |
|
448 | 448 | |
|
449 | 449 | This function is called when producing items for the "entries" list passed |
|
450 | 450 | to the "shortlog" and "changelog" templates. |
|
451 | 451 | ''' |
|
452 | 452 | repo = web.repo |
|
453 | 453 | rev = ctx.rev() |
|
454 | 454 | n = scmutil.binnode(ctx) |
|
455 | 455 | showtags = showtag(repo, 'changelogtag', n) |
|
456 | 456 | files = listfilediffs(ctx.files(), n, web.maxfiles) |
|
457 | 457 | |
|
458 | 458 | entry = commonentry(repo, ctx) |
|
459 | 459 | entry.update({ |
|
460 | 460 | 'allparents': _kwfunc(lambda context, mapping: parents(ctx)), |
|
461 | 461 | 'parent': _kwfunc(lambda context, mapping: parents(ctx, rev - 1)), |
|
462 | 462 | 'child': _kwfunc(lambda context, mapping: children(ctx, rev + 1)), |
|
463 | 463 | 'changelogtag': showtags, |
|
464 | 464 | 'files': files, |
|
465 | 465 | }) |
|
466 | 466 | return entry |
|
467 | 467 | |
|
468 | 468 | def changelistentries(web, revs, maxcount, parityfn): |
|
469 | 469 | """Emit up to N records for an iterable of revisions.""" |
|
470 | 470 | repo = web.repo |
|
471 | 471 | |
|
472 | 472 | count = 0 |
|
473 | 473 | for rev in revs: |
|
474 | 474 | if count >= maxcount: |
|
475 | 475 | break |
|
476 | 476 | |
|
477 | 477 | count += 1 |
|
478 | 478 | |
|
479 | 479 | entry = changelistentry(web, repo[rev]) |
|
480 | 480 | entry['parity'] = next(parityfn) |
|
481 | 481 | |
|
482 | 482 | yield entry |
|
483 | 483 | |
|
484 | 484 | def symrevorshortnode(req, ctx): |
|
485 | 485 | if 'node' in req.qsparams: |
|
486 | 486 | return templatefilters.revescape(req.qsparams['node']) |
|
487 | 487 | else: |
|
488 | 488 | return short(scmutil.binnode(ctx)) |
|
489 | 489 | |
|
490 | 490 | def _listfilesgen(context, ctx, stripecount): |
|
491 | 491 | parity = paritygen(stripecount) |
|
492 | 492 | for blockno, f in enumerate(ctx.files()): |
|
493 | 493 | template = 'filenodelink' if f in ctx else 'filenolink' |
|
494 | 494 | yield context.process(template, { |
|
495 | 495 | 'node': ctx.hex(), |
|
496 | 496 | 'file': f, |
|
497 | 497 | 'blockno': blockno + 1, |
|
498 | 498 | 'parity': next(parity), |
|
499 | 499 | }) |
|
500 | 500 | |
|
501 | 501 | def changesetentry(web, ctx): |
|
502 | 502 | '''Obtain a dictionary to be used to render the "changeset" template.''' |
|
503 | 503 | |
|
504 | 504 | showtags = showtag(web.repo, 'changesettag', scmutil.binnode(ctx)) |
|
505 | 505 | showbookmarks = showbookmark(web.repo, 'changesetbookmark', |
|
506 | 506 | scmutil.binnode(ctx)) |
|
507 | 507 | showbranch = nodebranchnodefault(ctx) |
|
508 | 508 | |
|
509 | 509 | basectx = basechangectx(web.repo, web.req) |
|
510 | 510 | if basectx is None: |
|
511 | 511 | basectx = ctx.p1() |
|
512 | 512 | |
|
513 | 513 | style = web.config('web', 'style') |
|
514 | 514 | if 'style' in web.req.qsparams: |
|
515 | 515 | style = web.req.qsparams['style'] |
|
516 | 516 | |
|
517 | 517 | diff = diffs(web, ctx, basectx, None, style) |
|
518 | 518 | |
|
519 | 519 | parity = paritygen(web.stripecount) |
|
520 | 520 | diffstatsgen = diffstatgen(web.repo.ui, ctx, basectx) |
|
521 | 521 | diffstats = diffstat(ctx, diffstatsgen, parity) |
|
522 | 522 | |
|
523 | 523 | return dict( |
|
524 | 524 | diff=diff, |
|
525 | 525 | symrev=symrevorshortnode(web.req, ctx), |
|
526 | 526 | basenode=basectx.hex(), |
|
527 | 527 | changesettag=showtags, |
|
528 | 528 | changesetbookmark=showbookmarks, |
|
529 | 529 | changesetbranch=showbranch, |
|
530 | 530 | files=templateutil.mappedgenerator(_listfilesgen, |
|
531 | 531 | args=(ctx, web.stripecount)), |
|
532 | 532 | diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)), |
|
533 | 533 | diffstat=diffstats, |
|
534 | 534 | archives=web.archivelist(ctx.hex()), |
|
535 | 535 | **pycompat.strkwargs(commonentry(web.repo, ctx))) |
|
536 | 536 | |
|
537 | 537 | def _listfilediffsgen(context, files, node, max): |
|
538 | 538 | for f in files[:max]: |
|
539 | 539 | yield context.process('filedifflink', {'node': hex(node), 'file': f}) |
|
540 | 540 | if len(files) > max: |
|
541 | 541 | yield context.process('fileellipses', {}) |
|
542 | 542 | |
|
543 | 543 | def listfilediffs(files, node, max): |
|
544 | 544 | return templateutil.mappedgenerator(_listfilediffsgen, |
|
545 | 545 | args=(files, node, max)) |
|
546 | 546 | |
|
547 | 547 | def _prettyprintdifflines(context, lines, blockno, lineidprefix): |
|
548 | 548 | for lineno, l in enumerate(lines, 1): |
|
549 | 549 | difflineno = "%d.%d" % (blockno, lineno) |
|
550 | 550 | if l.startswith('+'): |
|
551 | 551 | ltype = "difflineplus" |
|
552 | 552 | elif l.startswith('-'): |
|
553 | 553 | ltype = "difflineminus" |
|
554 | 554 | elif l.startswith('@'): |
|
555 | 555 | ltype = "difflineat" |
|
556 | 556 | else: |
|
557 | 557 | ltype = "diffline" |
|
558 | 558 | yield context.process(ltype, { |
|
559 | 559 | 'line': l, |
|
560 | 560 | 'lineno': lineno, |
|
561 | 561 | 'lineid': lineidprefix + "l%s" % difflineno, |
|
562 | 562 | 'linenumber': "% 8s" % difflineno, |
|
563 | 563 | }) |
|
564 | 564 | |
|
565 | 565 | def _diffsgen(context, repo, ctx, basectx, files, style, stripecount, |
|
566 | 566 | linerange, lineidprefix): |
|
567 | 567 | if files: |

568 | m = match.exact(

568 | m = match.exact(files) | |

569 | 569 | else: |

570 | m = match.always(

570 | m = match.always() | |

571 | 571 | |
|
572 | 572 | diffopts = patch.diffopts(repo.ui, untrusted=True) |
|
573 | 573 | parity = paritygen(stripecount) |
|
574 | 574 | |
|
575 | 575 | diffhunks = patch.diffhunks(repo, basectx, ctx, m, opts=diffopts) |
|
576 | 576 | for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1): |
|
577 | 577 | if style != 'raw': |
|
578 | 578 | header = header[1:] |
|
579 | 579 | lines = [h + '\n' for h in header] |
|
580 | 580 | for hunkrange, hunklines in hunks: |
|
581 | 581 | if linerange is not None and hunkrange is not None: |
|
582 | 582 | s1, l1, s2, l2 = hunkrange |
|
583 | 583 | if not mdiff.hunkinrange((s2, l2), linerange): |
|
584 | 584 | continue |
|
585 | 585 | lines.extend(hunklines) |
|
586 | 586 | if lines: |
|
587 | 587 | l = templateutil.mappedgenerator(_prettyprintdifflines, |
|
588 | 588 | args=(lines, blockno, |
|
589 | 589 | lineidprefix)) |
|
590 | 590 | yield { |
|
591 | 591 | 'parity': next(parity), |
|
592 | 592 | 'blockno': blockno, |
|
593 | 593 | 'lines': l, |
|
594 | 594 | } |
|
595 | 595 | |
|
596 | 596 | def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=''): |
|
597 | 597 | args = (web.repo, ctx, basectx, files, style, web.stripecount, |
|
598 | 598 | linerange, lineidprefix) |
|
599 | 599 | return templateutil.mappinggenerator(_diffsgen, args=args, name='diffblock') |
|
600 | 600 | |
|
601 | 601 | def _compline(type, leftlineno, leftline, rightlineno, rightline): |
|
602 | 602 | lineid = leftlineno and ("l%d" % leftlineno) or '' |
|
603 | 603 | lineid += rightlineno and ("r%d" % rightlineno) or '' |
|
604 | 604 | llno = '%d' % leftlineno if leftlineno else '' |
|
605 | 605 | rlno = '%d' % rightlineno if rightlineno else '' |
|
606 | 606 | return { |
|
607 | 607 | 'type': type, |
|
608 | 608 | 'lineid': lineid, |
|
609 | 609 | 'leftlineno': leftlineno, |
|
610 | 610 | 'leftlinenumber': "% 6s" % llno, |
|
611 | 611 | 'leftline': leftline or '', |
|
612 | 612 | 'rightlineno': rightlineno, |
|
613 | 613 | 'rightlinenumber': "% 6s" % rlno, |
|
614 | 614 | 'rightline': rightline or '', |
|
615 | 615 | } |
|
616 | 616 | |
|
617 | 617 | def _getcompblockgen(context, leftlines, rightlines, opcodes): |
|
618 | 618 | for type, llo, lhi, rlo, rhi in opcodes: |
|
619 | 619 | type = pycompat.sysbytes(type) |
|
620 | 620 | len1 = lhi - llo |
|
621 | 621 | len2 = rhi - rlo |
|
622 | 622 | count = min(len1, len2) |
|
623 | 623 | for i in pycompat.xrange(count): |
|
624 | 624 | yield _compline(type=type, |
|
625 | 625 | leftlineno=llo + i + 1, |
|
626 | 626 | leftline=leftlines[llo + i], |
|
627 | 627 | rightlineno=rlo + i + 1, |
|
628 | 628 | rightline=rightlines[rlo + i]) |
|
629 | 629 | if len1 > len2: |
|
630 | 630 | for i in pycompat.xrange(llo + count, lhi): |
|
631 | 631 | yield _compline(type=type, |
|
632 | 632 | leftlineno=i + 1, |
|
633 | 633 | leftline=leftlines[i], |
|
634 | 634 | rightlineno=None, |
|
635 | 635 | rightline=None) |
|
636 | 636 | elif len2 > len1: |
|
637 | 637 | for i in pycompat.xrange(rlo + count, rhi): |
|
638 | 638 | yield _compline(type=type, |
|
639 | 639 | leftlineno=None, |
|
640 | 640 | leftline=None, |
|
641 | 641 | rightlineno=i + 1, |
|
642 | 642 | rightline=rightlines[i]) |
|
643 | 643 | |
|
644 | 644 | def _getcompblock(leftlines, rightlines, opcodes): |
|
645 | 645 | args = (leftlines, rightlines, opcodes) |
|
646 | 646 | return templateutil.mappinggenerator(_getcompblockgen, args=args, |
|
647 | 647 | name='comparisonline') |
|
648 | 648 | |
|
649 | 649 | def _comparegen(context, contextnum, leftlines, rightlines): |
|
650 | 650 | '''Generator function that provides side-by-side comparison data.''' |
|
651 | 651 | s = difflib.SequenceMatcher(None, leftlines, rightlines) |
|
652 | 652 | if contextnum < 0: |
|
653 | 653 | l = _getcompblock(leftlines, rightlines, s.get_opcodes()) |
|
654 | 654 | yield {'lines': l} |
|
655 | 655 | else: |
|
656 | 656 | for oc in s.get_grouped_opcodes(n=contextnum): |
|
657 | 657 | l = _getcompblock(leftlines, rightlines, oc) |
|
658 | 658 | yield {'lines': l} |
|
659 | 659 | |
|
660 | 660 | def compare(contextnum, leftlines, rightlines): |
|
661 | 661 | args = (contextnum, leftlines, rightlines) |
|
662 | 662 | return templateutil.mappinggenerator(_comparegen, args=args, |
|
663 | 663 | name='comparisonblock') |
|
664 | 664 | |
|
665 | 665 | def diffstatgen(ui, ctx, basectx): |
|
666 | 666 | '''Generator function that provides the diffstat data.''' |
|
667 | 667 | |
|
668 | 668 | diffopts = patch.diffopts(ui, {'noprefix': False}) |
|
669 | 669 | stats = patch.diffstatdata( |
|
670 | 670 | util.iterlines(ctx.diff(basectx, opts=diffopts))) |
|
671 | 671 | maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats) |
|
672 | 672 | while True: |
|
673 | 673 | yield stats, maxname, maxtotal, addtotal, removetotal, binary |
|
674 | 674 | |
|
675 | 675 | def diffsummary(statgen): |
|
676 | 676 | '''Return a short summary of the diff.''' |
|
677 | 677 | |
|
678 | 678 | stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen) |
|
679 | 679 | return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % ( |
|
680 | 680 | len(stats), addtotal, removetotal) |
|
681 | 681 | |
|
682 | 682 | def _diffstattmplgen(context, ctx, statgen, parity): |
|
683 | 683 | stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen) |
|
684 | 684 | files = ctx.files() |
|
685 | 685 | |
|
686 | 686 | def pct(i): |
|
687 | 687 | if maxtotal == 0: |
|
688 | 688 | return 0 |
|
689 | 689 | return (float(i) / maxtotal) * 100 |
|
690 | 690 | |
|
691 | 691 | fileno = 0 |
|
692 | 692 | for filename, adds, removes, isbinary in stats: |
|
693 | 693 | template = 'diffstatlink' if filename in files else 'diffstatnolink' |
|
694 | 694 | total = adds + removes |
|
695 | 695 | fileno += 1 |
|
696 | 696 | yield context.process(template, { |
|
697 | 697 | 'node': ctx.hex(), |
|
698 | 698 | 'file': filename, |
|
699 | 699 | 'fileno': fileno, |
|
700 | 700 | 'total': total, |
|
701 | 701 | 'addpct': pct(adds), |
|
702 | 702 | 'removepct': pct(removes), |
|
703 | 703 | 'parity': next(parity), |
|
704 | 704 | }) |
|
705 | 705 | |
|
706 | 706 | def diffstat(ctx, statgen, parity): |
|
707 | 707 | '''Return a diffstat template for each file in the diff.''' |
|
708 | 708 | args = (ctx, statgen, parity) |
|
709 | 709 | return templateutil.mappedgenerator(_diffstattmplgen, args=args) |
|
710 | 710 | |
|
711 | 711 | class sessionvars(templateutil.wrapped): |
|
712 | 712 | def __init__(self, vars, start='?'): |
|
713 | 713 | self._start = start |
|
714 | 714 | self._vars = vars |
|
715 | 715 | |
|
716 | 716 | def __getitem__(self, key): |
|
717 | 717 | return self._vars[key] |
|
718 | 718 | |
|
719 | 719 | def __setitem__(self, key, value): |
|
720 | 720 | self._vars[key] = value |
|
721 | 721 | |
|
722 | 722 | def __copy__(self): |
|
723 | 723 | return sessionvars(copy.copy(self._vars), self._start) |
|
724 | 724 | |
|
725 | 725 | def contains(self, context, mapping, item): |
|
726 | 726 | item = templateutil.unwrapvalue(context, mapping, item) |
|
727 | 727 | return item in self._vars |
|
728 | 728 | |
|
729 | 729 | def getmember(self, context, mapping, key): |
|
730 | 730 | key = templateutil.unwrapvalue(context, mapping, key) |
|
731 | 731 | return self._vars.get(key) |
|
732 | 732 | |
|
733 | 733 | def getmin(self, context, mapping): |
|
734 | 734 | raise error.ParseError(_('not comparable')) |
|
735 | 735 | |
|
736 | 736 | def getmax(self, context, mapping): |
|
737 | 737 | raise error.ParseError(_('not comparable')) |
|
738 | 738 | |
|
739 | 739 | def filter(self, context, mapping, select): |
|
740 | 740 | # implement if necessary |
|
741 | 741 | raise error.ParseError(_('not filterable')) |
|
742 | 742 | |
|
743 | 743 | def itermaps(self, context): |
|
744 | 744 | separator = self._start |
|
745 | 745 | for key, value in sorted(self._vars.iteritems()): |
|
746 | 746 | yield {'name': key, |
|
747 | 747 | 'value': pycompat.bytestr(value), |
|
748 | 748 | 'separator': separator, |
|
749 | 749 | } |
|
750 | 750 | separator = '&' |
|
751 | 751 | |
|
752 | 752 | def join(self, context, mapping, sep): |
|
753 | 753 | # could be '{separator}{name}={value|urlescape}' |
|
754 | 754 | raise error.ParseError(_('not displayable without template')) |
|
755 | 755 | |
|
756 | 756 | def show(self, context, mapping): |
|
757 | 757 | return self.join(context, mapping, '') |
|
758 | 758 | |
|
759 | 759 | def tobool(self, context, mapping): |
|
760 | 760 | return bool(self._vars) |
|
761 | 761 | |
|
762 | 762 | def tovalue(self, context, mapping): |
|
763 | 763 | return self._vars |
|
764 | 764 | |
|
765 | 765 | class wsgiui(uimod.ui): |
|
766 | 766 | # default termwidth breaks under mod_wsgi |
|
767 | 767 | def termwidth(self): |
|
768 | 768 | return 80 |
|
769 | 769 | |
|
770 | 770 | def getwebsubs(repo): |
|
771 | 771 | websubtable = [] |
|
772 | 772 | websubdefs = repo.ui.configitems('websub') |
|
773 | 773 | # we must maintain interhg backwards compatibility |
|
774 | 774 | websubdefs += repo.ui.configitems('interhg') |
|
775 | 775 | for key, pattern in websubdefs: |
|
776 | 776 | # grab the delimiter from the character after the "s" |
|
777 | 777 | unesc = pattern[1:2] |
|
778 | 778 | delim = stringutil.reescape(unesc) |
|
779 | 779 | |
|
780 | 780 | # identify portions of the pattern, taking care to avoid escaped |
|
781 | 781 | # delimiters. the replace format and flags are optional, but |
|
782 | 782 | # delimiters are required. |
|
783 | 783 | match = re.match( |
|
784 | 784 | br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$' |
|
785 | 785 | % (delim, delim, delim), pattern) |
|
786 | 786 | if not match: |
|
787 | 787 | repo.ui.warn(_("websub: invalid pattern for %s: %s\n") |
|
788 | 788 | % (key, pattern)) |
|
789 | 789 | continue |
|
790 | 790 | |
|
791 | 791 | # we need to unescape the delimiter for regexp and format |
|
792 | 792 | delim_re = re.compile(br'(?<!\\)\\%s' % delim) |
|
793 | 793 | regexp = delim_re.sub(unesc, match.group(1)) |
|
794 | 794 | format = delim_re.sub(unesc, match.group(2)) |
|
795 | 795 | |
|
796 | 796 | # the pattern allows for 6 regexp flags, so set them if necessary |
|
797 | 797 | flagin = match.group(3) |
|
798 | 798 | flags = 0 |
|
799 | 799 | if flagin: |
|
800 | 800 | for flag in flagin.upper(): |
|
801 | 801 | flags |= re.__dict__[flag] |
|
802 | 802 | |
|
803 | 803 | try: |
|
804 | 804 | regexp = re.compile(regexp, flags) |
|
805 | 805 | websubtable.append((regexp, format)) |
|
806 | 806 | except re.error: |
|
807 | 807 | repo.ui.warn(_("websub: invalid regexp for %s: %s\n") |
|
808 | 808 | % (key, regexp)) |
|
809 | 809 | return websubtable |
|
810 | 810 | |
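The ``[websub]`` entries parsed above use an ``s/regexp/replacement/flags`` form with an arbitrary delimiter. An illustrative hgrc entry (the tracker URL is made up) that this parser accepts::

    [websub]
    issues = s!\bissue(\d+)!<a href="https://bts.example.org/issue\1">issue\1</a>!i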
|
811 | 811 | def getgraphnode(repo, ctx): |
|
812 | 812 | return (templatekw.getgraphnodecurrent(repo, ctx) + |
|
813 | 813 | templatekw.getgraphnodesymbol(ctx)) |
|