@@ -0,0 +1,506 @@
# journal.py
#
# Copyright 2014-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Track previous positions of bookmarks (EXPERIMENTAL)

This extension adds a new command: `hg journal`, which shows you where
bookmarks were previously located.

"""

from __future__ import absolute_import

import collections
import errno
import os
import weakref

from mercurial.i18n import _

from mercurial import (
    bookmarks,
    cmdutil,
    commands,
    dirstate,
    dispatch,
    error,
    extensions,
    hg,
    localrepo,
    lock,
    node,
    util,
)

from . import share

cmdtable = {}
command = cmdutil.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'

# storage format version; increment when the format changes
storageversion = 0

# namespaces
bookmarktype = 'bookmark'
wdirparenttype = 'wdirparent'
# In a shared repository, what shared feature name is used
# to indicate this namespace is shared with the source?
sharednamespaces = {
    bookmarktype: hg.sharedbookmarks,
}

# Journal recording, register hooks and storage object
def extsetup(ui):
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    extensions.wrapfunction(
        dirstate.dirstate, '_writedirstate', recorddirstateparents)
    extensions.wrapfunction(
        localrepo.localrepository.dirstate, 'func', wrapdirstate)
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)

def reposetup(ui, repo):
    if repo.local():
        repo.journal = journalstorage(repo)

def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)

# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if util.safehasattr(repo, 'journal'):
        dirstate.journalstorage = repo.journal
    return dirstate

def recorddirstateparents(orig, dirstate, dirstatefp):
    """Records all dirstate parent changes in the journal."""
    if util.safehasattr(dirstate, 'journalstorage'):
        old = [node.nullid, node.nullid]
        nodesize = len(node.nullid)
        try:
            # The only source for the old state is in the dirstate file still
            # on disk; the in-memory dirstate object only contains the new
            # state. dirstate._opendirstatefile() switches between
            # .hg/dirstate and .hg/dirstate.pending depending on the
            # transaction state.
            with dirstate._opendirstatefile() as fp:
                state = fp.read(2 * nodesize)
            if len(state) == 2 * nodesize:
                old = [state[:nodesize], state[nodesize:]]
        except IOError:
            pass

        new = dirstate.parents()
        if old != new:
            # only record two hashes if there was a merge
            oldhashes = old[:1] if old[1] == node.nullid else old
            newhashes = new[:1] if new[1] == node.nullid else new
            dirstate.journalstorage.record(
                wdirparenttype, '.', oldhashes, newhashes)

    return orig(dirstate, dirstatefp)

# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)

# shared repository support
def _readsharedfeatures(repo):
    """A set of shared features for this repository"""
    try:
        return set(repo.vfs.read('shared').splitlines())
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return set()

def _mergeentriesiter(*iterables, **kwargs):
    """Given a set of sorted iterables, yield the next entry in merged order

    Note that by default entries go from most recent to oldest.
    """
    order = kwargs.pop('order', max)
    iterables = [iter(it) for it in iterables]
    # this tracks still active iterables; iterables are deleted as they are
    # exhausted, which is why this is a dictionary and why each entry also
    # stores the key. Entries are mutable so we can store the next value each
    # time.
    iterable_map = {}
    for key, it in enumerate(iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            # empty entry, can be ignored
            pass

    while iterable_map:
        value, key, it = order(iterable_map.itervalues())
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            # this iterable is empty, remove it from consideration
            del iterable_map[key]

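# Illustration, not part of journal.py: with plain integers standing in
# for journal entries, already-sorted inputs come out merged; `order`
# picks which head is yielded next (max = newest first by default).
#
#   >>> list(_mergeentriesiter([9, 5, 2], [8, 3]))
#   [9, 8, 5, 3, 2]
#   >>> list(_mergeentriesiter([2, 5, 9], [3, 8], order=min))
#   [2, 3, 5, 8, 9]
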
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    orig(sourcerepo, destrepo, **kwargs)
    with destrepo.vfs('shared', 'a') as fp:
        fp.write('journal\n')

def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
        sharedrepo = share._getsrcrepo(repo)
        sharedfeatures = _readsharedfeatures(repo)
        if sharedrepo and sharedfeatures > set(['journal']):
            # there is a shared repository and there are shared journal
            # entries to copy. move shared data over from source to
            # destination but move the local file first
            if repo.vfs.exists('journal'):
                journalpath = repo.join('journal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            local = storage._open(
                repo.vfs, filename='journal.bak', _newestfirst=False)
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)

class journalentry(collections.namedtuple(
        'journalentry',
        'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by
    commas, timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
        newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __str__(self):
        """String representation for storage"""
        time = ' '.join(map(str, self.timestamp))
        oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
        newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))

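# Illustration, not part of journal.py: a round-trip through the storage
# format described in the docstring above (made-up values; node.hex and
# node.bin convert hashes between binary and hex form).
#
#   >>> entry = journalentry((1470000000.0, 0), 'alice', 'hg up default',
#   ...                      bookmarktype, 'feature', (node.nullid,),
#   ...                      (node.nullid,))
#   >>> str(entry).split('\n')[0]
#   '1470000000.0 0'
#   >>> journalentry.fromstorage(str(entry)) == entry
#   True
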
class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries while the regular wlock is unlocked (e.g. by the
    dirstate).

    """
    _currentcommand = ()
    _lockref = None

    def __init__(self, repo):
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = share._getsrcrepo(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        if self._lockref and self._lockref():
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            l = lock.lock(vfs, 'journal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'journal.lock',
                int(self.ui.config("ui", "timeout", "600")), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the
          named item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            util.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        with self.jlock(vfs):
            version = None
            # open file in append mode to ensure it is created if missing
            with vfs('journal', mode='a+b', atomictemp=True) as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                if version and version != str(storageversion):
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle
                    # or the file is corrupt. In future, perhaps rotate the
                    # file instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write(str(storageversion) + '\0')
                f.seek(0, os.SEEK_END)
                f.write(str(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='journal', _newestfirst=True):
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split('\0')
        version = lines and lines[0]
        if version != str(storageversion):
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)

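# Illustration, not part of journal.py: the on-disk layout _open() and
# _write() agree on is a version number plus NUL-separated records:
#
#   >>> raw = '0\x00record1\x00record2\x00'
#   >>> fields = raw.split('\x00')
#   >>> fields[0], [l for l in reversed(fields[1:]) if l]
#   ('0', ['record2', 'record1'])
#
# i.e. the first field is the version and the remaining non-empty fields
# are entries, iterated in reverse so the newest is listed first.
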
# journal reading
# log options that don't make sense for journal
_ignoreopts = ('no-merges', 'graph')
@command(
    'journal', [
        ('', 'all', None, 'show history for all names'),
        ('c', 'commits', None, 'show commit metadata'),
    ] + [opt for opt in commands.logopts if opt[1] not in _ignoreopts],
    '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the
    working copy are shown. Passing a bookmark name will show all the
    previous positions of that bookmark. Use the --all switch to show
    previous locations for all bookmarks and the working copy; each line
    will then include the bookmark name, or '.' for the working copy, as
    well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that
    was running at that time. -v/--verbose will show the prior hash, the
    user, and the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = cmdutil.loglimit(opts)
    entry = None
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = ','.join([node.short(hash) for hash in entry.newhashes])
        oldhashesstr = ','.join([node.short(hash) for hash in entry.oldhashes])

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = util.datestr(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
@@ -0,0 +1,21 @@
#ifndef _HG_BDIFF_H_
#define _HG_BDIFF_H_

struct bdiff_line {
	int hash, n, e;
	ssize_t len;
	const char *l;
};

struct bdiff_hunk;
struct bdiff_hunk {
	int a1, a2, b1, b2;
	struct bdiff_hunk *next;
};

int bdiff_splitlines(const char *a, ssize_t len, struct bdiff_line **lr);
int bdiff_diff(struct bdiff_line *a, int an, struct bdiff_line *b, int bn,
	struct bdiff_hunk *base);
void bdiff_freehunks(struct bdiff_hunk *l);

#endif
@@ -0,0 +1,203 @@
/*
 bdiff.c - efficient binary diff extension for Mercurial

 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>

 This software may be used and distributed according to the terms of
 the GNU General Public License, incorporated herein by reference.

 Based roughly on Python difflib
*/

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#include "bdiff.h"
#include "bitmanipulation.h"


static PyObject *blocks(PyObject *self, PyObject *args)
{
	PyObject *sa, *sb, *rl = NULL, *m;
	struct bdiff_line *a, *b;
	struct bdiff_hunk l, *h;
	int an, bn, count, pos = 0;

	l.next = NULL;

	if (!PyArg_ParseTuple(args, "SS:bdiff", &sa, &sb))
		return NULL;

	an = bdiff_splitlines(PyBytes_AsString(sa), PyBytes_Size(sa), &a);
	bn = bdiff_splitlines(PyBytes_AsString(sb), PyBytes_Size(sb), &b);

	if (!a || !b)
		goto nomem;

	count = bdiff_diff(a, an, b, bn, &l);
	if (count < 0)
		goto nomem;

	rl = PyList_New(count);
	if (!rl)
		goto nomem;

	for (h = l.next; h; h = h->next) {
		m = Py_BuildValue("iiii", h->a1, h->a2, h->b1, h->b2);
		PyList_SetItem(rl, pos, m);
		pos++;
	}

nomem:
	free(a);
	free(b);
	bdiff_freehunks(l.next);
	return rl ? rl : PyErr_NoMemory();
}

static PyObject *bdiff(PyObject *self, PyObject *args)
{
	char *sa, *sb, *rb;
	PyObject *result = NULL;
	struct bdiff_line *al, *bl;
	struct bdiff_hunk l, *h;
	int an, bn, count;
	Py_ssize_t len = 0, la, lb;
	PyThreadState *_save;

	l.next = NULL;

	if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb))
		return NULL;

	if (la > UINT_MAX || lb > UINT_MAX) {
		PyErr_SetString(PyExc_ValueError, "bdiff inputs too large");
		return NULL;
	}

	_save = PyEval_SaveThread();
	an = bdiff_splitlines(sa, la, &al);
	bn = bdiff_splitlines(sb, lb, &bl);
	if (!al || !bl)
		goto nomem;

	count = bdiff_diff(al, an, bl, bn, &l);
	if (count < 0)
		goto nomem;

	/* calculate length of output */
	la = lb = 0;
	for (h = l.next; h; h = h->next) {
		if (h->a1 != la || h->b1 != lb)
			len += 12 + bl[h->b1].l - bl[lb].l;
		la = h->a2;
		lb = h->b2;
	}
	PyEval_RestoreThread(_save);
	_save = NULL;

	result = PyBytes_FromStringAndSize(NULL, len);

	if (!result)
		goto nomem;

	/* build binary patch */
	rb = PyBytes_AsString(result);
	la = lb = 0;

	for (h = l.next; h; h = h->next) {
		if (h->a1 != la || h->b1 != lb) {
			len = bl[h->b1].l - bl[lb].l;
			putbe32((uint32_t)(al[la].l - al->l), rb);
			putbe32((uint32_t)(al[h->a1].l - al->l), rb + 4);
			putbe32((uint32_t)len, rb + 8);
			memcpy(rb + 12, bl[lb].l, len);
			rb += 12 + len;
		}
		la = h->a2;
		lb = h->b2;
	}

nomem:
	if (_save)
		PyEval_RestoreThread(_save);
	free(al);
	free(bl);
	bdiff_freehunks(l.next);
	return result ? result : PyErr_NoMemory();
}

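/*
 * Annotation, not part of the original file: each patch chunk written by
 * the loop above is a 12-byte big-endian header followed by data,
 *
 *     start (4 bytes) | end (4 bytes) | len (4 bytes) | len bytes from b
 *
 * meaning "replace bytes a[start:end] with the len bytes that follow";
 * the companion mpatch module applies sequences of such chunks.
 */
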
/*
 * If allws != 0, remove all whitespace (' ', \t and \r). Otherwise,
 * reduce whitespace sequences to a single space and trim remaining whitespace
 * from end of lines.
 */
static PyObject *fixws(PyObject *self, PyObject *args)
{
	PyObject *s, *result = NULL;
	char allws, c;
	const char *r;
	Py_ssize_t i, rlen, wlen = 0;
	char *w;

	if (!PyArg_ParseTuple(args, "Sb:fixws", &s, &allws))
		return NULL;
	r = PyBytes_AsString(s);
	rlen = PyBytes_Size(s);

	w = (char *)malloc(rlen ? rlen : 1);
	if (!w)
		goto nomem;

	for (i = 0; i != rlen; i++) {
		c = r[i];
		if (c == ' ' || c == '\t' || c == '\r') {
			if (!allws && (wlen == 0 || w[wlen - 1] != ' '))
				w[wlen++] = ' ';
		} else if (c == '\n' && !allws
			   && wlen > 0 && w[wlen - 1] == ' ') {
			w[wlen - 1] = '\n';
		} else {
			w[wlen++] = c;
		}
	}

	result = PyBytes_FromStringAndSize(w, wlen);

nomem:
	free(w);
	return result ? result : PyErr_NoMemory();
}


static char mdiff_doc[] = "Efficient binary diff.";

static PyMethodDef methods[] = {
	{"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
	{"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
	{"fixws", fixws, METH_VARARGS, "normalize diff whitespaces\n"},
	{NULL, NULL}
};

#ifdef IS_PY3K
static struct PyModuleDef bdiff_module = {
	PyModuleDef_HEAD_INIT,
	"bdiff",
	mdiff_doc,
	-1,
	methods
};

PyMODINIT_FUNC PyInit_bdiff(void)
{
	return PyModule_Create(&bdiff_module);
}
#else
PyMODINIT_FUNC initbdiff(void)
{
	Py_InitModule3("bdiff", methods, mdiff_doc);
}
#endif
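The three methods registered above are reachable from Python as the
mercurial.bdiff module. A minimal sketch of driving them, assuming the C
extension is built; mpatch is the companion module that applies the
binary patches bdiff() emits:

    from mercurial import bdiff, mpatch

    a = 'one\ntwo\nthree\n'
    b = 'one\n2\nthree\n'

    # matching regions as (a1, a2, b1, b2) line-range tuples
    print(bdiff.blocks(a, b))

    # binary patch from a to b; applying it with mpatch reproduces b
    patch = bdiff.bdiff(a, b)
    assert mpatch.patches(a, [patch]) == b

    # whitespace normalisation; 1 = drop all ' ', '\t', '\r'
    print(bdiff.fixws('a \t b\n', 1))  # 'ab\n'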
@@ -0,0 +1,53 @@
#ifndef _HG_BITMANIPULATION_H_
#define _HG_BITMANIPULATION_H_

#include "compat.h"

static inline uint32_t getbe32(const char *c)
{
	const unsigned char *d = (const unsigned char *)c;

	return ((d[0] << 24) |
		(d[1] << 16) |
		(d[2] << 8) |
		(d[3]));
}

static inline int16_t getbeint16(const char *c)
{
	const unsigned char *d = (const unsigned char *)c;

	return ((d[0] << 8) |
		(d[1]));
}

static inline uint16_t getbeuint16(const char *c)
{
	const unsigned char *d = (const unsigned char *)c;

	return ((d[0] << 8) |
		(d[1]));
}

static inline void putbe32(uint32_t x, char *c)
{
	c[0] = (x >> 24) & 0xff;
	c[1] = (x >> 16) & 0xff;
	c[2] = (x >> 8) & 0xff;
	c[3] = (x) & 0xff;
}

static inline double getbefloat64(const char *c)
{
	const unsigned char *d = (const unsigned char *)c;
	double ret;
	int i;
	uint64_t t = 0;
	for (i = 0; i < 8; i++) {
		t = (t<<8) + d[i];
	}
	memcpy(&ret, &t, sizeof(t));
	return ret;
}

#endif
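These helpers read and write big-endian integers regardless of host byte
order. For reference, a Python equivalent of the 32-bit pair using the
standard struct module:

    import struct

    def putbe32(x):
        return struct.pack('>I', x)            # what putbe32() writes

    def getbe32(buf):
        return struct.unpack('>I', buf[:4])[0]  # what getbe32() reads

    assert getbe32(putbe32(0xdeadbeef)) == 0xdeadbeef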
@@ -0,0 +1,43 @@
#ifndef _HG_COMPAT_H_
#define _HG_COMPAT_H_

#ifdef _WIN32
#ifdef _MSC_VER
/* msvc 6.0 has problems */
#define inline __inline
#if defined(_WIN64)
typedef __int64 ssize_t;
#else
typedef int ssize_t;
#endif
typedef signed char int8_t;
typedef short int16_t;
typedef long int32_t;
typedef __int64 int64_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned long uint32_t;
typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#endif
#else
/* not windows */
#include <sys/types.h>
#if defined __BEOS__ && !defined __HAIKU__
#include <ByteOrder.h>
#else
#include <arpa/inet.h>
#endif
#include <inttypes.h>
#endif

#if defined __hpux || defined __SUNPRO_C || defined _AIX
#define inline
#endif

#ifdef __linux
#define inline __inline
#endif

#endif
@@ -0,0 +1,45 @@
# policy.py - module policy logic for Mercurial.
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import sys

# Rules for how modules can be loaded. Values are:
#
#    c - require C extensions
#    allow - allow pure Python implementation when C loading fails
#    cffi - required cffi versions (implemented within pure module)
#    cffi-allow - allow pure Python implementation if cffi version is missing
#    py - only load pure Python modules
#
# By default, require the C extensions for performance reasons.
policy = 'c'
policynoc = ('cffi', 'cffi-allow', 'py')
policynocffi = ('c', 'py')

try:
    from . import __modulepolicy__
    policy = __modulepolicy__.modulepolicy
except ImportError:
    pass

# PyPy doesn't load C extensions.
#
# The canonical way to do this is to test platform.python_implementation().
# But we don't import platform and don't bloat for it here.
if '__pypy__' in sys.builtin_module_names:
    policy = 'cffi'

# Our C extensions aren't yet compatible with Python 3. So use pure Python
# on Python 3 for now.
if sys.version_info[0] >= 3:
    policy = 'py'

# Environment variable can always force settings.
policy = os.environ.get('HGMODULEPOLICY', policy)
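A hypothetical, simplified sketch of how a consumer might act on the
computed policy (real call sites also handle the cffi values; the module
names are those used in Mercurial of this era):

    from mercurial import policy

    def importparsers():
        if policy.policy != 'py':
            try:
                from mercurial import parsers  # C extension
                return parsers
            except ImportError:
                if policy.policy == 'c':
                    raise  # C strictly required, no fallback allowed
        from mercurial.pure import parsers  # pure-Python fallback
        return parsers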
(17 additional new files, 15 with mode 100644 and two executable with
mode 100755, were added in this commit; the viewer truncated their
content.)
@@ -156,7 +156,7 @@ i18n/hg.pot: $(PYFILES) $(DOCFILES) i18n
 # Packaging targets
 
 osx:
-	python setup.py install --optimize=1 \
+	/usr/bin/python2.7 setup.py install --optimize=1 \
 	--root=build/mercurial/ --prefix=/usr/local/ \
 	--install-lib=/Library/Python/2.7/site-packages/
 	make -C doc all install DESTDIR="$(PWD)/build/mercurial/"
@@ -184,7 +184,7 @@ shopt -s extglob
         return
     fi
 
-    opts=$(_hg_cmd debugcomplete --options "$cmd")
+    opts=$(HGPLAINEXCEPT=alias _hg_cmd debugcomplete --options "$cmd")
 
     COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$opts' -- "$cur"))
     _hg_fix_wordlist
@@ -1,7 +1,9 @@
 # Randomized torture test generation for bdiff
 
 from __future__ import absolute_import, print_function
-import random
+import random
+import sys
+
 from mercurial import (
     bdiff,
     mpatch,
@@ -26,6 +26,15 @@ import optparse
 import os
 import re
 import sys
+if sys.version_info[0] < 3:
+    opentext = open
+else:
+    def opentext(f):
+        return open(f, encoding='ascii')
+try:
+    xrange
+except NameError:
+    xrange = range
 try:
     import re2
 except ImportError:
@@ -41,26 +50,26 @@ def compilere(pat, multiline=False):
         pass
     return re.compile(pat)
 
+# check "rules depending on implementation of repquote()" in each
+# patterns (especially pypats), before changing around repquote()
+_repquotefixedmap = {' ': ' ', '\n': '\n', '.': 'p', ':': 'q',
+                     '%': '%', '\\': 'b', '*': 'A', '+': 'P', '-': 'M'}
+def _repquoteencodechr(i):
+    if i > 255:
+        return 'u'
+    c = chr(i)
+    if c in _repquotefixedmap:
+        return _repquotefixedmap[c]
+    if c.isalpha():
+        return 'x'
+    if c.isdigit():
+        return 'n'
+    return 'o'
+_repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
+
 def repquote(m):
-    fromc = '.:'
-    tochr = 'pq'
-    def encodechr(i):
-        if i > 255:
-            return 'u'
-        c = chr(i)
-        if c in ' \n':
-            return c
-        if c.isalpha():
-            return 'x'
-        if c.isdigit():
-            return 'n'
-        try:
-            return tochr[fromc.find(c)]
-        except (ValueError, IndexError):
-            return 'o'
     t = m.group('text')
-    tt = ''.join(encodechr(i) for i in xrange(256))
-    t = t.translate(tt)
+    t = t.translate(_repquotett)
     return m.group('quote') + t + m.group('quote')
 
 def reppython(m):
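The change above hoists the 256-entry translation table out of repquote()
so it is built once at import time instead of on every match. A toy
version of the same optimisation (hypothetical names, Python 2
str.translate semantics, matching this file):

    # built once, at module load
    _table = ''.join('x' if chr(i).isalpha() else 'o' for i in range(256))

    def classify(s):
        # one table lookup per character instead of a per-call rebuild
        return s.translate(_table)

    print(classify('ab1!'))  # prints 'xxoo'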
@@ -103,7 +112,7 @@ testpats = [
    (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
    (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
    (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
-   (r'printf.*[^\\]\\([1-9]|0\d)', "don't use 'printf \NNN', use Python"),
+   (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
    (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
    (r'\$\(.*\)', "don't use $(expr), use `expr`"),
    (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
@@ -114,7 +123,7 @@ testpats = [
    (r'export .*=', "don't export and assign at once"),
    (r'^source\b', "don't use 'source', use '.'"),
    (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
-   (r'ls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
+   (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
    (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
    (r'^stop\(\)', "don't use 'stop' as a shell function name"),
    (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
@@ -133,6 +142,7 @@ testpats = [
    (r'\|&', "don't use |&, use 2>&1"),
    (r'\w = +\w', "only one space after = allowed"),
    (r'\bsed\b.*[^\\]\\n', "don't use 'sed ... \\n', use a \\ and a newline"),
+   (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'")
  ],
  # warnings
  [
@@ -179,6 +189,8 @@ utestpats = [
    (r'^  .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
    (r'^  .*file://\$TESTTMP',
     'write "file:/*/$TESTTMP" + (glob) to match on windows too'),
+   (r'^  [^$>].*27\.0\.0\.1.*[^)]$',
+    'use (glob) to match localhost IP on hosts without 127.0.0.1 too'),
    (r'^  (cat|find): .*: No such file or directory',
     'use test -f to test for file existence'),
    (r'^  diff -[^ -]*p',
@@ -197,8 +209,8 @@ utestpats = [
  ],
  # warnings
  [
-   (r'^  [^*?/\n]* \(glob\)$',
-    "glob match with no glob character (?, *, /)"),
+   (r'^  (?!.*127\.0\.0\.1)[^*?/\n]* \(glob\)$',
+    "glob match with no glob string (?, *, /, and 127.0.0.1)"),
  ]
 ]
 
@@ -214,7 +226,7 @@ for i in [0, 1]:
 
 utestfilters = [
    (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
-   (r"( *)(#([^\n]*\S)?)", repcomment),
+   (r"( +)(#([^\n]*\S)?)", repcomment),
 ]
 
 pypats = [
@@ -238,7 +250,6 @@ pypats = [
    (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
    (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
    (r'.{81}', "line too long"),
-   (r' x+[xo][\'"]\n\s+[\'"]x', 'string join across lines with no space'),
    (r'[^\n]\Z', "no trailing newline"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
    # (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
@@ -305,8 +316,6 @@ pypats = [
    (r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
     'legacy exception syntax; use "as" instead of ","'),
    (r':\n(    )*( ){1,3}[^ ]', "must indent 4 spaces"),
-   (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
-    "missing _() in ui message (use () to hide false-positives)"),
    (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
    (r'\b__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
    (r'os\.path\.join\(.*, *(""|\'\')\)',
@@ -318,9 +327,37 @@ pypats = [
    (r'^import Queue', "don't use Queue, use util.queue + util.empty"),
    (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"),
    (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
+   (r'^import SocketServer', "don't use SocketServer, use util.socketserver"),
+   (r'^import urlparse', "don't use urlparse, use util.urlparse"),
+   (r'^import xmlrpclib', "don't use xmlrpclib, use util.xmlrpclib"),
+   (r'^import cPickle', "don't use cPickle, use util.pickle"),
+   (r'^import pickle', "don't use pickle, use util.pickle"),
+   (r'^import httplib', "don't use httplib, use util.httplib"),
+   (r'^import BaseHTTPServer', "use util.httpserver instead"),
+   (r'\.next\(\)', "don't use .next(), use next(...)"),
+
+   # rules depending on implementation of repquote()
+   (r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
+    'string join across lines with no space'),
+   (r'''(?x)ui\.(status|progress|write|note|warn)\(
+        [ \t\n#]*
+        (?# any strings/comments might precede a string, which
+          # contains translatable message)
+        ((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
+        (?# sequence consisting of below might precede translatable message
+          # - formatting string: "% 10s", "%05d", "% -3.2f", "%*s", "%%" ...
+          # - escaped character: "\\", "\n", "\0" ...
+          # - character other than '%', 'b' as '\', and 'x' as alphabet)
+        (['"]|\'\'\'|""")
+        ((%([ n]?[PM]?([np]+|A))?x)|%%|b[bnx]|[ \nnpqAPMo])*x
+        (?# this regexp can't use [^...] style,
+          # because _preparepats forcibly adds "\n" into [^...],
+          # even though this regexp wants match it against "\n")''',
+    "missing _() in ui message (use () to hide false-positives)"),
  ],
  # warnings
  [
+   # rules depending on implementation of repquote()
    (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
  ]
 ]
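For context, these new rules run against repquote()-encoded source:
string contents are reduced to the small alphabet built earlier ('x' for
letters, 'n' for digits, 'b' for a backslash, 'p'/'q' for '.'/':', 'o'
for the rest), so for example

    ui.warn("no changes found\n")   is seen as   ui.warn("xx xxxxxxx xxxxxbx")

which the long verbose-mode regex matches and reports as a missing _().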
@@ -365,9 +402,13 @@ cpats = [
    (r'^\s*#import\b', "use only #include in standard C code"),
    (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
    (r'strcat\(', "don't use strcat"),
+
+   # rules depending on implementation of repquote()
  ],
  # warnings
- []
+ [
+   # rules depending on implementation of repquote()
+ ]
 ]
 
 cfilters = [
@@ -433,7 +474,6 @@ def _preparepats():
         filters = c[3]
         for i, flt in enumerate(filters):
             filters[i] = re.compile(flt[0]), flt[1]
-_preparepats()
 
 class norepeatlogger(object):
     def __init__(self):
@@ -486,12 +526,15 @@ def checkfile(f, logfunc=_defaultlogger.
     result = True
 
     try:
-        fp = open(f)
+        with opentext(f) as fp:
+            try:
+                pre = post = fp.read()
+            except UnicodeDecodeError as e:
+                print("%s while reading %s" % (e, f))
+                return result
     except IOError as e:
         print("Skipping %s, %s" % (f, str(e).split(':', 1)[0]))
         return result
-    pre = post = fp.read()
-    fp.close()
 
     for name, match, magic, filters, pats in checks:
         if debug:
@@ -578,7 +621,7 @@ def checkfile(f, logfunc=_defaultlogger.
 
     return result
 
-if __name__ == "__main__":
+def main():
     parser = optparse.OptionParser("%prog [options] [files]")
     parser.add_option("-w", "--warnings", action="store_true",
                       help="include warning-level checks")
@@ -600,10 +643,15 @@ if __name__ == "__main__":
     else:
         check = args
 
+    _preparepats()
+
     ret = 0
     for f in check:
         if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
                          blame=options.blame, debug=options.debug,
                          lineno=options.lineno):
             ret = 1
-    sys.exit(ret)
+    return ret
+
+if __name__ == "__main__":
+    sys.exit(main())
@@ -15,7 +15,11 @@
 #
 # See also: https://mercurial-scm.org/wiki/ContributingChanges
 
-import re, sys, os
+from __future__ import absolute_import, print_function
+
+import os
+import re
+import sys
 
 commitheader = r"^(?:# [^\n]*\n)*"
 afterheader = commitheader + r"(?!#)"
@@ -69,9 +73,9 @@ def checkcommit(commit, node=None):
             break
         if not printed:
             printed = True
-            print "node: %s" % node
-        print "%d: %s" % (n, msg)
-        print " %s" % nonempty(l, last)[:-1]
+            print("node: %s" % node)
+        print("%d: %s" % (n, msg))
+        print(" %s" % nonempty(l, last)[:-1])
         if "BYPASS" not in os.environ:
             exitcode = 1
         del hits[0]
@@ -61,7 +61,20 @@ def check_compat_py3(f):
         imp.load_module(name, fh, '', ('py', 'r', imp.PY_SOURCE))
     except Exception as e:
         exc_type, exc_value, tb = sys.exc_info()
-        frame = traceback.extract_tb(tb)[-1]
+        # We walk the stack and ignore frames from our custom importer,
+        # import mechanisms, and stdlib modules. This kinda/sorta
+        # emulates CPython behavior in import.c while also attempting
+        # to pin blame on a Mercurial file.
+        for frame in reversed(traceback.extract_tb(tb)):
+            if frame.name == '_call_with_frames_removed':
+                continue
+            if 'importlib' in frame.filename:
+                continue
+            if 'mercurial/__init__.py' in frame.filename:
+                continue
+            if frame.filename.startswith(sys.prefix):
+                continue
+            break
 
         if frame.filename:
             filename = os.path.basename(frame.filename)
@@ -28,3 +28,5 @@ The following variables are available fo
 
 * CHGDEBUG enables debug messages.
 * CHGSOCKNAME specifies the socket path of the background cmdserver.
+* CHGTIMEOUT specifies how many seconds chg will wait before giving up
+  connecting to a cmdserver. If it is 0, chg will wait forever. Default: 60
@@ -249,7 +249,13 @@ static hgclient_t *retryconnectcmdserver
 	int pst = 0;
 
 	debugmsg("try connect to %s repeatedly", opts->sockname);
-	for (unsigned int i = 0; i < 10 * 100; i++) {
+
+	unsigned int timeoutsec = 60; /* default: 60 seconds */
+	const char *timeoutenv = getenv("CHGTIMEOUT");
+	if (timeoutenv)
+		sscanf(timeoutenv, "%u", &timeoutsec);
+
+	for (unsigned int i = 0; !timeoutsec || i < timeoutsec * 100; i++) {
 		hgclient_t *hgc = hgc_open(opts->sockname);
 		if (hgc)
 			return hgc;
@@ -332,6 +338,7 @@ static void killcmdserver(const struct c
 	}
 }
 
+static pid_t pagerpid = 0;
 static pid_t peerpid = 0;
 
 static void forwardsignal(int sig)
@@ -374,6 +381,17 @@ error:
 	abortmsgerrno("failed to handle stop signal");
 }
 
+static void handlechildsignal(int sig UNUSED_)
+{
+	if (peerpid == 0 || pagerpid == 0)
+		return;
+	/* if pager exits, notify the server with SIGPIPE immediately.
+	 * otherwise the server won't get SIGPIPE if it does not write
+	 * anything. (issue5278) */
+	if (waitpid(pagerpid, NULL, WNOHANG) == pagerpid)
+		kill(peerpid, SIGPIPE);
+}
+
 static void setupsignalhandler(pid_t pid)
 {
 	if (pid <= 0)
@@ -410,6 +428,11 @@ static void setupsignalhandler(pid_t pid
 	sa.sa_flags = SA_RESTART;
 	if (sigaction(SIGTSTP, &sa, NULL) < 0)
 		goto error;
+	/* get notified when pager exits */
+	sa.sa_handler = handlechildsignal;
+	sa.sa_flags = SA_RESTART;
+	if (sigaction(SIGCHLD, &sa, NULL) < 0)
+		goto error;
 
 	return;
 
@@ -417,21 +440,56 @@ error:
 	abortmsgerrno("failed to set up signal handlers");
 }
 
-/* This implementation is based on hgext/pager.py (pre 369741ef7253) */
-static void setuppager(hgclient_t *hgc, const char *const args[],
+static void restoresignalhandler()
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sa.sa_flags = SA_RESTART;
+	if (sigemptyset(&sa.sa_mask) < 0)
+		goto error;
+
+	if (sigaction(SIGHUP, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGTERM, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGCONT, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGCHLD, &sa, NULL) < 0)
+		goto error;
+
+	/* ignore Ctrl+C while shutting down to make pager exits cleanly */
+	sa.sa_handler = SIG_IGN;
+	if (sigaction(SIGINT, &sa, NULL) < 0)
+		goto error;
+
+	peerpid = 0;
+	return;
+
+error:
+	abortmsgerrno("failed to restore signal handlers");
+}
+
+/* This implementation is based on hgext/pager.py (post 369741ef7253)
+ * Return 0 if pager is not started, or pid of the pager */
+static pid_t setuppager(hgclient_t *hgc, const char *const args[],
 	size_t argsize)
 {
 	const char *pagercmd = hgc_getpager(hgc, args, argsize);
 	if (!pagercmd)
-		return;
+		return 0;
 
 	int pipefds[2];
 	if (pipe(pipefds) < 0)
-		return;
+		return 0;
 	pid_t pid = fork();
 	if (pid < 0)
 		goto error;
-	if (pid == 0) {
+	if (pid > 0) {
 		close(pipefds[0]);
 		if (dup2(pipefds[1], fileno(stdout)) < 0)
 			goto error;
@@ -441,7 +499,7 @@ static void setuppager(hgclient_t *hgc, 
 		}
 		close(pipefds[1]);
 		hgc_attachio(hgc); /* reattach to pager */
-		return;
+		return pid;
 	} else {
 		dup2(pipefds[0], fileno(stdin));
 		close(pipefds[0]);
|
451 | 509 | if (r < 0) { |
|
452 | 510 | abortmsgerrno("cannot start pager '%s'", pagercmd); |
|
453 | 511 | } |
|
454 | return; | |
|
512 | return 0; | |
|
455 | 513 | } |
|
456 | 514 | |
|
457 | 515 | error: |
|
458 | 516 | close(pipefds[0]); |
|
459 | 517 | close(pipefds[1]); |
|
460 | 518 | abortmsgerrno("failed to prepare pager"); |
|
519 | return 0; | |
|
520 | } | |
|
521 | ||
|
522 | static void waitpager(pid_t pid) | |
|
523 | { | |
|
524 | /* close output streams to notify the pager its input ends */ | |
|
525 | fclose(stdout); | |
|
526 | fclose(stderr); | |
|
527 | while (1) { | |
|
528 | pid_t ret = waitpid(pid, NULL, 0); | |
|
529 | if (ret == -1 && errno == EINTR) | |
|
530 | continue; | |
|
531 | break; | |
|
532 | } | |
|
461 | 533 | } |
|
462 | 534 | |
|
463 | 535 | /* Run instructions sent from the server like unlink and set redirect path |
@@ -585,9 +657,13 b' int main(int argc, const char *argv[], c' | |||
|
585 | 657 | } |
|
586 | 658 | |
|
587 | 659 | setupsignalhandler(hgc_peerpid(hgc)); |
|
588 | setuppager(hgc, argv + 1, argc - 1); | |
|
660 | pagerpid = setuppager(hgc, argv + 1, argc - 1); | |
|
589 | 661 | int exitcode = hgc_runcommand(hgc, argv + 1, argc - 1); |
|
662 | restoresignalhandler(); | |
|
590 | 663 | hgc_close(hgc); |
|
591 | 664 | freecmdserveropts(&opts); |
|
665 | if (pagerpid) | |
|
666 | waitpager(pagerpid); | |
|
667 | ||
|
592 | 668 | return exitcode; |
|
593 | 669 | } |
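The new pager flow above (fork the pager, point stdout at it, run the
command, restore signal handlers, then close output and wait so the
pager can drain) can be summarised in a hedged Python sketch; this is
not chg's actual code, just the same pattern:

    import os
    import subprocess
    import sys

    def runwithpager(main, pagercmd='less'):
        # start the pager with its stdin on a pipe (cf. setuppager)
        pager = subprocess.Popen(pagercmd, shell=True, stdin=subprocess.PIPE)
        os.dup2(pager.stdin.fileno(), sys.stdout.fileno())
        try:
            return main()  # cf. hgc_runcommand
        finally:
            # closing our output ends the pager's input (cf. waitpager)
            sys.stdout.flush()
            os.close(sys.stdout.fileno())
            pager.stdin.close()
            pager.wait()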
@@ -63,6 +63,7 @@ typedef struct {
 
 struct hgclient_tag_ {
 	int sockfd;
+	pid_t pgid;
 	pid_t pid;
 	context_t ctx;
 	unsigned int capflags;
@@ -125,10 +126,15 @@ static void readchannel(hgclient_t *hgc)
 		return;  /* assumes input request */
 
 	size_t cursize = 0;
+	int emptycount = 0;
 	while (cursize < hgc->ctx.datasize) {
 		rsize = recv(hgc->sockfd, hgc->ctx.data + cursize,
 			     hgc->ctx.datasize - cursize, 0);
-		if (rsize < 0)
+		/* rsize == 0 normally indicates EOF, while it's also a valid
+		 * packet size for unix socket. treat it as EOF and abort if
+		 * we get many empty responses in a row. */
+		emptycount = (rsize == 0 ? emptycount + 1 : 0);
+		if (rsize < 0 || emptycount > 20)
 			abortmsg("failed to read data block");
 		cursize += rsize;
 	}
@@ -339,6 +345,8 b' static void readhello(hgclient_t *hgc)' | |||
|
339 | 345 | u = dataend; |
|
340 | 346 | if (strncmp(s, "capabilities:", t - s + 1) == 0) { |
|
341 | 347 | hgc->capflags = parsecapabilities(t + 2, u); |
|
348 | } else if (strncmp(s, "pgid:", t - s + 1) == 0) { | |
|
349 | hgc->pgid = strtol(t + 2, NULL, 10); | |
|
342 | 350 | } else if (strncmp(s, "pid:", t - s + 1) == 0) { |
|
343 | 351 | hgc->pid = strtol(t + 2, NULL, 10); |
|
344 | 352 | } |
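readhello() now also extracts a pgid field from the server's hello message, alongside capabilities and pid, presumably so the client can address the whole server process group rather than a single pid. The message is a series of "name: value" lines; a rough Python equivalent of the parsing (framing assumed from the C code above):

    def parsehello(data):
        fields = {}
        for line in data.split('\n'):
            if ': ' not in line:
                continue
            name, value = line.split(': ', 1)
            fields[name] = value
        return {
            'capabilities': fields.get('capabilities', '').split(),
            'pid': int(fields.get('pid', 0)),
            'pgid': int(fields.get('pgid', 0)),
        }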
@@ -463,6 +471,12 b' void hgc_close(hgclient_t *hgc)' | |||
|
463 | 471 | free(hgc); |
|
464 | 472 | } |
|
465 | 473 | |
|
474 | pid_t hgc_peerpgid(const hgclient_t *hgc) | |
|
475 | { | |
|
476 | assert(hgc); | |
|
477 | return hgc->pgid; | |
|
478 | } | |
|
479 | ||
|
466 | 480 | pid_t hgc_peerpid(const hgclient_t *hgc) |
|
467 | 481 | { |
|
468 | 482 | assert(hgc); |
@@ -18,6 +18,7 b' typedef struct hgclient_tag_ hgclient_t;' | |||
|
18 | 18 | hgclient_t *hgc_open(const char *sockname); |
|
19 | 19 | void hgc_close(hgclient_t *hgc); |
|
20 | 20 | |
|
21 | pid_t hgc_peerpgid(const hgclient_t *hgc); | |
|
21 | 22 | pid_t hgc_peerpid(const hgclient_t *hgc); |
|
22 | 23 | |
|
23 | 24 | const char **hgc_validate(hgclient_t *hgc, const char *const args[], |
@@ -12,8 +12,10 b'' | |||
|
12 | 12 | |
|
13 | 13 | #ifdef __GNUC__ |
|
14 | 14 | #define PRINTF_FORMAT_ __attribute__((format(printf, 1, 2))) |
|
15 | #define UNUSED_ __attribute__((unused)) | |
|
15 | 16 | #else |
|
16 | 17 | #define PRINTF_FORMAT_ |
|
18 | #define UNUSED_ | |
|
17 | 19 | #endif |
|
18 | 20 | |
|
19 | 21 | void abortmsg(const char *fmt, ...) PRINTF_FORMAT_; |
@@ -52,7 +52,7 b' def debugshell(ui, repo, **opts):' | |||
|
52 | 52 | with demandimport.deactivated(): |
|
53 | 53 | __import__(pdbmap[debugger]) |
|
54 | 54 | except ImportError: |
|
55 | ui.warn("%s debugger specified but %s module was not found\n" | |
|
55 | ui.warn(("%s debugger specified but %s module was not found\n") | |
|
56 | 56 | % (debugger, pdbmap[debugger])) |
|
57 | 57 | debugger = 'pdb' |
|
58 | 58 |
@@ -25,10 +25,10 b' def checkconsistency(ui, orig, dmap, _no' | |||
|
25 | 25 | """Compute nonnormalset from dmap, check that it matches _nonnormalset""" |
|
26 | 26 | nonnormalcomputedmap = nonnormalentries(dmap) |
|
27 | 27 | if _nonnormalset != nonnormalcomputedmap: |
|
28 | ui.develwarn("%s call to %s\n" % (label, orig)) | |
|
29 | ui.develwarn("inconsistency in nonnormalset\n") | |
|
30 | ui.develwarn("[nonnormalset] %s\n" % _nonnormalset) | |
|
31 | ui.develwarn("[map] %s\n" % nonnormalcomputedmap) | |
|
28 | ui.develwarn("%s call to %s\n" % (label, orig), config='dirstate') | |
|
29 | ui.develwarn("inconsistency in nonnormalset\n", config='dirstate') | |
|
30 | ui.develwarn("[nonnormalset] %s\n" % _nonnormalset, config='dirstate') | |
|
31 | ui.develwarn("[map] %s\n" % nonnormalcomputedmap, config='dirstate') | |
|
32 | 32 | |
|
33 | 33 | def _checkdirstate(orig, self, arg): |
|
34 | 34 | """Check nonnormal set consistency before and after the call to orig""" |
@@ -2,8 +2,14 b'' | |||
|
2 | 2 | # Dump revlogs as raw data stream |
|
3 | 3 | # $ find .hg/store/ -name "*.i" | xargs dumprevlog > repo.dump |
|
4 | 4 | |
|
5 | from __future__ import absolute_import, print_function | |
|
6 | ||
|
5 | 7 | import sys |
|
6 | from mercurial import revlog, node, util | 
|
8 | from mercurial import ( | |
|
9 | node, | |
|
10 | revlog, | |
|
11 | util, | |
|
12 | ) | |
|
7 | 13 | |
|
8 | 14 | for fp in (sys.stdin, sys.stdout, sys.stderr): |
|
9 | 15 | util.setbinary(fp) |
@@ -11,15 +17,15 b' for fp in (sys.stdin, sys.stdout, sys.st' | |||
|
11 | 17 | for f in sys.argv[1:]: |
|
12 | 18 | binopen = lambda fn: open(fn, 'rb') |
|
13 | 19 | r = revlog.revlog(binopen, f) |
|
14 | print "file:", f | 
|
20 | print("file:", f) | |
|
15 | 21 | for i in r: |
|
16 | 22 | n = r.node(i) |
|
17 | 23 | p = r.parents(n) |
|
18 | 24 | d = r.revision(n) |
|
19 | print "node:", node.hex(n) | 
|
20 | print "linkrev:", r.linkrev(i) | 
|
21 | print "parents:", node.hex(p[0]), node.hex(p[1]) | 
|
22 | print "length:", len(d) | 
|
23 | print "-start-" | 
|
24 | print d | 
|
25 | print "-end-" | 
|
25 | print("node:", node.hex(n)) | |
|
26 | print("linkrev:", r.linkrev(i)) | |
|
27 | print("parents:", node.hex(p[0]), node.hex(p[1])) | |
|
28 | print("length:", len(d)) | |
|
29 | print("-start-") | |
|
30 | print(d) | |
|
31 | print("-end-") |
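The dumprevlog conversion leans on the __future__ import: once print_function is in effect, Python 2 parses print(...) as a function call exactly as Python 3 does, so the same source runs under both. For instance:

    from __future__ import print_function
    # parses identically on Python 2 and 3; without the future import,
    # Python 2 would print a tuple here
    print("node:", "0123456789abcdef")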
@@ -11,8 +11,9 b' import sys' | |||
|
11 | 11 | # Import a minimal set of stdlib modules needed for list_stdlib_modules() |
|
12 | 12 | # to work when run from a virtualenv. The modules were chosen empirically |
|
13 | 13 | # so that the return value matches the return value without virtualenv. |
|
14 | import BaseHTTPServer | |
|
15 | import zlib | |
|
14 | if True: # disable lexical sorting checks | |
|
15 | import BaseHTTPServer | |
|
16 | import zlib | |
|
16 | 17 | |
|
17 | 18 | # Whitelist of modules that symbols can be directly imported from. |
|
18 | 19 | allowsymbolimports = ( |
@@ -126,22 +127,32 b' def fromlocalfunc(modulename, localmods)' | |||
|
126 | 127 | False |
|
127 | 128 | >>> fromlocal(None, 1) |
|
128 | 129 | ('foo', 'foo.__init__', True) |
|
130 | >>> fromlocal('foo1', 1) | |
|
131 | ('foo.foo1', 'foo.foo1', False) | |
|
129 | 132 | >>> fromlocal2 = fromlocalfunc('foo.xxx.yyy', localmods) |
|
130 | 133 | >>> fromlocal2(None, 2) |
|
131 | 134 | ('foo', 'foo.__init__', True) |
|
135 | >>> fromlocal2('bar2', 1) | |
|
136 | False | |
|
137 | >>> fromlocal2('bar', 2) | |
|
138 | ('foo.bar', 'foo.bar.__init__', True) | |
|
132 | 139 | """ |
|
133 | 140 | prefix = '.'.join(modulename.split('.')[:-1]) |
|
134 | 141 | if prefix: |
|
135 | 142 | prefix += '.' |
|
136 | 143 | def fromlocal(name, level=0): |
|
137 | # name is None when relative imports are used. | 
|
138 | if name is None: | 
|
144 | # name is false value when relative imports are used. | |
|
145 | if not name: | |
|
139 | 146 | # If relative imports are used, level must not be absolute. |
|
140 | 147 | assert level > 0 |
|
141 | 148 | candidates = ['.'.join(modulename.split('.')[:-level])] |
|
142 | 149 | else: |
|
143 | # Check relative name first. | |
|
144 | candidates = [prefix + name, name] | |
|
150 | if not level: | |
|
151 | # Check relative name first. | |
|
152 | candidates = [prefix + name, name] | |
|
153 | else: | |
|
154 | candidates = ['.'.join(modulename.split('.')[:-level]) + | |
|
155 | '.' + name] | |
|
145 | 156 | |
|
146 | 157 | for n in candidates: |
|
147 | 158 | if n in localmods: |
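The reworked fromlocal() distinguishes three cases: a bare relative import ("from .. import x", no module name), an absolute name (try the sibling module first, then the top level), and an explicit relative name, where `level` leading dots strip that many trailing components from the importing module's path. A standalone sketch of the candidate computation (illustrative, not the checker's exact code):

    def candidates(modulename, name, level=0):
        prefix = '.'.join(modulename.split('.')[:-1])
        if not name:
            # "from .. import x": only dots, no module name
            assert level > 0
            return ['.'.join(modulename.split('.')[:-level])]
        if not level:
            # absolute name: check the sibling module first
            return [(prefix + '.' + name) if prefix else name, name]
        # explicit relative name: strip `level` trailing components
        return ['.'.join(modulename.split('.')[:-level]) + '.' + name]

    print(candidates('foo.xxx.yyy', 'bar', 2))  # ['foo.bar']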
@@ -175,6 +186,9 b' def list_stdlib_modules():' | |||
|
175 | 186 | |
|
176 | 187 | >>> 'cStringIO' in mods |
|
177 | 188 | True |
|
189 | ||
|
190 | >>> 'cffi' in mods | |
|
191 | True | |
|
178 | 192 | """ |
|
179 | 193 | for m in sys.builtin_module_names: |
|
180 | 194 | yield m |
@@ -187,6 +201,8 b' def list_stdlib_modules():' | |||
|
187 | 201 | yield m |
|
188 | 202 | for m in 'cPickle', 'datetime': # in Python (not C) on PyPy |
|
189 | 203 | yield m |
|
204 | for m in ['cffi']: | |
|
205 | yield m | |
|
190 | 206 | stdlib_prefixes = set([sys.prefix, sys.exec_prefix]) |
|
191 | 207 | # We need to supplement the list of prefixes for the search to work |
|
192 | 208 | # when run from within a virtualenv. |
@@ -360,7 +376,7 b' def verify_modern_convention(module, roo' | |||
|
360 | 376 | * Symbols can only be imported from specific modules (see |
|
361 | 377 | `allowsymbolimports`). For other modules, first import the module then |
|
362 | 378 | assign the symbol to a module-level variable. In addition, these imports |
|
363 | must be performed before other relative imports. This rule only | 
|
379 | must be performed before other local imports. This rule only | |
|
364 | 380 | applies to import statements outside of any blocks. |
|
365 | 381 | * Relative imports from the standard library are not allowed. |
|
366 | 382 | * Certain modules must be aliased to alternate names to avoid aliasing |
@@ -371,8 +387,8 b' def verify_modern_convention(module, roo' | |||
|
371 | 387 | |
|
372 | 388 | # Whether a local/non-stdlib import has been performed. |
|
373 | 389 | seenlocal = None |
|
374 | # Whether a relative, non-symbol import has been seen. | 
|
375 | seennonsymbolrelative = False | 
|
390 | # Whether a local/non-stdlib, non-symbol import has been seen. | |
|
391 | seennonsymbollocal = False | |
|
376 | 392 | # The last name to be imported (for sorting). |
|
377 | 393 | lastname = None |
|
378 | 394 | # Relative import levels encountered so far. |
@@ -446,26 +462,26 b' def verify_modern_convention(module, roo' | |||
|
446 | 462 | |
|
447 | 463 | # Direct symbol import is only allowed from certain modules and |
|
448 | 464 | # must occur before non-symbol imports. |
|
465 | found = fromlocal(node.module, node.level) | |
|
466 | if found and found[2]: # node.module is a package | |
|
467 | prefix = found[0] + '.' | |
|
468 | symbols = [n.name for n in node.names | |
|
469 | if not fromlocal(prefix + n.name)] | |
|
470 | else: | |
|
471 | symbols = [n.name for n in node.names] | |
|
449 | 472 | if node.module and node.col_offset == root_col_offset: |
|
450 | found = fromlocal(node.module, node.level) | |
|
451 | if found and found[2]: # node.module is a package | |
|
452 | prefix = found[0] + '.' | |
|
453 | symbols = [n.name for n in node.names | |
|
454 | if not fromlocal(prefix + n.name)] | |
|
455 | else: | |
|
456 | symbols = [n.name for n in node.names] | |
|
457 | ||
|
458 | 473 | if symbols and fullname not in allowsymbolimports: |
|
459 | 474 | yield msg('direct symbol import %s from %s', |
|
460 | 475 | ', '.join(symbols), fullname) |
|
461 | 476 | |
|
462 | if symbols and seennonsymbolrelative: | 
|
477 | if symbols and seennonsymbollocal: | |
|
463 | 478 | yield msg('symbol import follows non-symbol import: %s', |
|
464 | 479 | fullname) |
|
480 | if not symbols and fullname not in stdlib_modules: | |
|
481 | seennonsymbollocal = True | |
|
465 | 482 | |
|
466 | 483 | if not node.module: |
|
467 | 484 | assert node.level |
|
468 | seennonsymbolrelative = True | |
|
469 | 485 | |
|
470 | 486 | # Only allow 1 group per level. |
|
471 | 487 | if (node.level in seenlevels |
@@ -652,7 +668,7 b' def sources(f, modname):' | |||
|
652 | 668 | the input file. |
|
653 | 669 | """ |
|
654 | 670 | py = False |
|
655 | if f.endswith('.py'): | 
|
671 | if not f.endswith('.t'): | |
|
656 | 672 | with open(f) as src: |
|
657 | 673 | yield src.read(), modname, f, 0 |
|
658 | 674 | py = True |
@@ -1,5 +1,5 b'' | |||
|
1 | 1 | <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> |
|
2 | <!-- This is the second screen displayed during the install. --> | 
|
2 | <!-- This is the first screen displayed during the install. --> | |
|
3 | 3 | <html> |
|
4 | 4 | <head> |
|
5 | 5 | <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> |
@@ -1,6 +1,23 b'' | |||
|
1 | 1 | # perf.py - performance test routines |
|
2 | 2 | '''helper extension to measure performance''' |
|
3 | 3 | |
|
4 | # "historical portability" policy of perf.py: | |
|
5 | # | |
|
6 | # We have to do: | |
|
7 | # - make perf.py "loadable" with as wide Mercurial version as possible | |
|
8 | # This doesn't mean that perf commands work correctly with that Mercurial. | |
|
9 | # BTW, perf.py itself has been available since 1.1 (or eb240755386d). | |
|
10 | # - make historical perf command work correctly with as wide Mercurial | |
|
11 | # version as possible | |
|
12 | # | |
|
13 | # We have to do, if possible with reasonable cost: | |
|
14 | # - make recent perf command for historical feature work correctly | |
|
15 | # with early Mercurial | |
|
16 | # | |
|
17 | # We don't have to do: | |
|
18 | # - make perf command for recent feature work correctly with early | |
|
19 | # Mercurial | |
|
20 | ||
|
4 | 21 | from __future__ import absolute_import |
|
5 | 22 | import functools |
|
6 | 23 | import os |
@@ -8,25 +25,97 b' import random' | |||
|
8 | 25 | import sys |
|
9 | 26 | import time |
|
10 | 27 | from mercurial import ( |
|
11 | branchmap, | |
|
12 | 28 | cmdutil, |
|
13 | 29 | commands, |
|
14 | 30 | copies, |
|
15 | 31 | error, |
|
32 | extensions, | |
|
16 | 33 | mdiff, |
|
17 | 34 | merge, |
|
18 | obsolete, | |
|
19 | repoview, | |
|
20 | 35 | revlog, |
|
21 | scmutil, | |
|
22 | 36 | util, |
|
23 | 37 | ) |
|
24 | 38 | |
|
25 | formatteropts = commands.formatteropts | |
|
26 | revlogopts = commands.debugrevlogopts | |
|
39 | # for "historical portability": | |
|
40 | # try to import modules separately (in dict order), and ignore | |
|
41 | # failure, because these aren't available with early Mercurial | |
|
42 | try: | |
|
43 | from mercurial import branchmap # since 2.5 (or bcee63733aad) | |
|
44 | except ImportError: | |
|
45 | pass | |
|
46 | try: | |
|
47 | from mercurial import obsolete # since 2.3 (or ad0d6c2b3279) | |
|
48 | except ImportError: | |
|
49 | pass | |
|
50 | try: | |
|
51 | from mercurial import repoview # since 2.5 (or 3a6ddacb7198) | |
|
52 | except ImportError: | |
|
53 | pass | |
|
54 | try: | |
|
55 | from mercurial import scmutil # since 1.9 (or 8b252e826c68) | |
|
56 | except ImportError: | |
|
57 | pass | |
|
58 | ||
|
59 | # for "historical portability": | |
|
60 | # define util.safehasattr forcibly, because util.safehasattr has been | |
|
61 | # available since 1.9.3 (or 94b200a11cf7) | |
|
62 | _undefined = object() | |
|
63 | def safehasattr(thing, attr): | |
|
64 | return getattr(thing, attr, _undefined) is not _undefined | |
|
65 | setattr(util, 'safehasattr', safehasattr) | |
|
66 | ||
|
67 | # for "historical portability": | |
|
68 | # use locally defined empty option list, if formatteropts isn't | |
|
69 | # available, because commands.formatteropts has been available since | |
|
70 | # 3.2 (or 7a7eed5176a4), even though formatting itself has been | |
|
71 | # available since 2.2 (or ae5f92e154d3) | |
|
72 | formatteropts = getattr(commands, "formatteropts", []) | |
|
73 | ||
|
74 | # for "historical portability": | |
|
75 | # use locally defined option list, if debugrevlogopts isn't available, | |
|
76 | # because commands.debugrevlogopts has been available since 3.7 (or | |
|
77 | # 5606f7d0d063), even though cmdutil.openrevlog() has been available | |
|
78 | # since 1.9 (or a79fea6b3e77). | |
|
79 | revlogopts = getattr(commands, "debugrevlogopts", [ | |
|
80 | ('c', 'changelog', False, ('open changelog')), | |
|
81 | ('m', 'manifest', False, ('open manifest')), | |
|
82 | ('', 'dir', False, ('open directory manifest')), | |
|
83 | ]) | |
|
27 | 84 | |
|
28 | 85 | cmdtable = {} |
|
29 | command = cmdutil.command(cmdtable) | |
|
86 | ||
|
87 | # for "historical portability": | |
|
88 | # define parsealiases locally, because cmdutil.parsealiases has been | |
|
89 | # available since 1.5 (or 6252852b4332) | |
|
90 | def parsealiases(cmd): | |
|
91 | return cmd.lstrip("^").split("|") | |
|
92 | ||
|
93 | if safehasattr(cmdutil, 'command'): | |
|
94 | import inspect | |
|
95 | command = cmdutil.command(cmdtable) | |
|
96 | if 'norepo' not in inspect.getargspec(command)[0]: | |
|
97 | # for "historical portability": | |
|
98 | # wrap original cmdutil.command, because "norepo" option has | |
|
99 | # been available since 3.1 (or 75a96326cecb) | |
|
100 | _command = command | |
|
101 | def command(name, options=(), synopsis=None, norepo=False): | |
|
102 | if norepo: | |
|
103 | commands.norepo += ' %s' % ' '.join(parsealiases(name)) | |
|
104 | return _command(name, list(options), synopsis) | |
|
105 | else: | |
|
106 | # for "historical portability": | |
|
107 | # define "@command" annotation locally, because cmdutil.command | |
|
108 | # has been available since 1.9 (or 2daa5179e73f) | |
|
109 | def command(name, options=(), synopsis=None, norepo=False): | |
|
110 | def decorator(func): | |
|
111 | if synopsis: | |
|
112 | cmdtable[name] = func, list(options), synopsis | |
|
113 | else: | |
|
114 | cmdtable[name] = func, list(options) | |
|
115 | if norepo: | |
|
116 | commands.norepo += ' %s' % ' '.join(parsealiases(name)) | |
|
117 | return func | |
|
118 | return decorator | |
|
30 | 119 | |
|
31 | 120 | def getlen(ui): |
|
32 | 121 | if ui.configbool("perf", "stub"): |
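perf.py's "historical portability" machinery boils down to three probes, all visible above: import optional modules under try/except, use getattr() with a local fallback for attributes that may be too new, and inspect a function's signature instead of comparing version numbers. A condensed, self-contained sketch of the pattern (it runs even without Mercurial installed, since absence is exactly the case being handled):

    import inspect

    # optional module: failure just means the feature is absent
    try:
        from mercurial import branchmap     # since 2.5 (or bcee63733aad)
    except ImportError:
        branchmap = None

    # attribute that may be too new: substitute a local default
    try:
        from mercurial import commands
        formatteropts = getattr(commands, 'formatteropts', [])
    except ImportError:
        formatteropts = []

    # probe a signature rather than a version string
    def command(name, options=(), synopsis=None, norepo=False):
        return lambda func: func
    print('norepo' in inspect.getargspec(command)[0])   # -> True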
@@ -796,3 +885,18 b' def perflrucache(ui, size=4, gets=10000,' | |||
|
796 | 885 | timer, fm = gettimer(ui, opts) |
|
797 | 886 | timer(fn, title=title) |
|
798 | 887 | fm.end() |
|
888 | ||
|
889 | def uisetup(ui): | |
|
890 | if (util.safehasattr(cmdutil, 'openrevlog') and | |
|
891 | not util.safehasattr(commands, 'debugrevlogopts')): | |
|
892 | # for "historical portability": | |
|
893 | # In this case, Mercurial should be 1.9 (or a79fea6b3e77) - | |
|
894 | # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for | |
|
895 | # openrevlog() should cause failure, because it has been | |
|
896 | # available since 3.5 (or 49c583ca48c4). | |
|
897 | def openrevlog(orig, repo, cmd, file_, opts): | |
|
898 | if opts.get('dir') and not util.safehasattr(repo, 'dirlog'): | |
|
899 | raise error.Abort("This version doesn't support --dir option", | |
|
900 | hint="use 3.5 or later") | |
|
901 | return orig(repo, cmd, file_, opts) | |
|
902 | extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog) |
@@ -10,41 +10,32 b'' | |||
|
10 | 10 | |
|
11 | 11 | from __future__ import absolute_import, print_function |
|
12 | 12 | import math |
|
13 | import optparse # cannot use argparse, python 2.7 only | |
|
13 | 14 | import os |
|
14 | 15 | import re |
|
16 | import subprocess | |
|
15 | 17 | import sys |
|
16 | from subprocess import ( | |
|
17 | CalledProcessError, | |
|
18 | check_call, | |
|
19 | PIPE, | |
|
20 | Popen, | |
|
21 | STDOUT, | |
|
22 | ) | |
|
23 | # cannot use argparse, python 2.7 only | |
|
24 | from optparse import ( | |
|
25 | OptionParser, | |
|
26 | ) | |
|
27 | 18 | |
|
28 | 19 | DEFAULTVARIANTS = ['plain', 'min', 'max', 'first', 'last', |
|
29 | 20 | 'reverse', 'reverse+first', 'reverse+last', |
|
30 | 21 | 'sort', 'sort+first', 'sort+last'] |
|
31 | 22 | |
|
32 | 23 | def check_output(*args, **kwargs): |
|
33 | kwargs.setdefault('stderr', PIPE) | |
|
34 | kwargs.setdefault('stdout', PIPE) | |
|
35 | proc = Popen(*args, **kwargs) | |
|
24 | kwargs.setdefault('stderr', subprocess.PIPE) | |
|
25 | kwargs.setdefault('stdout', subprocess.PIPE) | |
|
26 | proc = subprocess.Popen(*args, **kwargs) | |
|
36 | 27 | output, error = proc.communicate() |
|
37 | 28 | if proc.returncode != 0: |
|
38 | raise CalledProcessError(proc.returncode, ' '.join(args[0])) | |
|
29 | raise subprocess.CalledProcessError(proc.returncode, ' '.join(args[0])) | |
|
39 | 30 | return output |
|
40 | 31 | |
|
41 | 32 | def update(rev): |
|
42 | 33 | """update the repo to a revision""" |
|
43 | 34 | try: |
|
44 | check_call(['hg', 'update', '--quiet', '--check', str(rev)]) | |
|
35 | subprocess.check_call(['hg', 'update', '--quiet', '--check', str(rev)]) | |
|
45 | 36 | check_output(['make', 'local'], |
|
46 | 37 | stderr=None) # suppress output except for error/warning |
|
47 | except CalledProcessError as exc: | |
|
38 | except subprocess.CalledProcessError as exc: | |
|
48 | 39 | print('update to revision %s failed, aborting'%rev, file=sys.stderr) |
|
49 | 40 | sys.exit(exc.returncode) |
|
50 | 41 | |
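Referencing everything as subprocess.X (instead of "from subprocess import ...") matches the import style the checker enforces, and the script still carries its own check_output() because subprocess.check_output only appeared in Python 2.7. The backport pattern, as a standalone sketch:

    import subprocess

    def check_output(*args, **kwargs):
        # subprocess.check_output is 2.7+; emulate it with Popen
        kwargs.setdefault('stdout', subprocess.PIPE)
        proc = subprocess.Popen(*args, **kwargs)
        output, _err = proc.communicate()
        if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode,
                                                ' '.join(args[0]))
        return output

    print(check_output(['echo', 'ok']))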
@@ -60,7 +51,7 b' def hg(cmd, repo=None):' | |||
|
60 | 51 | fullcmd += ['--config', |
|
61 | 52 | 'extensions.perf=' + os.path.join(contribdir, 'perf.py')] |
|
62 | 53 | fullcmd += cmd |
|
63 | return check_output(fullcmd, stderr=STDOUT) | |
|
54 | return check_output(fullcmd, stderr=subprocess.STDOUT) | |
|
64 | 55 | |
|
65 | 56 | def perf(revset, target=None, contexts=False): |
|
66 | 57 | """run benchmark for this very revset""" |
@@ -70,7 +61,7 b' def perf(revset, target=None, contexts=F' | |||
|
70 | 61 | args.append('--contexts') |
|
71 | 62 | output = hg(args, repo=target) |
|
72 | 63 | return parseoutput(output) |
|
73 | except CalledProcessError as exc: | |
|
64 | except subprocess.CalledProcessError as exc: | |
|
74 | 65 | print('abort: cannot run revset benchmark: %s'%exc.cmd, file=sys.stderr) |
|
75 | 66 | if getattr(exc, 'output', None) is None: # no output before 2.7 |
|
76 | 67 | print('(no output)', file=sys.stderr) |
@@ -103,9 +94,9 b' def printrevision(rev):' | |||
|
103 | 94 | """print data about a revision""" |
|
104 | 95 | sys.stdout.write("Revision ") |
|
105 | 96 | sys.stdout.flush() |
|
106 | check_call(['hg', 'log', '--rev', str(rev), '--template', | |
|
107 | '{if(tags, " ({tags})")} ' | |
|
108 | '{rev}:{node|short}: {desc|firstline}\n']) | |
|
97 | subprocess.check_call(['hg', 'log', '--rev', str(rev), '--template', | |
|
98 | '{if(tags, " ({tags})")} ' | |
|
99 | '{rev}:{node|short}: {desc|firstline}\n']) | |
|
109 | 100 | |
|
110 | 101 | def idxwidth(nbidx): |
|
111 | 102 | """return the max width of number used for index |
@@ -215,7 +206,7 b' def getrevs(spec):' | |||
|
215 | 206 | """get the list of rev matched by a revset""" |
|
216 | 207 | try: |
|
217 | 208 | out = check_output(['hg', 'log', '--template={rev}\n', '--rev', spec]) |
|
218 | except CalledProcessError as exc: | |
|
209 | except subprocess.CalledProcessError as exc: | |
|
219 | 210 | print("abort, can't get revision from %s"%spec, file=sys.stderr) |
|
220 | 211 | sys.exit(exc.returncode) |
|
221 | 212 | return [r for r in out.split() if r] |
@@ -234,8 +225,8 b' summary output is provided. Use it to de' | |||
|
234 | 225 | point regressions. Revsets to run are specified in a file (or from stdin), one |
|
235 | 226 | revsets per line. Line starting with '#' will be ignored, allowing insertion of |
|
236 | 227 | comments.""" |
|
237 | parser = OptionParser(usage="usage: %prog [options] <revs>", | |
|
238 | description=helptext) | |
|
228 | parser = optparse.OptionParser(usage="usage: %prog [options] <revs>", | |
|
229 | description=helptext) | |
|
239 | 230 | parser.add_option("-f", "--file", |
|
240 | 231 | help="read revset from FILE (stdin if omitted)", |
|
241 | 232 | metavar="FILE") |
@@ -45,6 +45,13 b' import os' | |||
|
45 | 45 | import random |
|
46 | 46 | import sys |
|
47 | 47 | import time |
|
48 | ||
|
49 | from mercurial.i18n import _ | |
|
50 | from mercurial.node import ( | |
|
51 | nullid, | |
|
52 | nullrev, | |
|
53 | short, | |
|
54 | ) | |
|
48 | 55 | from mercurial import ( |
|
49 | 56 | cmdutil, |
|
50 | 57 | context, |
@@ -54,12 +61,6 b' from mercurial import (' | |||
|
54 | 61 | scmutil, |
|
55 | 62 | util, |
|
56 | 63 | ) |
|
57 | from mercurial.i18n import _ | |
|
58 | from mercurial.node import ( | |
|
59 | nullid, | |
|
60 | nullrev, | |
|
61 | short, | |
|
62 | ) | |
|
63 | 64 | |
|
64 | 65 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
|
65 | 66 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
@@ -506,7 +507,7 b' def renamedirs(dirs, words):' | |||
|
506 | 507 | head = rename(head) |
|
507 | 508 | else: |
|
508 | 509 | head = '' |
|
509 | renamed = os.path.join(head, wordgen.next()) | 
|
510 | renamed = os.path.join(head, next(wordgen)) | |
|
510 | 511 | replacements[dirpath] = renamed |
|
511 | 512 | return renamed |
|
512 | 513 | result = [] |
@@ -3,8 +3,16 b'' | |||
|
3 | 3 | # $ hg init |
|
4 | 4 | # $ undumprevlog < repo.dump |
|
5 | 5 | |
|
6 | from __future__ import absolute_import | |
|
7 | ||
|
6 | 8 | import sys |
|
7 | from mercurial import revlog, node, scmutil, util, transaction | |
|
9 | from mercurial import ( | |
|
10 | node, | |
|
11 | revlog, | |
|
12 | scmutil, | |
|
13 | transaction, | |
|
14 | util, | |
|
15 | ) | |
|
8 | 16 | |
|
9 | 17 | for fp in (sys.stdin, sys.stdout, sys.stderr): |
|
10 | 18 | util.setbinary(fp) |
@@ -79,6 +79,8 b'' | |||
|
79 | 79 | # - Restart the web server and see if things are running. |
|
80 | 80 | # |
|
81 | 81 | |
|
82 | from __future__ import absolute_import | |
|
83 | ||
|
82 | 84 | # Configuration file location |
|
83 | 85 | hgweb_config = r'c:\your\directory\wsgi.config' |
|
84 | 86 | |
@@ -87,7 +89,6 b' path_strip = 0 # Strip this many path ' | |||
|
87 | 89 | path_prefix = 1 # This many path elements are prefixes (depends on the |
|
88 | 90 | # virtual path of the IIS application). |
|
89 | 91 | |
|
90 | from __future__ import absolute_import | |
|
91 | 92 | import sys |
|
92 | 93 | |
|
93 | 94 | # Adjust python path if this is not a system-wide install |
@@ -46,7 +46,6 b' editor = notepad' | |||
|
46 | 46 | ;extdiff = |
|
47 | 47 | ;fetch = |
|
48 | 48 | ;gpg = |
|
49 | ;hgcia = | |
|
50 | 49 | ;hgk = |
|
51 | 50 | ;highlight = |
|
52 | 51 | ;histedit = |
@@ -6,8 +6,11 b'' | |||
|
6 | 6 | # |
|
7 | 7 | # This software may be used and distributed according to the terms of the |
|
8 | 8 | # GNU General Public License version 2 or any later version. |
|
9 | ||
|
10 | from __future__ import absolute_import, print_function | |
|
11 | ||
|
12 | import re | |
|
9 | 13 | import sys |
|
10 | import re | |
|
11 | 14 | |
|
12 | 15 | leadingline = re.compile(r'(^\s*)(\S.*)$') |
|
13 | 16 |
@@ -117,11 +117,11 b' def showdoc(ui):' | |||
|
117 | 117 | ui.write(_("This section contains help for extensions that are " |
|
118 | 118 | "distributed together with Mercurial. Help for other " |
|
119 | 119 | "extensions is available in the help system.")) |
|
120 | ui.write("\n\n" | |
|
120 | ui.write(("\n\n" | |
|
121 | 121 | ".. contents::\n" |
|
122 | 122 | " :class: htmlonly\n" |
|
123 | 123 | " :local:\n" |
|
124 | " :depth: 1\n\n") | |
|
124 | " :depth: 1\n\n")) | |
|
125 | 125 | |
|
126 | 126 | for extensionname in sorted(allextensionnames()): |
|
127 | 127 | mod = extensions.load(ui, extensionname, None) |
@@ -415,7 +415,7 b' class Translator(nodes.NodeVisitor):' | |||
|
415 | 415 | else: |
|
416 | 416 | self._docinfo[name] = node.astext() |
|
417 | 417 | self._docinfo_keys.append(name) |
|
418 | raise nodes.SkipNode | |
|
418 | raise nodes.SkipNode() | |
|
419 | 419 | |
|
420 | 420 | def depart_docinfo_item(self, node): |
|
421 | 421 | pass |
@@ -469,7 +469,7 b' class Translator(nodes.NodeVisitor):' | |||
|
469 | 469 | |
|
470 | 470 | def visit_citation_reference(self, node): |
|
471 | 471 | self.body.append('['+node.astext()+']') |
|
472 | raise nodes.SkipNode | |
|
472 | raise nodes.SkipNode() | |
|
473 | 473 | |
|
474 | 474 | def visit_classifier(self, node): |
|
475 | 475 | pass |
@@ -489,7 +489,7 b' class Translator(nodes.NodeVisitor):' | |||
|
489 | 489 | def visit_comment(self, node, |
|
490 | 490 | sub=re.compile('-(?=-)').sub): |
|
491 | 491 | self.body.append(self.comment(node.astext())) |
|
492 | raise nodes.SkipNode | |
|
492 | raise nodes.SkipNode() | |
|
493 | 493 | |
|
494 | 494 | def visit_contact(self, node): |
|
495 | 495 | self.visit_docinfo_item(node, 'contact') |
@@ -643,7 +643,7 b' class Translator(nodes.NodeVisitor):' | |||
|
643 | 643 | name_normalized = self._field_name.lower().replace(" ","_") |
|
644 | 644 | self._docinfo_names[name_normalized] = self._field_name |
|
645 | 645 | self.visit_docinfo_item(node, name_normalized) |
|
646 | raise nodes.SkipNode | |
|
646 | raise nodes.SkipNode() | |
|
647 | 647 | |
|
648 | 648 | def depart_field_body(self, node): |
|
649 | 649 | pass |
@@ -657,7 +657,7 b' class Translator(nodes.NodeVisitor):' | |||
|
657 | 657 | def visit_field_name(self, node): |
|
658 | 658 | if self._in_docinfo: |
|
659 | 659 | self._field_name = node.astext() |
|
660 | raise nodes.SkipNode | |
|
660 | raise nodes.SkipNode() | |
|
661 | 661 | else: |
|
662 | 662 | self.body.append(self.defs['field_name'][0]) |
|
663 | 663 | |
@@ -693,7 +693,7 b' class Translator(nodes.NodeVisitor):' | |||
|
693 | 693 | |
|
694 | 694 | def visit_footnote_reference(self, node): |
|
695 | 695 | self.body.append('['+self.deunicode(node.astext())+']') |
|
696 | raise nodes.SkipNode | |
|
696 | raise nodes.SkipNode() | |
|
697 | 697 | |
|
698 | 698 | def depart_footnote_reference(self, node): |
|
699 | 699 | pass |
@@ -705,7 +705,7 b' class Translator(nodes.NodeVisitor):' | |||
|
705 | 705 | pass |
|
706 | 706 | |
|
707 | 707 | def visit_header(self, node): |
|
708 | raise NotImplementedError, node.astext() | 
|
708 | raise NotImplementedError(node.astext()) | |
|
709 | 709 | |
|
710 | 710 | def depart_header(self, node): |
|
711 | 711 | pass |
@@ -742,7 +742,7 b' class Translator(nodes.NodeVisitor):' | |||
|
742 | 742 | if 'uri' in node.attributes: |
|
743 | 743 | text.append(node.attributes['uri']) |
|
744 | 744 | self.body.append('[image: %s]\n' % ('/'.join(text))) |
|
745 | raise nodes.SkipNode | |
|
745 | raise nodes.SkipNode() | |
|
746 | 746 | |
|
747 | 747 | def visit_important(self, node): |
|
748 | 748 | self.visit_admonition(node, 'important') |
@@ -753,7 +753,7 b' class Translator(nodes.NodeVisitor):' | |||
|
753 | 753 | # footnote and citation |
|
754 | 754 | if (isinstance(node.parent, nodes.footnote) |
|
755 | 755 | or isinstance(node.parent, nodes.citation)): |
|
756 | raise nodes.SkipNode | |
|
756 | raise nodes.SkipNode() | |
|
757 | 757 | self.document.reporter.warning('"unsupported "label"', |
|
758 | 758 | base_node=node) |
|
759 | 759 | self.body.append('[') |
@@ -793,7 +793,7 b' class Translator(nodes.NodeVisitor):' | |||
|
793 | 793 | def visit_list_item(self, node): |
|
794 | 794 | # man 7 man argues to use ".IP" instead of ".TP" |
|
795 | 795 | self.body.append('.IP %s %d\n' % ( |
|
796 | self._list_char[-1].next(), | 
|
796 | next(self._list_char[-1]), | |
|
797 | 797 | self._list_char[-1].get_width(),)) |
|
798 | 798 | |
|
799 | 799 | def depart_list_item(self, node): |
@@ -814,7 +814,7 b' class Translator(nodes.NodeVisitor):' | |||
|
814 | 814 | self.body.append(self.defs['literal_block'][1]) |
|
815 | 815 | |
|
816 | 816 | def visit_meta(self, node): |
|
817 | raise NotImplementedError, node.astext() | 
|
817 | raise NotImplementedError(node.astext()) | |
|
818 | 818 | |
|
819 | 819 | def depart_meta(self, node): |
|
820 | 820 | pass |
@@ -924,7 +924,7 b' class Translator(nodes.NodeVisitor):' | |||
|
924 | 924 | if node.get('format') == 'manpage': |
|
925 | 925 | self.body.append(node.astext() + "\n") |
|
926 | 926 | # Keep non-manpage raw text out of output: |
|
927 | raise nodes.SkipNode | |
|
927 | raise nodes.SkipNode() | |
|
928 | 928 | |
|
929 | 929 | def visit_reference(self, node): |
|
930 | 930 | """E.g. link or email address.""" |
@@ -963,7 +963,7 b' class Translator(nodes.NodeVisitor):' | |||
|
963 | 963 | |
|
964 | 964 | def visit_substitution_definition(self, node): |
|
965 | 965 | """Internal only.""" |
|
966 | raise nodes.SkipNode | |
|
966 | raise nodes.SkipNode() | |
|
967 | 967 | |
|
968 | 968 | def visit_substitution_reference(self, node): |
|
969 | 969 | self.document.reporter.warning('"substitution_reference" not supported', |
@@ -1009,7 +1009,7 b' class Translator(nodes.NodeVisitor):' | |||
|
1009 | 1009 | |
|
1010 | 1010 | def visit_target(self, node): |
|
1011 | 1011 | # targets are in-document hyper targets, without any use for man-pages. |
|
1012 | raise nodes.SkipNode | |
|
1012 | raise nodes.SkipNode() | |
|
1013 | 1013 | |
|
1014 | 1014 | def visit_tbody(self, node): |
|
1015 | 1015 | pass |
@@ -1053,7 +1053,7 b' class Translator(nodes.NodeVisitor):' | |||
|
1053 | 1053 | self._docinfo['title'] = node.astext() |
|
1054 | 1054 | # document title for .TH |
|
1055 | 1055 | self._docinfo['title_upper'] = node.astext().upper() |
|
1056 | raise nodes.SkipNode | |
|
1056 | raise nodes.SkipNode() | |
|
1057 | 1057 | elif self.section_level == 1: |
|
1058 | 1058 | self.body.append('.SH ') |
|
1059 | 1059 | for n in node.traverse(nodes.Text): |
@@ -11,9 +11,11 b' import os' | |||
|
11 | 11 | import sys |
|
12 | 12 | |
|
13 | 13 | if os.environ.get('HGUNICODEPEDANTRY', False): |
|
14 | reload(sys) | |
|
15 | sys.setdefaultencoding("undefined") | |
|
16 | ||
|
14 | try: | |
|
15 | reload(sys) | |
|
16 | sys.setdefaultencoding("undefined") | |
|
17 | except NameError: | |
|
18 | pass | |
|
17 | 19 | |
|
18 | 20 | libdir = '@LIBDIR@' |
|
19 | 21 | |
@@ -26,9 +28,9 b" if libdir != '@' 'LIBDIR' '@':" | |||
|
26 | 28 | |
|
27 | 29 | # enable importing on demand to reduce startup time |
|
28 | 30 | try: |
|
29 | from mercurial import demandimport; demandimport.enable() | |
|
31 | if sys.version_info[0] < 3: | |
|
32 | from mercurial import demandimport; demandimport.enable() | |
|
30 | 33 | except ImportError: |
|
31 | import sys | |
|
32 | 34 | sys.stderr.write("abort: couldn't find mercurial libraries in [%s]\n" % |
|
33 | 35 | ' '.join(sys.path)) |
|
34 | 36 | sys.stderr.write("(check your install and PYTHONPATH)\n") |
@@ -26,6 +26,7 b' The threshold at which a file is conside' | |||
|
26 | 26 | |
|
27 | 27 | from __future__ import absolute_import |
|
28 | 28 | |
|
29 | from mercurial.i18n import _ | |
|
29 | 30 | from mercurial import ( |
|
30 | 31 | commands, |
|
31 | 32 | copies, |
@@ -34,7 +35,6 b' from mercurial import (' | |||
|
34 | 35 | scmutil, |
|
35 | 36 | similar |
|
36 | 37 | ) |
|
37 | from mercurial.i18n import _ | |
|
38 | 38 | |
|
39 | 39 | def extsetup(ui): |
|
40 | 40 | entry = extensions.wrapcommand( |
@@ -281,8 +281,6 b' from __future__ import absolute_import' | |||
|
281 | 281 | |
|
282 | 282 | import re |
|
283 | 283 | import time |
|
284 | import urlparse | |
|
285 | import xmlrpclib | |
|
286 | 284 | |
|
287 | 285 | from mercurial.i18n import _ |
|
288 | 286 | from mercurial.node import short |
@@ -293,6 +291,9 b' from mercurial import (' | |||
|
293 | 291 | util, |
|
294 | 292 | ) |
|
295 | 293 | |
|
294 | urlparse = util.urlparse | |
|
295 | xmlrpclib = util.xmlrpclib | |
|
296 | ||
|
296 | 297 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
|
297 | 298 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
298 | 299 | # be specifying the version(s) of Mercurial they are tested with, or |
@@ -40,18 +40,15 b' Config' | |||
|
40 | 40 | |
|
41 | 41 | from __future__ import absolute_import |
|
42 | 42 | |
|
43 | import SocketServer | |
|
44 | 43 | import errno |
|
45 | import gc | 
|
44 | import hashlib | |
|
46 | 45 | import inspect |
|
47 | 46 | import os |
|
48 | import random | |
|
49 | 47 | import re |
|
48 | import signal | |
|
50 | 49 | import struct |
|
51 | 50 | import sys |
|
52 | import threading | |
|
53 | 51 | import time |
|
54 | import traceback | |
|
55 | 52 | |
|
56 | 53 | from mercurial.i18n import _ |
|
57 | 54 | |
@@ -76,10 +73,11 b" testedwith = 'internal'" | |||
|
76 | 73 | |
|
77 | 74 | def _hashlist(items): |
|
78 | 75 | """return sha1 hexdigest for a list""" |
|
79 | return util.sha1(str(items)).hexdigest() | 
|
76 | return hashlib.sha1(str(items)).hexdigest() | |
|
80 | 77 | |
|
81 | 78 | # sensitive config sections affecting confighash |
|
82 | 79 | _configsections = [ |
|
80 | 'alias', # affects global state commands.table | |
|
83 | 81 | 'extdiff', # uisetup will register new commands |
|
84 | 82 | 'extensions', |
|
85 | 83 | ] |
@@ -150,6 +148,10 b' def _mtimehash(paths):' | |||
|
150 | 148 | |
|
151 | 149 | for chgserver, it is designed that once mtimehash changes, the server is |
|
152 | 150 | considered outdated immediately and should no longer provide service. |
|
151 | ||
|
152 | mtimehash is not included in confighash because we only know the paths of | |
|
153 | extensions after importing them (there is imp.find_module but that faces | |
|
154 | race conditions). We need to calculate confighash without importing. | |
|
153 | 155 | """ |
|
154 | 156 | def trystat(path): |
|
155 | 157 | try: |
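The docstring addition explains the split between the two hashes: confighash must be computable before extensions are imported, while mtimehash depends on the on-disk paths that are only known after importing them. A sketch of the stat-based hash the surrounding code builds (the exact fields hashed are an assumption, not the extension's byte-for-byte format):

    import hashlib
    import os

    def mtimehash(paths):
        sha1 = hashlib.sha1()
        for path in paths:
            try:
                st = os.stat(path)
                sha1.update('%s %d %d\n' % (path, st.st_size, st.st_mtime))
            except OSError:
                return ''   # an unreadable path invalidates the server
        return sha1.hexdigest()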
@@ -213,18 +215,6 b' def _setuppagercmd(ui, options, cmd):' | |||
|
213 | 215 | ui.setconfig('ui', 'interactive', False, 'pager') |
|
214 | 216 | return p |
|
215 | 217 | |
|
216 | _envvarre = re.compile(r'\$[a-zA-Z_]+') | |
|
217 | ||
|
218 | def _clearenvaliases(cmdtable): | |
|
219 | """Remove stale command aliases referencing env vars; variable expansion | |
|
220 | is done at dispatch.addaliases()""" | |
|
221 | for name, tab in cmdtable.items(): | |
|
222 | cmddef = tab[0] | |
|
223 | if (isinstance(cmddef, dispatch.cmdalias) and | |
|
224 | not cmddef.definition.startswith('!') and # shell alias | |
|
225 | _envvarre.search(cmddef.definition)): | |
|
226 | del cmdtable[name] | |
|
227 | ||
|
228 | 218 | def _newchgui(srcui, csystem): |
|
229 | 219 | class chgui(srcui.__class__): |
|
230 | 220 | def __init__(self, src=None): |
@@ -357,6 +347,7 b' class chgcmdserver(commandserver.server)' | |||
|
357 | 347 | self.capabilities['validate'] = chgcmdserver.validate |
|
358 | 348 | |
|
359 | 349 | def cleanup(self): |
|
350 | super(chgcmdserver, self).cleanup() | |
|
360 | 351 | # dispatch._runcatch() does not flush outputs if exception is not |
|
361 | 352 | # handled by dispatch._dispatch() |
|
362 | 353 | self.ui.flush() |
@@ -508,6 +499,11 b' class chgcmdserver(commandserver.server)' | |||
|
508 | 499 | |
|
509 | 500 | pagercmd = _setuppagercmd(self.ui, options, cmd) |
|
510 | 501 | if pagercmd: |
|
502 | # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so | |
|
503 | # we can exit if the pipe to the pager is closed | |
|
504 | if util.safehasattr(signal, 'SIGPIPE') and \ | |
|
505 | signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN: | |
|
506 | signal.signal(signal.SIGPIPE, signal.SIG_DFL) | |
|
511 | 507 | self.cresult.write(pagercmd) |
|
512 | 508 | else: |
|
513 | 509 | self.cresult.write('\0') |
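Python starts with SIGPIPE set to SIG_IGN, so writing to a pager that has already exited raises IOError(EPIPE) on every write instead of terminating the process; flipping the handler back to SIG_DFL before handing over the pager command restores the usual die-on-closed-pipe behavior. The guard, stated on its own:

    import signal

    if (hasattr(signal, 'SIGPIPE') and
            signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN):
        # default disposition: exit quietly when the pager goes away
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)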
@@ -525,7 +521,6 b' class chgcmdserver(commandserver.server)' | |||
|
525 | 521 | _log('setenv: %r\n' % sorted(newenv.keys())) |
|
526 | 522 | os.environ.clear() |
|
527 | 523 | os.environ.update(newenv) |
|
528 | _clearenvaliases(commands.table) | |
|
529 | 524 | |
|
530 | 525 | capabilities = commandserver.server.capabilities.copy() |
|
531 | 526 | capabilities.update({'attachio': attachio, |
@@ -534,174 +529,110 b' class chgcmdserver(commandserver.server)' | |||
|
534 | 529 | 'setenv': setenv, |
|
535 | 530 | 'setumask': setumask}) |
|
536 | 531 | |
|
537 | # copied from mercurial/commandserver.py | |
|
538 | class _requesthandler(SocketServer.StreamRequestHandler): | |
|
539 | def handle(self): | |
|
540 | # use a different process group from the master process, making this | |
|
541 | # process pass kernel "is_current_pgrp_orphaned" check so signals like | |
|
542 | # SIGTSTP, SIGTTIN, SIGTTOU are not ignored. | |
|
543 | os.setpgid(0, 0) | |
|
544 | # change random state otherwise forked request handlers would have a | |
|
545 | # same state inherited from parent. | |
|
546 | random.seed() | |
|
547 | ui = self.server.ui | |
|
548 | repo = self.server.repo | |
|
549 | sv = None | |
|
550 | try: | |
|
551 | sv = chgcmdserver(ui, repo, self.rfile, self.wfile, self.connection, | |
|
552 | self.server.hashstate, self.server.baseaddress) | |
|
553 | try: | |
|
554 | sv.serve() | |
|
555 | # handle exceptions that may be raised by command server. most of | |
|
556 | # known exceptions are caught by dispatch. | |
|
557 | except error.Abort as inst: | |
|
558 | ui.warn(_('abort: %s\n') % inst) | |
|
559 | except IOError as inst: | |
|
560 | if inst.errno != errno.EPIPE: | |
|
561 | raise | |
|
562 | except KeyboardInterrupt: | |
|
563 | pass | |
|
564 | finally: | |
|
565 | sv.cleanup() | |
|
566 | except: # re-raises | |
|
567 | # also write traceback to error channel. otherwise client cannot | |
|
568 | # see it because it is written to server's stderr by default. | |
|
569 | if sv: | |
|
570 | cerr = sv.cerr | |
|
571 | else: | |
|
572 | cerr = commandserver.channeledoutput(self.wfile, 'e') | |
|
573 | traceback.print_exc(file=cerr) | |
|
574 | raise | |
|
575 | finally: | |
|
576 | # trigger __del__ since ForkingMixIn uses os._exit | |
|
577 | gc.collect() | |
|
578 | ||
|
579 | 532 | def _tempaddress(address): |
|
580 | 533 | return '%s.%d.tmp' % (address, os.getpid()) |
|
581 | 534 | |
|
582 | 535 | def _hashaddress(address, hashstr): |
|
583 | 536 | return '%s-%s' % (address, hashstr) |
|
584 | 537 | |
|
585 | class AutoExitMixIn: # use old-style to comply with SocketServer design | |
|
586 | lastactive = time.time() | |
|
587 | idletimeout = 3600 # default 1 hour | |
|
538 | class chgunixservicehandler(object): | |
|
539 | """Set of operations for chg services""" | |
|
540 | ||
|
541 | pollinterval = 1 # [sec] | |
|
588 | 542 | |
|
589 | def startautoexitthread(self): | |
|
590 | # note: the auto-exit check here is cheap enough to not use a thread, | |
|
591 | # be done in serve_forever. however SocketServer is hook-unfriendly, | |
|
592 | # you simply cannot hook serve_forever without copying a lot of code. | |
|
593 | # besides, serve_forever's docstring suggests using thread. | |
|
594 | thread = threading.Thread(target=self._autoexitloop) | |
|
595 | thread.daemon = True | |
|
596 | thread.start() | |
|
543 | def __init__(self, ui): | |
|
544 | self.ui = ui | |
|
545 | self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600) | |
|
546 | self._lastactive = time.time() | |
|
547 | ||
|
548 | def bindsocket(self, sock, address): | |
|
549 | self._inithashstate(address) | |
|
550 | self._checkextensions() | |
|
551 | self._bind(sock) | |
|
552 | self._createsymlink() | |
|
597 | 553 | |
|
598 | def _autoexitloop(self, interval=1): | |
|
599 | while True: | |
|
600 | time.sleep(interval) | |
|
601 | if not self.issocketowner(): | |
|
602 | _log('%s is not owned, exiting.\n' % self.server_address) | |
|
603 | break | 
|
604 | if time.time() - self.lastactive > self.idletimeout: | |
|
605 | _log('being idle too long. exiting.\n') | |
|
606 | break | |
|
607 | self.shutdown() | |
|
554 | def _inithashstate(self, address): | |
|
555 | self._baseaddress = address | |
|
556 | if self.ui.configbool('chgserver', 'skiphash', False): | |
|
557 | self._hashstate = None | |
|
558 | self._realaddress = address | |
|
559 | return | |
|
560 | self._hashstate = hashstate.fromui(self.ui) | |
|
561 | self._realaddress = _hashaddress(address, self._hashstate.confighash) | |
|
608 | 562 | |
|
609 | def process_request(self, request, address): | |
|
610 | self.lastactive = time.time() | |
|
611 | return SocketServer.ForkingMixIn.process_request( | |
|
612 | self, request, address) | |
|
563 | def _checkextensions(self): | |
|
564 | if not self._hashstate: | |
|
565 | return | |
|
566 | if extensions.notloaded(): | |
|
567 | # one or more extensions failed to load. mtimehash becomes | |
|
568 | # meaningless because we do not know the paths of those extensions. | |
|
569 | # set mtimehash to an illegal hash value to invalidate the server. | |
|
570 | self._hashstate.mtimehash = '' | |
|
613 | 571 | |
|
614 | def server_bind(self): | 
|
572 | def _bind(self, sock): | |
|
615 | 573 | # use a unique temp address so we can stat the file and do ownership |
|
616 | 574 | # check later |
|
617 | tempaddress = _tempaddress(self.server_address) | 
|
618 | # use relative path instead of full path at bind() if possible, since | |
|
619 | # AF_UNIX path has very small length limit (107 chars) on common | |
|
620 | # platforms (see sys/un.h) | |
|
621 | dirname, basename = os.path.split(tempaddress) | |
|
622 | bakwdfd = None | |
|
623 | if dirname: | |
|
624 | bakwdfd = os.open('.', os.O_DIRECTORY) | |
|
625 | os.chdir(dirname) | |
|
626 | self.socket.bind(basename) | |
|
627 | self._socketstat = os.stat(basename) | |
|
575 | tempaddress = _tempaddress(self._realaddress) | |
|
576 | util.bindunixsocket(sock, tempaddress) | |
|
577 | self._socketstat = os.stat(tempaddress) | |
|
628 | 578 | # rename will replace the old socket file if exists atomically. the |
|
629 | 579 | # old server will detect ownership change and exit. |
|
630 | util.rename(basename, self.server_address) | 
|
631 | if bakwdfd: | |
|
632 | os.fchdir(bakwdfd) | |
|
633 | os.close(bakwdfd) | |
|
580 | util.rename(tempaddress, self._realaddress) | |
|
634 | 581 | |
|
635 | def issocketowner(self): | |
|
582 | def _createsymlink(self): | |
|
583 | if self._baseaddress == self._realaddress: | |
|
584 | return | |
|
585 | tempaddress = _tempaddress(self._baseaddress) | |
|
586 | os.symlink(os.path.basename(self._realaddress), tempaddress) | |
|
587 | util.rename(tempaddress, self._baseaddress) | |
|
588 | ||
|
589 | def _issocketowner(self): | |
|
636 | 590 | try: |
|
637 | stat = os.stat(self.server_address) | 
|
591 | stat = os.stat(self._realaddress) | |
|
638 | 592 | return (stat.st_ino == self._socketstat.st_ino and |
|
639 | 593 | stat.st_mtime == self._socketstat.st_mtime) |
|
640 | 594 | except OSError: |
|
641 | 595 | return False |
|
642 | 596 | |
|
643 | def unlinksocketfile(self): | 
|
644 | if not self.issocketowner(): | |
|
597 | def unlinksocket(self, address): | |
|
598 | if not self._issocketowner(): | |
|
645 | 599 | return |
|
646 | 600 | # it is possible to have a race condition here that we may |
|
647 | 601 | # remove another server's socket file. but that's okay |
|
648 | 602 | # since that server will detect and exit automatically and |
|
649 | 603 | # the client will start a new server on demand. |
|
650 | 604 | try: |
|
651 | os.unlink(self.server_address) | 
|
605 | os.unlink(self._realaddress) | |
|
652 | 606 | except OSError as exc: |
|
653 | 607 | if exc.errno != errno.ENOENT: |
|
654 | 608 | raise |
|
655 | 609 | |
|
656 | class chgunixservice(commandserver.unixservice): | |
|
657 | def init(self): | |
|
658 | if self.repo: | |
|
659 | # one chgserver can serve multiple repos. drop repo information | 
|
660 | self.ui.setconfig('bundle', 'mainreporoot', '', 'repo') | |
|
661 | self.repo = None | |
|
662 | self._inithashstate() | |
|
663 | self._checkextensions() | |
|
664 | class cls(AutoExitMixIn, SocketServer.ForkingMixIn, | |
|
665 | SocketServer.UnixStreamServer): | |
|
666 | ui = self.ui | 
|
667 | repo = self.repo | |
|
668 | hashstate = self.hashstate | |
|
669 | baseaddress = self.baseaddress | |
|
670 | self.server = cls(self.address, _requesthandler) | |
|
671 | self.server.idletimeout = self.ui.configint( | |
|
672 | 'chgserver', 'idletimeout', self.server.idletimeout) | |
|
673 | self.server.startautoexitthread() | |
|
674 | self._createsymlink() | |
|
610 | def printbanner(self, address): | |
|
611 | # no "listening at" message should be printed to simulate hg behavior | |
|
612 | pass | |
|
613 | ||
|
614 | def shouldexit(self): | |
|
615 | if not self._issocketowner(): | |
|
616 | self.ui.debug('%s is not owned, exiting.\n' % self._realaddress) | |
|
617 | return True | |
|
618 | if time.time() - self._lastactive > self._idletimeout: | |
|
619 | self.ui.debug('being idle too long. exiting.\n') | |
|
620 | return True | |
|
621 | return False | |
|
675 | 622 | |
|
676 | def _inithashstate(self): | |
|
677 | self.baseaddress = self.address | |
|
678 | if self.ui.configbool('chgserver', 'skiphash', False): | |
|
679 | self.hashstate = None | |
|
680 | return | |
|
681 | self.hashstate = hashstate.fromui(self.ui) | |
|
682 | self.address = _hashaddress(self.address, self.hashstate.confighash) | |
|
623 | def newconnection(self): | |
|
624 | self._lastactive = time.time() | |
|
625 | ||
|
626 | def createcmdserver(self, repo, conn, fin, fout): | |
|
627 | return chgcmdserver(self.ui, repo, fin, fout, conn, | |
|
628 | self._hashstate, self._baseaddress) | |
|
683 | 629 | |
|
684 | def _checkextensions(self): | |
|
685 | if not self.hashstate: | |
|
686 | return | |
|
687 | if extensions.notloaded(): | |
|
688 | # one or more extensions failed to load. mtimehash becomes | |
|
689 | # meaningless because we do not know the paths of those extensions. | |
|
690 | # set mtimehash to an illegal hash value to invalidate the server. | |
|
691 | self.hashstate.mtimehash = '' | |
|
692 | ||
|
693 | def _createsymlink(self): | |
|
694 | if self.baseaddress == self.address: | |
|
695 | return | |
|
696 | tempaddress = _tempaddress(self.baseaddress) | |
|
697 | os.symlink(os.path.basename(self.address), tempaddress) | |
|
698 | util.rename(tempaddress, self.baseaddress) | |
|
699 | ||
|
700 | def run(self): | |
|
701 | try: | |
|
702 | self.server.serve_forever() | |
|
703 | finally: | |
|
704 | self.server.unlinksocketfile() | |
|
630 | def chgunixservice(ui, repo, opts): | |
|
631 | if repo: | |
|
632 | # one chgserver can serve multiple repos. drop repo information | 
|
633 | ui.setconfig('bundle', 'mainreporoot', '', 'repo') | |
|
634 | h = chgunixservicehandler(ui) | |
|
635 | return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h) | |
|
705 | 636 | |
|
706 | 637 | def uisetup(ui): |
|
707 | 638 | commandserver._servicemap['chgunix'] = chgunixservice |
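The refactored chgunixservicehandler keeps the old AutoExitMixIn's central trick: bind to a temporary name, remember the inode, then rename() over the public address. rename() is atomic, and the displaced server notices via its saved os.stat() result that it no longer owns the socket file and exits on its own; shouldexit() adds the idle-timeout check on top. A compact sketch of that ownership protocol (function names hypothetical):

    import os

    def bindandreplace(sock, address):
        temp = '%s.%d.tmp' % (address, os.getpid())
        sock.bind(temp)
        ownstat = os.stat(temp)
        os.rename(temp, address)   # atomically displace any old server
        return ownstat

    def issocketowner(address, ownstat):
        try:
            st = os.stat(address)
            return (st.st_ino == ownstat.st_ino and
                    st.st_mtime == ownstat.st_mtime)
        except OSError:
            return False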
@@ -156,6 +156,8 b' If ``pagermode`` is not defined, the ``m' | |||
|
156 | 156 | from __future__ import absolute_import |
|
157 | 157 | |
|
158 | 158 | import os |
|
159 | ||
|
160 | from mercurial.i18n import _ | |
|
159 | 161 | from mercurial import ( |
|
160 | 162 | cmdutil, |
|
161 | 163 | commands, |
@@ -165,7 +167,6 b' from mercurial import (' | |||
|
165 | 167 | ui as uimod, |
|
166 | 168 | util, |
|
167 | 169 | ) |
|
168 | from mercurial.i18n import _ | |
|
169 | 170 | |
|
170 | 171 | cmdtable = {} |
|
171 | 172 | command = cmdutil.command(cmdtable) |
@@ -9,11 +9,11 b'' | |||
|
9 | 9 | |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | from mercurial.i18n import _ | |
|
12 | 13 | from mercurial import ( |
|
13 | 14 | cmdutil, |
|
14 | 15 | registrar, |
|
15 | 16 | ) |
|
16 | from mercurial.i18n import _ | |
|
17 | 17 | |
|
18 | 18 | from . import ( |
|
19 | 19 | convcmd, |
@@ -10,11 +10,12 b'' | |||
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import os |
|
13 | ||
|
14 | from mercurial.i18n import _ | |
|
13 | 15 | from mercurial import ( |
|
14 | 16 | demandimport, |
|
15 | 17 | error |
|
16 | 18 | ) |
|
17 | from mercurial.i18n import _ | |
|
18 | 19 | from . import common |
|
19 | 20 | |
|
20 | 21 | # these do not work with demandimport, blacklist |
@@ -7,20 +7,20 b'' | |||
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import base64 |
|
10 | import cPickle as pickle | |
|
11 | 10 | import datetime |
|
12 | 11 | import errno |
|
13 | 12 | import os |
|
14 | 13 | import re |
|
15 | 14 | import subprocess |
|
16 | 15 | |
|
16 | from mercurial.i18n import _ | |
|
17 | 17 | from mercurial import ( |
|
18 | 18 | error, |
|
19 | 19 | phases, |
|
20 | 20 | util, |
|
21 | 21 | ) |
|
22 | from mercurial.i18n import _ | |
|
23 | 22 | |
|
23 | pickle = util.pickle | |
|
24 | 24 | propertycache = util.propertycache |
|
25 | 25 | |
|
26 | 26 | def encodeargs(args): |
@@ -10,13 +10,13 b' import os' | |||
|
10 | 10 | import shlex |
|
11 | 11 | import shutil |
|
12 | 12 | |
|
13 | from mercurial.i18n import _ | |
|
13 | 14 | from mercurial import ( |
|
14 | 15 | encoding, |
|
15 | 16 | error, |
|
16 | 17 | hg, |
|
17 | 18 | util, |
|
18 | 19 | ) |
|
19 | from mercurial.i18n import _ | |
|
20 | 20 | |
|
21 | 21 | from . import ( |
|
22 | 22 | bzr, |
@@ -11,12 +11,12 b' import os' | |||
|
11 | 11 | import re |
|
12 | 12 | import socket |
|
13 | 13 | |
|
14 | from mercurial.i18n import _ | |
|
14 | 15 | from mercurial import ( |
|
15 | 16 | encoding, |
|
16 | 17 | error, |
|
17 | 18 | util, |
|
18 | 19 | ) |
|
19 | from mercurial.i18n import _ | |
|
20 | 20 | |
|
21 | 21 | from . import ( |
|
22 | 22 | common, |
@@ -6,15 +6,16 b'' | |||
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | import cPickle as pickle | |
|
10 | 9 | import os |
|
11 | 10 | import re |
|
12 | 11 | |
|
12 | from mercurial.i18n import _ | |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | hook, |
|
15 | 15 | util, |
|
16 | 16 | ) |
|
17 | from mercurial.i18n import _ | |
|
17 | ||
|
18 | pickle = util.pickle | |
|
18 | 19 | |
|
19 | 20 | class logentry(object): |
|
20 | 21 | '''Class logentry has the following attributes: |
@@ -7,10 +7,11 b' from __future__ import absolute_import' | |||
|
7 | 7 | |
|
8 | 8 | import posixpath |
|
9 | 9 | import shlex |
|
10 | ||
|
11 | from mercurial.i18n import _ | |
|
10 | 12 | from mercurial import ( |
|
11 | 13 | error, |
|
12 | 14 | ) |
|
13 | from mercurial.i18n import _ | |
|
14 | 15 | from . import common |
|
15 | 16 | SKIPREV = common.SKIPREV |
|
16 | 17 |
@@ -7,12 +7,13 b'' | |||
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import os |
|
10 | ||
|
11 | from mercurial.i18n import _ | |
|
10 | 12 | from mercurial import ( |
|
11 | 13 | config, |
|
12 | 14 | error, |
|
13 | 15 | node as nodemod, |
|
14 | 16 | ) |
|
15 | from mercurial.i18n import _ | |
|
16 | 17 | |
|
17 | 18 | from . import ( |
|
18 | 19 | common, |
@@ -12,12 +12,13 b' import os' | |||
|
12 | 12 | import shutil |
|
13 | 13 | import stat |
|
14 | 14 | import tempfile |
|
15 | ||
|
16 | from mercurial.i18n import _ | |
|
15 | 17 | from mercurial import ( |
|
16 | 18 | encoding, |
|
17 | 19 | error, |
|
18 | 20 | util, |
|
19 | 21 | ) |
|
20 | from mercurial.i18n import _ | |
|
21 | 22 | from . import common |
|
22 | 23 | |
|
23 | 24 | class gnuarch_source(common.converter_source, common.commandline): |
@@ -22,6 +22,7 b' import os' | |||
|
22 | 22 | import re |
|
23 | 23 | import time |
|
24 | 24 | |
|
25 | from mercurial.i18n import _ | |
|
25 | 26 | from mercurial import ( |
|
26 | 27 | bookmarks, |
|
27 | 28 | context, |
@@ -37,7 +38,6 b' from mercurial import (' | |||
|
37 | 38 | ) |
|
38 | 39 | stringio = util.stringio |
|
39 | 40 | |
|
40 | from mercurial.i18n import _ | |
|
41 | 41 | from . import common |
|
42 | 42 | mapfile = common.mapfile |
|
43 | 43 | NoRepo = common.NoRepo |
@@ -10,11 +10,11 b' from __future__ import absolute_import' | |||
|
10 | 10 | import os |
|
11 | 11 | import re |
|
12 | 12 | |
|
13 | from mercurial.i18n import _ | |
|
13 | 14 | from mercurial import ( |
|
14 | 15 | error, |
|
15 | 16 | util, |
|
16 | 17 | ) |
|
17 | from mercurial.i18n import _ | |
|
18 | 18 | |
|
19 | 19 | from . import common |
|
20 | 20 |
@@ -9,11 +9,11 b' from __future__ import absolute_import' | |||
|
9 | 9 | import marshal |
|
10 | 10 | import re |
|
11 | 11 | |
|
12 | from mercurial.i18n import _ | |
|
12 | 13 | from mercurial import ( |
|
13 | 14 | error, |
|
14 | 15 | util, |
|
15 | 16 | ) |
|
16 | from mercurial.i18n import _ | |
|
17 | 17 | |
|
18 | 18 | from . import common |
|
19 | 19 |
@@ -3,13 +3,13 b'' | |||
|
3 | 3 | # Copyright(C) 2007 Daniel Holth et al |
|
4 | 4 | from __future__ import absolute_import |
|
5 | 5 | |
|
6 | import cPickle as pickle | |
|
7 | 6 | import os |
|
8 | 7 | import re |
|
9 | 8 | import sys |
|
10 | 9 | import tempfile |
|
11 | 10 | import xml.dom.minidom |
|
12 | 11 | |
|
12 | from mercurial.i18n import _ | |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | encoding, |
|
15 | 15 | error, |
@@ -17,10 +17,10 b' from mercurial import (' | |||
|
17 | 17 | strutil, |
|
18 | 18 | util, |
|
19 | 19 | ) |
|
20 | from mercurial.i18n import _ | |
|
21 | 20 | |
|
22 | 21 | from . import common |
|
23 | 22 | |
|
23 | pickle = util.pickle | |
|
24 | 24 | stringio = util.stringio |
|
25 | 25 | propertycache = util.propertycache |
|
26 | 26 | urlerr = util.urlerr |
@@ -102,8 +102,7 b' def monkeypatch_method(cls):' | |||
|
102 | 102 | |
|
103 | 103 | @monkeypatch_method(passwordmgr) |
|
104 | 104 | def find_user_password(self, realm, authuri): |
|
105 | user, passwd = urlreq.httppasswordmgrwithdefaultrealm.find_user_password( | 
|
106 | self, realm, authuri) | |
|
105 | user, passwd = self.passwddb.find_user_password(realm, authuri) | |
|
107 | 106 | if user and passwd: |
|
108 | 107 | self._writedebug(user, passwd) |
|
109 | 108 | return (user, passwd) |
@@ -7,12 +7,23 b'' | |||
|
7 | 7 | |
|
8 | 8 | '''pull, update and merge in one command (DEPRECATED)''' |
|
9 | 9 | |
|
10 | from __future__ import absolute_import | |
|
11 | ||
|
10 | 12 | from mercurial.i18n import _ |
|
11 | from mercurial.node import short | 
|
12 | from mercurial import commands, cmdutil, hg, util, error | |
|
13 | from mercurial.lock import release | |
|
14 | from mercurial import exchange | 
|
13 | from mercurial.node import ( | |
|
14 | short, | |
|
15 | ) | |
|
16 | from mercurial import ( | |
|
17 | cmdutil, | |
|
18 | commands, | |
|
19 | error, | |
|
20 | exchange, | |
|
21 | hg, | |
|
22 | lock, | |
|
23 | util, | |
|
24 | ) | |
|
15 | 25 | |
|
26 | release = lock.release | |
|
16 | 27 | cmdtable = {} |
|
17 | 28 | command = cmdutil.command(cmdtable) |
|
18 | 29 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
@@ -91,10 +91,12 b' will disable itself if any of those are ' | |||
|
91 | 91 | |
|
92 | 92 | from __future__ import absolute_import |
|
93 | 93 | |
|
94 | import hashlib | |
|
94 | 95 | import os |
|
95 | 96 | import stat |
|
96 | 97 | import sys |
|
97 | 98 | |
|
99 | from mercurial.i18n import _ | |
|
98 | 100 | from mercurial import ( |
|
99 | 101 | context, |
|
100 | 102 | extensions, |
@@ -105,7 +107,6 b' from mercurial import (' | |||
|
105 | 107 | util, |
|
106 | 108 | ) |
|
107 | 109 | from mercurial import match as matchmod |
|
108 | from mercurial.i18n import _ | |
|
109 | 110 | |
|
110 | 111 | from . import ( |
|
111 | 112 | state, |
@@ -141,7 +142,7 b' def _hashignore(ignore):' | |||
|
141 | 142 | copy. |
|
142 | 143 | |
|
143 | 144 | """ |
|
144 | sha1 = util.sha1() | 
|
145 | sha1 = hashlib.sha1() | |
|
145 | 146 | if util.safehasattr(ignore, 'includepat'): |
|
146 | 147 | sha1.update(ignore.includepat) |
|
147 | 148 | sha1.update('\0\0') |
@@ -12,8 +12,8 b' import os' | |||
|
12 | 12 | import socket |
|
13 | 13 | import struct |
|
14 | 14 | |
|
15 | from mercurial.i18n import _ | |
|
15 | 16 | from mercurial import pathutil |
|
16 | from mercurial.i18n import _ | |
|
17 | 17 | |
|
18 | 18 | _version = 4 |
|
19 | 19 | _versionformat = ">I" |
@@ -5,10 +5,21 b'' | |||
|
5 | 5 | |
|
6 | 6 | '''commands to sign and verify changesets''' |
|
7 | 7 | |
|
8 | import os, tempfile, binascii | |
|
9 | from mercurial import util, commands, match, cmdutil, error | |
|
10 | from mercurial import node as hgnode | |
|
8 | from __future__ import absolute_import | |
|
9 | ||
|
10 | import binascii | |
|
11 | import os | |
|
12 | import tempfile | |
|
13 | ||
|
11 | 14 | from mercurial.i18n import _ |
|
15 | from mercurial import ( | |
|
16 | cmdutil, | |
|
17 | commands, | |
|
18 | error, | |
|
19 | match, | |
|
20 | node as hgnode, | |
|
21 | util, | |
|
22 | ) | |
|
12 | 23 | |
|
13 | 24 | cmdtable = {} |
|
14 | 25 | command = cmdutil.command(cmdtable) |
@@ -187,7 +198,7 b' def sigcheck(ui, repo, rev):' | |||
|
187 | 198 | return |
|
188 | 199 | |
|
189 | 200 | # print summary |
|
190 | ui.write("%s is signed by:\n" % hgnode.short(rev)) | |
|
201 | ui.write(_("%s is signed by:\n") % hgnode.short(rev)) | |
|
191 | 202 | for key in keys: |
|
192 | 203 | ui.write(" %s\n" % keystr(ui, key)) |
|
193 | 204 |
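The sigcheck hunk above wraps the user-visible message in `_()`, marking it for translation; the bare string literal was invisible to gettext. A small self-contained illustration using the standard library's gettext (a stand-in for mercurial.i18n, which works the same way):

    import gettext

    # NullTranslations plays the role of a real message catalog here.
    _ = gettext.NullTranslations().gettext
    print(_("%s is signed by:\n") % "abc123")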
@@ -15,8 +15,13 b' commands. When this options is given, an' | |||
|
15 | 15 | revision graph is also shown. |
|
16 | 16 | ''' |
|
17 | 17 | |
|
18 | from __future__ import absolute_import | |
|
19 | ||
|
18 | 20 | from mercurial.i18n import _ |
|
19 | from mercurial import cmdutil, commands | |
|
21 | from mercurial import ( | |
|
22 | cmdutil, | |
|
23 | commands, | |
|
24 | ) | |
|
20 | 25 | |
|
21 | 26 | cmdtable = {} |
|
22 | 27 | command = cmdutil.command(cmdtable) |
@@ -34,10 +34,23 b' Revisions context menu will now display ' | |||
|
34 | 34 | vdiff on hovered and selected revisions. |
|
35 | 35 | ''' |
|
36 | 36 | |
|
37 | from __future__ import absolute_import | |
|
38 | ||
|
37 | 39 | import os |
|
38 | from mercurial import cmdutil, commands, patch, scmutil, obsolete | |
|
39 | from mercurial.node import nullid, nullrev, short | |
|
40 | ||
|
40 | 41 | from mercurial.i18n import _ |
|
42 | from mercurial.node import ( | |
|
43 | nullid, | |
|
44 | nullrev, | |
|
45 | short, | |
|
46 | ) | |
|
47 | from mercurial import ( | |
|
48 | cmdutil, | |
|
49 | commands, | |
|
50 | obsolete, | |
|
51 | patch, | |
|
52 | scmutil, | |
|
53 | ) | |
|
41 | 54 | |
|
42 | 55 | cmdtable = {} |
|
43 | 56 | command = cmdutil.command(cmdtable) |
@@ -68,13 +81,13 b' def difftree(ui, repo, node1=None, node2' | |||
|
68 | 81 | |
|
69 | 82 | for f in modified: |
|
70 | 83 | # TODO get file permissions |
|
71 | ui.write(":100664 100664 %s %s M\t%s\t%s\n" % | |
|
84 | ui.write((":100664 100664 %s %s M\t%s\t%s\n") % | |
|
72 | 85 | (short(mmap[f]), short(mmap2[f]), f, f)) |
|
73 | 86 | for f in added: |
|
74 | ui.write(":000000 100664 %s %s N\t%s\t%s\n" % | |
|
87 | ui.write((":000000 100664 %s %s N\t%s\t%s\n") % | |
|
75 | 88 | (empty, short(mmap2[f]), f, f)) |
|
76 | 89 | for f in removed: |
|
77 | ui.write(":100664 000000 %s %s D\t%s\t%s\n" % | |
|
90 | ui.write((":100664 000000 %s %s D\t%s\t%s\n") % | |
|
78 | 91 | (short(mmap[f]), empty, f, f)) |
|
79 | 92 | ## |
|
80 | 93 |
@@ -26,9 +26,21 b' Pygments will try very hard to identify ' | |||
|
26 | 26 | match (even matches with a low confidence score) will be used. |
|
27 | 27 | """ |
|
28 | 28 | |
|
29 | import highlight | |
|
30 | from mercurial.hgweb import webcommands, webutil, common | |
|
31 | from mercurial import extensions, encoding, fileset | |
|
29 | from __future__ import absolute_import | |
|
30 | ||
|
31 | from . import highlight | |
|
32 | from mercurial.hgweb import ( | |
|
33 | common, | |
|
34 | webcommands, | |
|
35 | webutil, | |
|
36 | ) | |
|
37 | ||
|
38 | from mercurial import ( | |
|
39 | encoding, | |
|
40 | extensions, | |
|
41 | fileset, | |
|
42 | ) | |
|
43 | ||
|
32 | 44 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
|
33 | 45 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
34 | 46 | # be specifying the version(s) of Mercurial they are tested with, or |
@@ -8,14 +8,27 b'' | |||
|
8 | 8 | # The original module was split in an interface and an implementation |
|
9 | 9 | # file to defer pygments loading and speedup extension setup. |
|
10 | 10 | |
|
11 | from __future__ import absolute_import | |
|
12 | ||
|
13 | import pygments | |
|
14 | import pygments.formatters | |
|
15 | import pygments.lexers | |
|
16 | import pygments.util | |
|
17 | ||
|
11 | 18 | from mercurial import demandimport |
|
12 | 19 | demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__']) |
|
13 | from mercurial import util, encoding | |
|
20 | ||
|
21 | from mercurial import ( | |
|
22 | encoding, | |
|
23 | util, | |
|
24 | ) | |
|
14 | 25 | |
|
15 | from pygments import highlight | |
|
16 | from pygments.util import ClassNotFound | |
|
17 | from pygments.lexers import guess_lexer, guess_lexer_for_filename, TextLexer | |
|
18 | from pygments.formatters import HtmlFormatter | |
|
26 | highlight = pygments.highlight | |
|
27 | ClassNotFound = pygments.util.ClassNotFound | |
|
28 | guess_lexer = pygments.lexers.guess_lexer | |
|
29 | guess_lexer_for_filename = pygments.lexers.guess_lexer_for_filename | |
|
30 | TextLexer = pygments.lexers.TextLexer | |
|
31 | HtmlFormatter = pygments.formatters.HtmlFormatter | |
|
19 | 32 | |
|
20 | 33 | SYNTAX_CSS = ('\n<link rel="stylesheet" href="{url}highlightcss" ' |
|
21 | 34 | 'type="text/css" />') |
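Why the hunk above reshapes the pygments imports: pygments touches pkgutil/pkg_resources at import time in ways that confuse Mercurial's lazy importer, so the module extends demandimport.ignore first, imports the qualified submodules eagerly, and then re-exports the short names the rest of the file uses. A sketch of the guard pattern, assuming mercurial and pygments are both importable:

    from mercurial import demandimport
    demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__'])

    import pygments
    import pygments.lexers

    # Bind the short names once, instead of from-imports that the lazy
    # importer would otherwise have to proxy.
    guess_lexer = pygments.lexers.guess_lexer
    TextLexer = pygments.lexers.TextLexer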
@@ -68,7 +81,7 b' def pygmentize(field, fctx, style, tmpl,' | |||
|
68 | 81 | coloriter = (s.encode(encoding.encoding, 'replace') |
|
69 | 82 | for s in colorized.splitlines()) |
|
70 | 83 | |
|
71 | tmpl.filters['colorize'] = lambda x: coloriter.next() | |
|
84 | tmpl.filters['colorize'] = lambda x: next(coloriter) | |
|
72 | 85 | |
|
73 | 86 | oldl = tmpl.cache[field] |
|
74 | 87 | newl = oldl.replace('line|escape', 'line|colorize') |
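The `next(coloriter)` change swaps the iterator's `.next()` method, which Python 3 renamed to `__next__`, for the `next()` builtin, which works on both versions. Sketch:

    coloriter = iter(['<span>a</span>', '<span>b</span>'])
    colorize = lambda line: next(coloriter)  # coloriter.next() is Python 2 only
    print(colorize('ignored'))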
@@ -169,30 +169,35 b' the drop to be implicit for missing comm' | |||
|
169 | 169 | |
|
170 | 170 | """ |
|
171 | 171 | |
|
172 | import pickle | |
|
172 | from __future__ import absolute_import | |
|
173 | ||
|
173 | 174 | import errno |
|
174 | 175 | import os |
|
175 | 176 | import sys |
|
176 | 177 | |
|
177 | from mercurial import bundle2 | |
|
178 | from mercurial import cmdutil | |
|
179 | from mercurial import discovery | |
|
180 | from mercurial import error | |
|
181 | from mercurial import copies | |
|
182 | from mercurial import context | |
|
183 | from mercurial import destutil | |
|
184 | from mercurial import exchange | |
|
185 | from mercurial import extensions | |
|
186 | from mercurial import hg | |
|
187 | from mercurial import node | |
|
188 | from mercurial import repair | |
|
189 | from mercurial import scmutil | |
|
190 | from mercurial import util | |
|
191 | from mercurial import obsolete | |
|
192 | from mercurial import merge as mergemod | |
|
193 | from mercurial.lock import release | |
|
194 | 178 | from mercurial.i18n import _ |
|
179 | from mercurial import ( | |
|
180 | bundle2, | |
|
181 | cmdutil, | |
|
182 | context, | |
|
183 | copies, | |
|
184 | destutil, | |
|
185 | discovery, | |
|
186 | error, | |
|
187 | exchange, | |
|
188 | extensions, | |
|
189 | hg, | |
|
190 | lock, | |
|
191 | merge as mergemod, | |
|
192 | node, | |
|
193 | obsolete, | |
|
194 | repair, | |
|
195 | scmutil, | |
|
196 | util, | |
|
197 | ) | |
|
195 | 198 | |
|
199 | pickle = util.pickle | |
|
200 | release = lock.release | |
|
196 | 201 | cmdtable = {} |
|
197 | 202 | command = cmdutil.command(cmdtable) |
|
198 | 203 | |
@@ -415,9 +420,7 b' class histeditaction(object):' | |||
|
415 | 420 | <hash> <rev> <summary> |
|
416 | 421 | """ |
|
417 | 422 | ctx = self.repo[self.node] |
|
418 | summary = '' | |
|
419 | if ctx.description(): | |
|
420 | summary = ctx.description().splitlines()[0] | |
|
423 | summary = _getsummary(ctx) | |
|
421 | 424 | line = '%s %s %d %s' % (self.verb, ctx, ctx.rev(), summary) |
|
422 | 425 | # trim to 75 columns by default so it's not stupidly wide in my editor |
|
423 | 426 | # (the 5 more are left for verb) |
@@ -1264,6 +1267,14 b' def _newhistedit(ui, repo, state, revs, ' | |||
|
1264 | 1267 | 'histedit') |
|
1265 | 1268 | state.backupfile = backupfile |
|
1266 | 1269 | |
|
1270 | def _getsummary(ctx): | |
|
1271 | # a common pattern is to extract the summary but default to the empty | |
|
1272 | # string | |
|
1273 | summary = ctx.description() or '' | |
|
1274 | if summary: | |
|
1275 | summary = summary.splitlines()[0] | |
|
1276 | return summary | |
|
1277 | ||
|
1267 | 1278 | def bootstrapcontinue(ui, state, opts): |
|
1268 | 1279 | repo = state.repo |
|
1269 | 1280 | if state.actions: |
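The new `_getsummary` helper collapses a pattern repeated across histedit: take the first line of the commit description, defaulting to the empty string. A standalone equivalent, where the hypothetical `getsummary` takes a plain string rather than a changectx:

    def getsummary(description):
        # First line of a commit message, or '' for an empty message.
        description = description or ''
        return description.splitlines()[0] if description else ''

    assert getsummary('fix bug\n\ndetails') == 'fix bug'
    assert getsummary('') == ''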
@@ -1304,6 +1315,40 b' def ruleeditor(repo, ui, actions, editco' | |||
|
1304 | 1315 | |
|
1305 | 1316 | rules are in the format [ [act, ctx], ...] like in state.rules |
|
1306 | 1317 | """ |
|
1318 | if repo.ui.configbool("experimental", "histedit.autoverb"): | |
|
1319 | newact = util.sortdict() | |
|
1320 | for act in actions: | |
|
1321 | ctx = repo[act.node] | |
|
1322 | summary = _getsummary(ctx) | |
|
1323 | fword = summary.split(' ', 1)[0].lower() | |
|
1324 | added = False | |
|
1325 | ||
|
1326 | # if it doesn't end with the special character '!' just skip this | |
|
1327 | if fword.endswith('!'): | |
|
1328 | fword = fword[:-1] | |
|
1329 | if fword in primaryactions | secondaryactions | tertiaryactions: | |
|
1330 | act.verb = fword | |
|
1331 | # get the target summary | |
|
1332 | tsum = summary[len(fword) + 1:].lstrip() | |
|
1333 | # safe but slow: reverse iterate over the actions so we | |
|
1334 | # don't clash on two commits having the same summary | |
|
1335 | for na, l in reversed(list(newact.iteritems())): | |
|
1336 | actx = repo[na.node] | |
|
1337 | asum = _getsummary(actx) | |
|
1338 | if asum == tsum: | |
|
1339 | added = True | |
|
1340 | l.append(act) | |
|
1341 | break | |
|
1342 | ||
|
1343 | if not added: | |
|
1344 | newact[act] = [] | |
|
1345 | ||
|
1346 | # copy over and flatten the new list | |
|
1347 | actions = [] | |
|
1348 | for na, l in newact.iteritems(): | |
|
1349 | actions.append(na) | |
|
1350 | actions += l | |
|
1351 | ||
|
1307 | 1352 | rules = '\n'.join([act.torule() for act in actions]) |
|
1308 | 1353 | rules += '\n\n' |
|
1309 | 1354 | rules += editcomment |
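The autoverb block above keys off the first word of a commit summary: a trailing '!' marks it as a histedit verb, and the rest of the summary names the commit to attach the action to. A hedged sketch of just the parsing step; the verb set is illustrative (histedit derives it from its primary/secondary/tertiary action tables), and the repo lookups are omitted:

    KNOWN_VERBS = {'pick', 'edit', 'fold', 'roll', 'drop', 'mess'}

    def parseautoverb(summary):
        # Returns (verb, target summary) or (None, summary) when not tagged.
        fword = summary.split(' ', 1)[0].lower()
        if fword.endswith('!') and fword[:-1] in KNOWN_VERBS:
            return fword[:-1], summary[len(fword) + 1:].lstrip()
        return None, summary

    assert parseautoverb('fold! fix typo') == ('fold', 'fix typo')
    assert parseautoverb('fix typo') == (None, 'fix typo')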
@@ -89,8 +89,8 b' import os' | |||
|
89 | 89 | import re |
|
90 | 90 | import tempfile |
|
91 | 91 | |
|
92 | from mercurial.i18n import _ | |
|
92 | 93 | from mercurial.hgweb import webcommands |
|
93 | from mercurial.i18n import _ | |
|
94 | 94 | |
|
95 | 95 | from mercurial import ( |
|
96 | 96 | cmdutil, |
@@ -455,7 +455,7 b' def demo(ui, repo, *args, **opts):' | |||
|
455 | 455 | |
|
456 | 456 | uisetup(ui) |
|
457 | 457 | reposetup(ui, repo) |
|
458 | ui.write('[extensions]\nkeyword =\n') | |
|
458 | ui.write(('[extensions]\nkeyword =\n')) | |
|
459 | 459 | demoitems('keyword', ui.configitems('keyword')) |
|
460 | 460 | demoitems('keywordset', ui.configitems('keywordset')) |
|
461 | 461 | demoitems('keywordmaps', kwmaps.iteritems()) |
@@ -735,7 +735,7 b' def reposetup(ui, repo):' | |||
|
735 | 735 | def kwfilectx_cmp(orig, self, fctx): |
|
736 | 736 | # keyword affects data size, comparing wdir and filelog size does |
|
737 | 737 | # not make sense |
|
738 | if (fctx._filerev is None and | |
|
738 | if (fctx._filenode is None and | |
|
739 | 739 | (self._repo._encodefilterpats or |
|
740 | 740 | kwt.match(fctx.path()) and 'l' not in fctx.flags() or |
|
741 | 741 | self.size() - 4 == fctx.size()) or |
@@ -104,14 +104,20 b' largefile. To add the first largefile to' | |||
|
104 | 104 | explicitly do so with the --large flag passed to the :hg:`add` |
|
105 | 105 | command. |
|
106 | 106 | ''' |
|
107 | from __future__ import absolute_import | |
|
107 | 108 | |
|
108 | from mercurial import hg, localrepo | |
|
109 | from mercurial import ( | |
|
110 | hg, | |
|
111 | localrepo, | |
|
112 | ) | |
|
109 | 113 | |
|
110 | import lfcommands | |
|
111 | import proto | |
|
112 | import reposetup | |
|
113 | import uisetup as uisetupmod | |
|
114 | import overrides | |
|
114 | from . import ( | |
|
115 | lfcommands, | |
|
116 | overrides, | |
|
117 | proto, | |
|
118 | reposetup, | |
|
119 | uisetup as uisetupmod, | |
|
120 | ) | |
|
115 | 121 | |
|
116 | 122 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
|
117 | 123 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
@@ -7,13 +7,13 b'' | |||
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''base class for store implementations and store-related utility code''' |
|
10 | from __future__ import absolute_import | |
|
10 | 11 | |
|
11 | import re | |
|
12 | ||
|
13 | from mercurial import util, node, hg, error | |
|
14 | 12 | from mercurial.i18n import _ |
|
15 | 13 | |
|
16 | import lfutil | |
|
14 | from mercurial import node, util | |
|
15 | ||
|
16 | from . import lfutil | |
|
17 | 17 | |
|
18 | 18 | class StoreError(Exception): |
|
19 | 19 | '''Raised when there is a problem getting files from or putting |
@@ -116,19 +116,26 b' class basestore(object):' | |||
|
116 | 116 | '''Verify the existence (and, optionally, contents) of every big |
|
117 | 117 | file revision referenced by every changeset in revs. |
|
118 | 118 | Return 0 if all is well, non-zero on any errors.''' |
|
119 | failed = False | |
|
120 | 119 | |
|
121 | 120 | self.ui.status(_('searching %d changesets for largefiles\n') % |
|
122 | 121 | len(revs)) |
|
123 | 122 | verified = set() # set of (filename, filenode) tuples |
|
124 | ||
|
123 | filestocheck = [] # list of (cset, filename, expectedhash) | |
|
125 | 124 | for rev in revs: |
|
126 | 125 | cctx = self.repo[rev] |
|
127 | 126 | cset = "%d:%s" % (cctx.rev(), node.short(cctx.node())) |
|
128 | 127 | |
|
129 | 128 | for standin in cctx: |
|
130 | if self._verifyfile(cctx, cset, contents, standin, verified): | |
|
131 | failed = True | |
|
129 | filename = lfutil.splitstandin(standin) | |
|
130 | if filename: | |
|
131 | fctx = cctx[standin] | |
|
132 | key = (filename, fctx.filenode()) | |
|
133 | if key not in verified: | |
|
134 | verified.add(key) | |
|
135 | expectedhash = fctx.data()[0:40] | |
|
136 | filestocheck.append((cset, filename, expectedhash)) | |
|
137 | ||
|
138 | failed = self._verifyfiles(contents, filestocheck) | |
|
132 | 139 | |
|
133 | 140 | numrevs = len(verified) |
|
134 | 141 | numlfiles = len(set([fname for (fname, fnode) in verified])) |
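The refactored `verify` above splits collection from checking: one pass deduplicates and builds `filestocheck`, then a single `_verifyfiles` call validates the whole batch, which lets remote stores answer in one round trip instead of one request per file. A minimal sketch of the shape, with a fake batch checker passed in:

    def verify(revisions, verifyfiles):
        # verifyfiles takes the whole batch and returns True on any failure.
        filestocheck = []
        seen = set()
        for cset, filename, expectedhash in revisions:
            key = (filename, expectedhash)
            if key not in seen:
                seen.add(key)
                filestocheck.append((cset, filename, expectedhash))
        return int(verifyfiles(filestocheck))

    batch = [('0:abc', 'big.bin', '1' * 40), ('0:abc', 'big.bin', '1' * 40)]
    assert verify(batch, lambda files: len(files) != 1) == 0  # duplicate deduped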
@@ -150,72 +157,10 b' class basestore(object):' | |||
|
150 | 157 | exist in the store).''' |
|
151 | 158 | raise NotImplementedError('abstract method') |
|
152 | 159 | |
|
153 | def _verifyfile(self, cctx, cset, contents, standin, verified): | |
|
154 | '''Perform the actual verification of | |
|
155 | 'cset' is only used in warnings. | |
|
160 | def _verifyfiles(self, contents, filestocheck): | |
|
161 | '''Perform the actual verification of files in the store. | |
|
156 | 162 | 'contents' controls verification of content hash. |
|
157 | 'standin' is the standin path of the largefile to verify. | |
|
158 | 'verified' is maintained as a set of already verified files. | |
|
159 | Returns _true_ if it is a standin and any problems are found! | |
|
163 | 'filestocheck' is list of files to check. | |
|
164 | Returns _true_ if any problems are found! | |
|
160 | 165 | ''' |
|
161 | 166 | raise NotImplementedError('abstract method') |
|
162 | ||
|
163 | import localstore, wirestore | |
|
164 | ||
|
165 | _storeprovider = { | |
|
166 | 'file': [localstore.localstore], | |
|
167 | 'http': [wirestore.wirestore], | |
|
168 | 'https': [wirestore.wirestore], | |
|
169 | 'ssh': [wirestore.wirestore], | |
|
170 | } | |
|
171 | ||
|
172 | _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') | |
|
173 | ||
|
174 | # During clone this function is passed the src's ui object | |
|
175 | # but it needs the dest's ui object so it can read out of | |
|
176 | # the config file. Use repo.ui instead. | |
|
177 | def _openstore(repo, remote=None, put=False): | |
|
178 | ui = repo.ui | |
|
179 | ||
|
180 | if not remote: | |
|
181 | lfpullsource = getattr(repo, 'lfpullsource', None) | |
|
182 | if lfpullsource: | |
|
183 | path = ui.expandpath(lfpullsource) | |
|
184 | elif put: | |
|
185 | path = ui.expandpath('default-push', 'default') | |
|
186 | else: | |
|
187 | path = ui.expandpath('default') | |
|
188 | ||
|
189 | # ui.expandpath() leaves 'default-push' and 'default' alone if | |
|
190 | # they cannot be expanded: fallback to the empty string, | |
|
191 | # meaning the current directory. | |
|
192 | if path == 'default-push' or path == 'default': | |
|
193 | path = '' | |
|
194 | remote = repo | |
|
195 | else: | |
|
196 | path, _branches = hg.parseurl(path) | |
|
197 | remote = hg.peer(repo, {}, path) | |
|
198 | ||
|
199 | # The path could be a scheme so use Mercurial's normal functionality | |
|
200 | # to resolve the scheme to a repository and use its path | |
|
201 | path = util.safehasattr(remote, 'url') and remote.url() or remote.path | |
|
202 | ||
|
203 | match = _scheme_re.match(path) | |
|
204 | if not match: # regular filesystem path | |
|
205 | scheme = 'file' | |
|
206 | else: | |
|
207 | scheme = match.group(1) | |
|
208 | ||
|
209 | try: | |
|
210 | storeproviders = _storeprovider[scheme] | |
|
211 | except KeyError: | |
|
212 | raise error.Abort(_('unsupported URL scheme %r') % scheme) | |
|
213 | ||
|
214 | for classobj in storeproviders: | |
|
215 | try: | |
|
216 | return classobj(ui, repo, remote) | |
|
217 | except lfutil.storeprotonotcapable: | |
|
218 | pass | |
|
219 | ||
|
220 | raise error.Abort(_('%s does not appear to be a largefile store') % | |
|
221 | util.hidepassword(path)) |
@@ -7,20 +7,39 b'' | |||
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''High-level command function for lfconvert, plus the cmdtable.''' |
|
10 | from __future__ import absolute_import | |
|
10 | 11 | |
|
11 | import os, errno | |
|
12 | import errno | |
|
13 | import hashlib | |
|
14 | import os | |
|
12 | 15 | import shutil |
|
13 | 16 | |
|
14 | from mercurial import util, match as match_, hg, node, context, error, \ | |
|
15 | cmdutil, scmutil, commands | |
|
16 | 17 | from mercurial.i18n import _ |
|
17 | from mercurial.lock import release | |
|
18 | 18 | |
|
19 | from hgext.convert import convcmd | |
|
20 | from hgext.convert import filemap | |
|
19 | from mercurial import ( | |
|
20 | cmdutil, | |
|
21 | commands, | |
|
22 | context, | |
|
23 | error, | |
|
24 | hg, | |
|
25 | lock, | |
|
26 | match as matchmod, | |
|
27 | node, | |
|
28 | scmutil, | |
|
29 | util, | |
|
30 | ) | |
|
21 | 31 | |
|
22 | import lfutil | |
|
23 | import basestore | |
|
32 | from ..convert import ( | |
|
33 | convcmd, | |
|
34 | filemap, | |
|
35 | ) | |
|
36 | ||
|
37 | from . import ( | |
|
38 | lfutil, | |
|
39 | storefactory | |
|
40 | ) | |
|
41 | ||
|
42 | release = lock.release | |
|
24 | 43 | |
|
25 | 44 | # -- Commands ---------------------------------------------------------- |
|
26 | 45 | |
@@ -92,7 +111,7 b' def lfconvert(ui, src, dest, *pats, **op' | |||
|
92 | 111 | if not pats: |
|
93 | 112 | pats = ui.configlist(lfutil.longname, 'patterns', default=[]) |
|
94 | 113 | if pats: |
|
95 | matcher = match_.match(rsrc.root, '', list(pats)) | |
|
114 | matcher = matchmod.match(rsrc.root, '', list(pats)) | |
|
96 | 115 | else: |
|
97 | 116 | matcher = None |
|
98 | 117 | |
@@ -211,7 +230,7 b' def _lfconvert_addchangeset(rsrc, rdst, ' | |||
|
211 | 230 | raise error.Abort(_('largefile %s becomes symlink') % f) |
|
212 | 231 | |
|
213 | 232 | # largefile was modified, update standins |
|
214 | m = util.sha1('') | |
|
233 | m = hashlib.sha1('') | |
|
215 | 234 | m.update(ctx[f].data()) |
|
216 | 235 | hash = m.hexdigest() |
|
217 | 236 | if f not in lfiletohash or lfiletohash[f] != hash: |
@@ -337,7 +356,7 b' def uploadlfiles(ui, rsrc, rdst, files):' | |||
|
337 | 356 | if not files: |
|
338 | 357 | return |
|
339 | 358 | |
|
340 | store = basestore._openstore(rsrc, rdst, put=True) | |
|
359 | store = storefactory.openstore(rsrc, rdst, put=True) | |
|
341 | 360 | |
|
342 | 361 | at = 0 |
|
343 | 362 | ui.debug("sending statlfile command for %d largefiles\n" % len(files)) |
@@ -368,7 +387,7 b' def verifylfiles(ui, repo, all=False, co' | |||
|
368 | 387 | else: |
|
369 | 388 | revs = ['.'] |
|
370 | 389 | |
|
371 | store = basestore._openstore(repo) | |
|
390 | store = storefactory.openstore(repo) | |
|
372 | 391 | return store.verify(revs, contents=contents) |
|
373 | 392 | |
|
374 | 393 | def cachelfiles(ui, repo, node, filelist=None): |
@@ -394,7 +413,7 b' def cachelfiles(ui, repo, node, filelist' | |||
|
394 | 413 | toget.append((lfile, expectedhash)) |
|
395 | 414 | |
|
396 | 415 | if toget: |
|
397 | store = basestore._openstore(repo) | |
|
416 | store = storefactory.openstore(repo) | |
|
398 | 417 | ret = store.get(toget) |
|
399 | 418 | return ret |
|
400 | 419 |
@@ -7,21 +7,30 b'' | |||
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''largefiles utility code: must not import other modules in this package.''' |
|
10 | from __future__ import absolute_import | |
|
10 | 11 | |
|
12 | import copy | |
|
13 | import hashlib | |
|
11 | 14 | import os |
|
12 | 15 | import platform |
|
13 | 16 | import stat |
|
14 | import copy | |
|
17 | ||
|
18 | from mercurial.i18n import _ | |
|
15 | 19 | |
|
16 | from mercurial import dirstate, httpconnection, match as match_, util, scmutil | |
|
17 | from mercurial.i18n import _ | |
|
18 | from mercurial import node, error | |
|
20 | from mercurial import ( | |
|
21 | dirstate, | |
|
22 | error, | |
|
23 | httpconnection, | |
|
24 | match as matchmod, | |
|
25 | node, | |
|
26 | scmutil, | |
|
27 | util, | |
|
28 | ) | |
|
19 | 29 | |
|
20 | 30 | shortname = '.hglf' |
|
21 | 31 | shortnameslash = shortname + '/' |
|
22 | 32 | longname = 'largefiles' |
|
23 | 33 | |
|
24 | ||
|
25 | 34 | # -- Private worker functions ------------------------------------------ |
|
26 | 35 | |
|
27 | 36 | def getminsize(ui, assumelfiles, opt, default=10): |
@@ -152,7 +161,7 b' def openlfdirstate(ui, repo, create=True' | |||
|
152 | 161 | |
|
153 | 162 | def lfdirstatestatus(lfdirstate, repo): |
|
154 | 163 | wctx = repo['.'] |
|
155 | match = match_.always(repo.root, repo.getcwd()) | |
|
164 | match = matchmod.always(repo.root, repo.getcwd()) | |
|
156 | 165 | unsure, s = lfdirstate.status(match, [], False, False, False) |
|
157 | 166 | modified, clean = s.modified, s.clean |
|
158 | 167 | for lfile in unsure: |
@@ -180,12 +189,11 b' def listlfiles(repo, rev=None, matcher=N' | |||
|
180 | 189 | if rev is not None or repo.dirstate[f] != '?'] |
|
181 | 190 | |
|
182 | 191 | def instore(repo, hash, forcelocal=False): |
|
183 | '''Return true if a largefile with the given hash exists in the | |
|
184 | cache.''' | |
|
192 | '''Return true if a largefile with the given hash exists in the store''' | |
|
185 | 193 | return os.path.exists(storepath(repo, hash, forcelocal)) |
|
186 | 194 | |
|
187 | 195 | def storepath(repo, hash, forcelocal=False): |
|
188 | '''Return the correct location in the repository largefiles cache for a | |
|
196 | '''Return the correct location in the repository largefiles store for a | |
|
189 | 197 | file with the given hash.''' |
|
190 | 198 | if not forcelocal and repo.shared(): |
|
191 | 199 | return repo.vfs.reljoin(repo.sharedpath, longname, hash) |
@@ -251,7 +259,6 b' def copyalltostore(repo, node):' | |||
|
251 | 259 | realfile = splitstandin(filename) |
|
252 | 260 | copytostore(repo, ctx.node(), realfile) |
|
253 | 261 | |
|
254 | ||
|
255 | 262 | def copytostoreabsolute(repo, file, hash): |
|
256 | 263 | if inusercache(repo.ui, hash): |
|
257 | 264 | link(usercachepath(repo.ui, hash), storepath(repo, hash)) |
@@ -350,7 +357,7 b' def writestandin(repo, standin, hash, ex' | |||
|
350 | 357 | def copyandhash(instream, outfile): |
|
351 | 358 | '''Read bytes from instream (iterable) and write them to outfile, |
|
352 | 359 | computing the SHA-1 hash of the data along the way. Return the hash.''' |
|
353 | hasher = util.sha1('') | |
|
360 | hasher = hashlib.sha1('') | |
|
354 | 361 | for data in instream: |
|
355 | 362 | hasher.update(data) |
|
356 | 363 | outfile.write(data) |
@@ -362,7 +369,7 b' def hashrepofile(repo, file):' | |||
|
362 | 369 | def hashfile(file): |
|
363 | 370 | if not os.path.exists(file): |
|
364 | 371 | return '' |
|
365 | hasher = util.sha1('') | |
|
372 | hasher = hashlib.sha1('') | |
|
366 | 373 | fd = open(file, 'rb') |
|
367 | 374 | for data in util.filechunkiter(fd, 128 * 1024): |
|
368 | 375 | hasher.update(data) |
@@ -391,7 +398,7 b' def urljoin(first, second, *arg):' | |||
|
391 | 398 | def hexsha1(data): |
|
392 | 399 | """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like |
|
393 | 400 | object data""" |
|
394 | h = util.sha1() | |
|
401 | h = hashlib.sha1() | |
|
395 | 402 | for chunk in util.filechunkiter(data): |
|
396 | 403 | h.update(chunk) |
|
397 | 404 | return h.hexdigest() |
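`util.sha1` was a thin wrapper that these hunks replace with `hashlib.sha1` directly. The chunked update loop is the standard way to hash a stream without holding it in memory; a self-contained version of the pattern lfutil uses via util.filechunkiter:

    import hashlib
    import io

    def hexsha1(fileobj, chunksize=128 * 1024):
        # Feed fixed-size chunks into SHA-1 and return the hex digest.
        h = hashlib.sha1()
        for chunk in iter(lambda: fileobj.read(chunksize), b''):
            h.update(chunk)
        return h.hexdigest()

    assert hexsha1(io.BytesIO(b'')) == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'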
@@ -533,7 +540,7 b' def updatestandinsbymatch(repo, match):' | |||
|
533 | 540 | # otherwise to update all standins if the largefiles are |
|
534 | 541 | # large. |
|
535 | 542 | lfdirstate = openlfdirstate(ui, repo) |
|
536 | dirtymatch = match_.always(repo.root, repo.getcwd()) | |
|
543 | dirtymatch = matchmod.always(repo.root, repo.getcwd()) | |
|
537 | 544 | unsure, s = lfdirstate.status(dirtymatch, [], False, False, |
|
538 | 545 | False) |
|
539 | 546 | modifiedfiles = unsure + s.modified + s.added + s.removed |
@@ -7,11 +7,14 b'' | |||
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''store class for local filesystem''' |
|
10 | from __future__ import absolute_import | |
|
10 | 11 | |
|
11 | 12 | from mercurial.i18n import _ |
|
12 | 13 | |
|
13 | import lfutil | |
|
14 | import basestore | |
|
14 | from . import ( | |
|
15 | basestore, | |
|
16 | lfutil, | |
|
17 | ) | |
|
15 | 18 | |
|
16 | 19 | class localstore(basestore.basestore): |
|
17 | 20 | '''localstore first attempts to grab files out of the store in the remote |
@@ -33,7 +36,6 b' class localstore(basestore.basestore):' | |||
|
33 | 36 | retval[hash] = lfutil.instore(self.remote, hash) |
|
34 | 37 | return retval |
|
35 | 38 | |
|
36 | ||
|
37 | 39 | def _getfile(self, tmpfile, filename, hash): |
|
38 | 40 | path = lfutil.findfile(self.remote, hash) |
|
39 | 41 | if not path: |
@@ -42,29 +44,23 b' class localstore(basestore.basestore):' | |||
|
42 | 44 | with open(path, 'rb') as fd: |
|
43 | 45 | return lfutil.copyandhash(fd, tmpfile) |
|
44 | 46 | |
|
45 | def _verifyfile(self, cctx, cset, contents, standin, verified): | |
|
46 | filename = lfutil.splitstandin(standin) | |
|
47 | if not filename: | |
|
48 | return False | |
|
49 | fctx = cctx[standin] | |
|
50 | key = (filename, fctx.filenode()) | |
|
51 | if key in verified: | |
|
52 | return False | |
|
53 | ||
|
54 | expecthash = fctx.data()[0:40] | |
|
55 | storepath, exists = lfutil.findstorepath(self.remote, expecthash) | |
|
56 | verified.add(key) | |
|
57 | if not exists: | |
|
58 | self.ui.warn( | |
|
59 | _('changeset %s: %s references missing %s\n') | |
|
60 | % (cset, filename, storepath)) | |
|
61 | return True # failed | |
|
62 | ||
|
63 | if contents: | |
|
64 | actualhash = lfutil.hashfile(storepath) | |
|
65 | if actualhash != expecthash: | |
|
47 | def _verifyfiles(self, contents, filestocheck): | |
|
48 | failed = False | |
|
49 | for cset, filename, expectedhash in filestocheck: | |
|
50 | storepath, exists = lfutil.findstorepath(self.repo, expectedhash) | |
|
51 | if not exists: | |
|
52 | storepath, exists = lfutil.findstorepath( | |
|
53 | self.remote, expectedhash) | |
|
54 | if not exists: | |
|
66 | 55 | self.ui.warn( |
|
67 | _('changeset %s: %s references corrupted %s\n') | |
|
56 | _('changeset %s: %s references missing %s\n') | |
|
68 | 57 | % (cset, filename, storepath)) |
|
69 | return True # failed | |
|
70 | return False | |
|
58 | failed = True | |
|
59 | elif contents: | |
|
60 | actualhash = lfutil.hashfile(storepath) | |
|
61 | if actualhash != expectedhash: | |
|
62 | self.ui.warn( | |
|
63 | _('changeset %s: %s references corrupted %s\n') | |
|
64 | % (cset, filename, storepath)) | |
|
65 | failed = True | |
|
66 | return failed |
@@ -7,17 +7,31 b'' | |||
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''Overridden Mercurial commands and functions for the largefiles extension''' |
|
10 | from __future__ import absolute_import | |
|
10 | 11 | |
|
11 | import os | |
|
12 | 12 | import copy |
|
13 | import os | |
|
13 | 14 | |
|
14 | from mercurial import hg, util, cmdutil, scmutil, match as match_, \ | |
|
15 | archival, pathutil, registrar, revset, error | |
|
16 | 15 | from mercurial.i18n import _ |
|
17 | 16 | |
|
18 | import lfutil | |
|
19 | import lfcommands | |
|
20 | import basestore | |
|
17 | from mercurial import ( | |
|
18 | archival, | |
|
19 | cmdutil, | |
|
20 | error, | |
|
21 | hg, | |
|
22 | match as matchmod, | |
|
23 | pathutil, | |
|
24 | registrar, | |
|
25 | revset, | |
|
26 | scmutil, | |
|
27 | util, | |
|
28 | ) | |
|
29 | ||
|
30 | from . import ( | |
|
31 | lfcommands, | |
|
32 | lfutil, | |
|
33 | storefactory, | |
|
34 | ) | |
|
21 | 35 | |
|
22 | 36 | # -- Utility functions: commonly/repeatedly needed functionality --------------- |
|
23 | 37 | |
@@ -99,13 +113,13 b' def addlargefiles(ui, repo, isaddremove,' | |||
|
99 | 113 | if lfutil.islfilesrepo(repo): |
|
100 | 114 | lfpats = ui.configlist(lfutil.longname, 'patterns', default=[]) |
|
101 | 115 | if lfpats: |
|
102 | lfmatcher = match_.match(repo.root, '', list(lfpats)) | |
|
116 | lfmatcher = matchmod.match(repo.root, '', list(lfpats)) | |
|
103 | 117 | |
|
104 | 118 | lfnames = [] |
|
105 | 119 | m = matcher |
|
106 | 120 | |
|
107 | 121 | wctx = repo[None] |
|
108 | for f in repo.walk(match_.badmatch(m, lambda x, y: None)): | |
|
122 | for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)): | |
|
109 | 123 | exact = m.exact(f) |
|
110 | 124 | lfile = lfutil.standin(f) in wctx |
|
111 | 125 | nfile = f in wctx |
@@ -307,7 +321,7 b' def overridelog(orig, ui, repo, *pats, *' | |||
|
307 | 321 | if pat.startswith('set:'): |
|
308 | 322 | return pat |
|
309 | 323 | |
|
310 | kindpat = match_._patsplit(pat, None) | |
|
324 | kindpat = matchmod._patsplit(pat, None) | |
|
311 | 325 | |
|
312 | 326 | if kindpat[0] is not None: |
|
313 | 327 | return kindpat[0] + ':' + tostandin(kindpat[1]) |
@@ -532,7 +546,6 b' def mergerecordupdates(orig, repo, actio' | |||
|
532 | 546 | |
|
533 | 547 | return orig(repo, actions, branchmerge) |
|
534 | 548 | |
|
535 | ||
|
536 | 549 | # Override filemerge to prompt the user about how they wish to merge |
|
537 | 550 | # largefiles. This will handle identical edits without prompting the user. |
|
538 | 551 | def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca, |
@@ -626,7 +639,7 b' def overridecopy(orig, ui, repo, pats, o' | |||
|
626 | 639 | # The patterns were previously mangled to add the standin |
|
627 | 640 | # directory; we need to remove that now |
|
628 | 641 | for pat in pats: |
|
629 | if match_.patkind(pat) is None and lfutil.shortname in pat: | |
|
642 | if matchmod.patkind(pat) is None and lfutil.shortname in pat: | |
|
630 | 643 | newpats.append(pat.replace(lfutil.shortname, '')) |
|
631 | 644 | else: |
|
632 | 645 | newpats.append(pat) |
@@ -644,7 +657,7 b' def overridecopy(orig, ui, repo, pats, o' | |||
|
644 | 657 | oldmatch = installmatchfn(overridematch) |
|
645 | 658 | listpats = [] |
|
646 | 659 | for pat in pats: |
|
647 | if match_.patkind(pat) is not None: | |
|
660 | if matchmod.patkind(pat) is not None: | |
|
648 | 661 | listpats.append(pat) |
|
649 | 662 | else: |
|
650 | 663 | listpats.append(makestandin(pat)) |
@@ -977,7 +990,7 b' def overridearchive(orig, repo, dest, no' | |||
|
977 | 990 | if subrepos: |
|
978 | 991 | for subpath in sorted(ctx.substate): |
|
979 | 992 | sub = ctx.workingsub(subpath) |
|
980 | submatch = match_.subdirmatcher(subpath, matchfn) | |
|
993 | submatch = matchmod.subdirmatcher(subpath, matchfn) | |
|
981 | 994 | sub._repo.lfstatus = True |
|
982 | 995 | sub.archive(archiver, prefix, submatch) |
|
983 | 996 | |
@@ -1025,7 +1038,7 b' def hgsubrepoarchive(orig, repo, archive' | |||
|
1025 | 1038 | |
|
1026 | 1039 | for subpath in sorted(ctx.substate): |
|
1027 | 1040 | sub = ctx.workingsub(subpath) |
|
1028 | submatch = match_.subdirmatcher(subpath, match) | |
|
1041 | submatch = matchmod.subdirmatcher(subpath, match) | |
|
1029 | 1042 | sub._repo.lfstatus = True |
|
1030 | 1043 | sub.archive(archiver, prefix + repo._path + '/', submatch) |
|
1031 | 1044 | |
@@ -1109,7 +1122,7 b' def _getoutgoings(repo, other, missing, ' | |||
|
1109 | 1122 | lfhashes.add(lfhash) |
|
1110 | 1123 | lfutil.getlfilestoupload(repo, missing, dedup) |
|
1111 | 1124 | if lfhashes: |
|
1112 | lfexists = basestore._openstore(repo, other).exists(lfhashes) | |
|
1125 | lfexists = storefactory.openstore(repo, other).exists(lfhashes) | |
|
1113 | 1126 | for fn, lfhash in knowns: |
|
1114 | 1127 | if not lfexists[lfhash]: # lfhash doesn't exist on "other" |
|
1115 | 1128 | addfunc(fn, lfhash) |
@@ -1190,7 +1203,7 b' def scmutiladdremove(orig, repo, matcher' | |||
|
1190 | 1203 | return orig(repo, matcher, prefix, opts, dry_run, similarity) |
|
1191 | 1204 | # Get the list of missing largefiles so we can remove them |
|
1192 | 1205 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
1193 | unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], | |
|
1206 | unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [], | |
|
1194 | 1207 | False, False, False) |
|
1195 | 1208 | |
|
1196 | 1209 | # Call into the normal remove code, but the removing of the standin, we want |
@@ -1338,7 +1351,7 b' def overridecat(orig, ui, repo, file1, *' | |||
|
1338 | 1351 | else: |
|
1339 | 1352 | hash = lfutil.readstandin(repo, lf, ctx.rev()) |
|
1340 | 1353 | if not lfutil.inusercache(repo.ui, hash): |
|
1341 | store = basestore._openstore(repo) | |
|
1354 | store = storefactory.openstore(repo) | |
|
1342 | 1355 | success, missing = store.get([(lf, hash)]) |
|
1343 | 1356 | if len(success) != 1: |
|
1344 | 1357 | raise error.Abort( |
@@ -1375,7 +1388,7 b' def mergeupdate(orig, repo, node, branch' | |||
|
1375 | 1388 | # (*1) deprecated, but used internally (e.g: "rebase --collapse") |
|
1376 | 1389 | |
|
1377 | 1390 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
1378 | unsure, s = lfdirstate.status(match_.always(repo.root, | |
|
1391 | unsure, s = lfdirstate.status(matchmod.always(repo.root, | |
|
1379 | 1392 | repo.getcwd()), |
|
1380 | 1393 | [], False, False, False) |
|
1381 | 1394 | pctx = repo['.'] |
@@ -2,18 +2,27 b'' | |||
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | from __future__ import absolute_import | |
|
5 | 6 | |
|
6 | 7 | import os |
|
7 | 8 | import re |
|
8 | 9 | |
|
9 | from mercurial import error, httppeer, util, wireproto | |
|
10 | 10 | from mercurial.i18n import _ |
|
11 | 11 | |
|
12 | from mercurial import ( | |
|
13 | error, | |
|
14 | httppeer, | |
|
15 | util, | |
|
16 | wireproto, | |
|
17 | ) | |
|
18 | ||
|
19 | from . import ( | |
|
20 | lfutil, | |
|
21 | ) | |
|
22 | ||
|
12 | 23 | urlerr = util.urlerr |
|
13 | 24 | urlreq = util.urlreq |
|
14 | 25 | |
|
15 | import lfutil | |
|
16 | ||
|
17 | 26 | LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.' |
|
18 | 27 | '\n\nPlease enable it in your Mercurial config ' |
|
19 | 28 | 'file.\n') |
@@ -5,20 +5,30 b'' | |||
|
5 | 5 | # GNU General Public License version 2 or any later version. |
|
6 | 6 | |
|
7 | 7 | '''remote largefile store; the base class for wirestore''' |
|
8 | from __future__ import absolute_import | |
|
8 | 9 | |
|
9 | from mercurial import util, wireproto, error | |
|
10 | 10 | from mercurial.i18n import _ |
|
11 | 11 | |
|
12 | from mercurial import ( | |
|
13 | error, | |
|
14 | util, | |
|
15 | wireproto, | |
|
16 | ) | |
|
17 | ||
|
18 | from . import ( | |
|
19 | basestore, | |
|
20 | lfutil, | |
|
21 | localstore, | |
|
22 | ) | |
|
23 | ||
|
12 | 24 | urlerr = util.urlerr |
|
13 | 25 | urlreq = util.urlreq |
|
14 | 26 | |
|
15 | import lfutil | |
|
16 | import basestore | |
|
17 | ||
|
18 | 27 | class remotestore(basestore.basestore): |
|
19 | 28 | '''a largefile store accessed over a network''' |
|
20 | 29 | def __init__(self, ui, repo, url): |
|
21 | 30 | super(remotestore, self).__init__(ui, repo, url) |
|
31 | self._lstore = localstore.localstore(self.ui, self.repo, self.repo) | |
|
22 | 32 | |
|
23 | 33 | def put(self, source, hash): |
|
24 | 34 | if self.sendfile(source, hash): |
@@ -65,34 +75,43 b' class remotestore(basestore.basestore):' | |||
|
65 | 75 | |
|
66 | 76 | return lfutil.copyandhash(chunks, tmpfile) |
|
67 | 77 | |
|
68 | def _verifyfile(self, cctx, cset, contents, standin, verified): | |
|
69 | filename = lfutil.splitstandin(standin) | |
|
70 | if not filename: | |
|
71 | return False | |
|
72 | fctx = cctx[standin] | |
|
73 | key = (filename, fctx.filenode()) | |
|
74 | if key in verified: | |
|
75 | return False | |
|
78 | def _hashesavailablelocally(self, hashes): | |
|
79 | existslocallymap = self._lstore.exists(hashes) | |
|
80 | localhashes = [hash for hash in hashes if existslocallymap[hash]] | |
|
81 | return localhashes | |
|
76 | 82 | |
|
77 | verified.add(key) | |
|
83 | def _verifyfiles(self, contents, filestocheck): | |
|
84 | failed = False | |
|
85 | expectedhashes = [expectedhash | |
|
86 | for cset, filename, expectedhash in filestocheck] | |
|
87 | localhashes = self._hashesavailablelocally(expectedhashes) | |
|
88 | stats = self._stat([expectedhash for expectedhash in expectedhashes | |
|
89 | if expectedhash not in localhashes]) | |
|
78 | 90 | |
|
79 | expecthash = fctx.data()[0:40] | |
|
80 | stat = self._stat([expecthash])[expecthash] | |
|
81 | if not stat: | |
|
82 | return False | |
|
83 | elif stat == 1: | |
|
84 | self.ui.warn( | |
|
85 | _('changeset %s: %s: contents differ\n') | |
|
86 | % (cset, filename)) | |
|
87 | return True # failed | |
|
88 | elif stat == 2: | |
|
89 | self.ui.warn( | |
|
90 | _('changeset %s: %s missing\n') | |
|
91 | % (cset, filename)) | |
|
92 | return True # failed | |
|
93 | else: | |
|
94 | raise RuntimeError('verify failed: unexpected response from ' | |
|
95 | 'statlfile (%r)' % stat) | |
|
91 | for cset, filename, expectedhash in filestocheck: | |
|
92 | if expectedhash in localhashes: | |
|
93 | filetocheck = (cset, filename, expectedhash) | |
|
94 | verifyresult = self._lstore._verifyfiles(contents, | |
|
95 | [filetocheck]) | |
|
96 | if verifyresult: | |
|
97 | failed = True | |
|
98 | else: | |
|
99 | stat = stats[expectedhash] | |
|
100 | if stat: | |
|
101 | if stat == 1: | |
|
102 | self.ui.warn( | |
|
103 | _('changeset %s: %s: contents differ\n') | |
|
104 | % (cset, filename)) | |
|
105 | failed = True | |
|
106 | elif stat == 2: | |
|
107 | self.ui.warn( | |
|
108 | _('changeset %s: %s missing\n') | |
|
109 | % (cset, filename)) | |
|
110 | failed = True | |
|
111 | else: | |
|
112 | raise RuntimeError('verify failed: unexpected response ' | |
|
113 | 'from statlfile (%r)' % stat) | |
|
114 | return failed | |
|
96 | 115 | |
|
97 | 116 | def batch(self): |
|
98 | 117 | '''Support for remote batching.''' |
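The rewritten remotestore verification above first asks the local store which hashes it already has, verifies those locally, and stats only the rest over the wire in a single batch. A sketch of that split with stores modeled as a plain set and a callable (names and status codes follow the diff: 1 means contents differ, 2 means missing):

    def verifyfiles(filestocheck, local_has, remote_stat):
        # remote_stat maps a list of hashes to per-hash status in one round trip.
        hashes = [h for _, _, h in filestocheck]
        remote = remote_stat([h for h in hashes if h not in local_has])
        failed = False
        for cset, filename, h in filestocheck:
            if h in local_has:
                continue  # the local store's own check would run here
            if remote[h]:
                print('changeset %s: %s failed (%d)' % (cset, filename, remote[h]))
                failed = True
        return failed

    stat = lambda hs: {h: 0 for h in hs}
    assert verifyfiles([('0:ab', 'f', 'x' * 40)], set(), stat) is False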
@@ -7,14 +7,23 b'' | |||
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''setup for largefiles repositories: reposetup''' |
|
10 | from __future__ import absolute_import | |
|
11 | ||
|
10 | 12 | import copy |
|
11 | 13 | |
|
12 | from mercurial import error, match as match_, error | |
|
13 | 14 | from mercurial.i18n import _ |
|
14 | from mercurial import scmutil, localrepo | |
|
15 | 15 | |
|
16 | import lfcommands | |
|
17 | import lfutil | |
|
16 | from mercurial import ( | |
|
17 | error, | |
|
18 | localrepo, | |
|
19 | match as matchmod, | |
|
20 | scmutil, | |
|
21 | ) | |
|
22 | ||
|
23 | from . import ( | |
|
24 | lfcommands, | |
|
25 | lfutil, | |
|
26 | ) | |
|
18 | 27 | |
|
19 | 28 | def reposetup(ui, repo): |
|
20 | 29 | # wire repositories should be given new wireproto functions |
@@ -94,7 +103,7 b' def reposetup(ui, repo):' | |||
|
94 | 103 | parentworking = working and ctx1 == self['.'] |
|
95 | 104 | |
|
96 | 105 | if match is None: |
|
97 | match = match_.always(self.root, self.getcwd()) | |
|
106 | match = matchmod.always(self.root, self.getcwd()) | |
|
98 | 107 | |
|
99 | 108 | wlock = None |
|
100 | 109 | try: |
@@ -1,180 +1,28 b'' | |||
|
1 | # Copyright 2009-2010 Gregory P. Ward | |
|
2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated | |
|
3 | # Copyright 2010-2011 Fog Creek Software | |
|
4 | # Copyright 2010-2011 Unity Technologies | |
|
5 | # | |
|
6 | 1 | # This software may be used and distributed according to the terms of the |
|
|
7 | 2 | # GNU General Public License version 2 or any later version. |
|
8 | 3 | |
|
9 | '''base class for store implementations and store-related utility code''' | |
|
4 | from __future__ import absolute_import | |
|
10 | 5 | |
|
11 | 6 | import re |
|
12 | 7 | |
|
13 | from mercurial import util, node, hg, error | |
|
14 | 8 | from mercurial.i18n import _ |
|
15 | 9 | |
|
16 | import lfutil | |
|
17 | ||
|
18 | class StoreError(Exception): | |
|
19 | '''Raised when there is a problem getting files from or putting | |
|
20 | files to a central store.''' | |
|
21 | def __init__(self, filename, hash, url, detail): | |
|
22 | self.filename = filename | |
|
23 | self.hash = hash | |
|
24 | self.url = url | |
|
25 | self.detail = detail | |
|
26 | ||
|
27 | def longmessage(self): | |
|
28 | return (_("error getting id %s from url %s for file %s: %s\n") % | |
|
29 | (self.hash, util.hidepassword(self.url), self.filename, | |
|
30 | self.detail)) | |
|
31 | ||
|
32 | def __str__(self): | |
|
33 | return "%s: %s" % (util.hidepassword(self.url), self.detail) | |
|
34 | ||
|
35 | class basestore(object): | |
|
36 | def __init__(self, ui, repo, url): | |
|
37 | self.ui = ui | |
|
38 | self.repo = repo | |
|
39 | self.url = url | |
|
40 | ||
|
41 | def put(self, source, hash): | |
|
42 | '''Put source file into the store so it can be retrieved by hash.''' | |
|
43 | raise NotImplementedError('abstract method') | |
|
44 | ||
|
45 | def exists(self, hashes): | |
|
46 | '''Check to see if the store contains the given hashes. Given an | |
|
47 | iterable of hashes it returns a mapping from hash to bool.''' | |
|
48 | raise NotImplementedError('abstract method') | |
|
49 | ||
|
50 | def get(self, files): | |
|
51 | '''Get the specified largefiles from the store and write to local | |
|
52 | files under repo.root. files is a list of (filename, hash) | |
|
53 | tuples. Return (success, missing), lists of files successfully | |
|
54 | downloaded and those not found in the store. success is a list | |
|
55 | of (filename, hash) tuples; missing is a list of filenames that | |
|
56 | we could not get. (The detailed error message will already have | |
|
57 | been presented to the user, so missing is just supplied as a | |
|
58 | summary.)''' | |
|
59 | success = [] | |
|
60 | missing = [] | |
|
61 | ui = self.ui | |
|
62 | ||
|
63 | at = 0 | |
|
64 | available = self.exists(set(hash for (_filename, hash) in files)) | |
|
65 | for filename, hash in files: | |
|
66 | ui.progress(_('getting largefiles'), at, unit=_('files'), | |
|
67 | total=len(files)) | |
|
68 | at += 1 | |
|
69 | ui.note(_('getting %s:%s\n') % (filename, hash)) | |
|
70 | ||
|
71 | if not available.get(hash): | |
|
72 | ui.warn(_('%s: largefile %s not available from %s\n') | |
|
73 | % (filename, hash, util.hidepassword(self.url))) | |
|
74 | missing.append(filename) | |
|
75 | continue | |
|
76 | ||
|
77 | if self._gethash(filename, hash): | |
|
78 | success.append((filename, hash)) | |
|
79 | else: | |
|
80 | missing.append(filename) | |
|
81 | ||
|
82 | ui.progress(_('getting largefiles'), None) | |
|
83 | return (success, missing) | |
|
84 | ||
|
85 | def _gethash(self, filename, hash): | |
|
86 | """Get file with the provided hash and store it in the local repo's | |
|
87 | store and in the usercache. | |
|
88 | filename is for informational messages only. | |
|
89 | """ | |
|
90 | util.makedirs(lfutil.storepath(self.repo, '')) | |
|
91 | storefilename = lfutil.storepath(self.repo, hash) | |
|
10 | from mercurial import ( | |
|
11 | error, | |
|
12 | hg, | |
|
13 | util, | |
|
14 | ) | |
|
92 | 15 | |
|
93 | tmpname = storefilename + '.tmp' | |
|
94 | tmpfile = util.atomictempfile(tmpname, | |
|
95 | createmode=self.repo.store.createmode) | |
|
96 | ||
|
97 | try: | |
|
98 | gothash = self._getfile(tmpfile, filename, hash) | |
|
99 | except StoreError as err: | |
|
100 | self.ui.warn(err.longmessage()) | |
|
101 | gothash = "" | |
|
102 | tmpfile.close() | |
|
103 | ||
|
104 | if gothash != hash: | |
|
105 | if gothash != "": | |
|
106 | self.ui.warn(_('%s: data corruption (expected %s, got %s)\n') | |
|
107 | % (filename, hash, gothash)) | |
|
108 | util.unlink(tmpname) | |
|
109 | return False | |
|
110 | ||
|
111 | util.rename(tmpname, storefilename) | |
|
112 | lfutil.linktousercache(self.repo, hash) | |
|
113 | return True | |
|
114 | ||
|
115 | def verify(self, revs, contents=False): | |
|
116 | '''Verify the existence (and, optionally, contents) of every big | |
|
117 | file revision referenced by every changeset in revs. | |
|
118 | Return 0 if all is well, non-zero on any errors.''' | |
|
119 | failed = False | |
|
120 | ||
|
121 | self.ui.status(_('searching %d changesets for largefiles\n') % | |
|
122 | len(revs)) | |
|
123 | verified = set() # set of (filename, filenode) tuples | |
|
124 | ||
|
125 | for rev in revs: | |
|
126 | cctx = self.repo[rev] | |
|
127 | cset = "%d:%s" % (cctx.rev(), node.short(cctx.node())) | |
|
128 | ||
|
129 | for standin in cctx: | |
|
130 | if self._verifyfile(cctx, cset, contents, standin, verified): | |
|
131 | failed = True | |
|
132 | ||
|
133 | numrevs = len(verified) | |
|
134 | numlfiles = len(set([fname for (fname, fnode) in verified])) | |
|
135 | if contents: | |
|
136 | self.ui.status( | |
|
137 | _('verified contents of %d revisions of %d largefiles\n') | |
|
138 | % (numrevs, numlfiles)) | |
|
139 | else: | |
|
140 | self.ui.status( | |
|
141 | _('verified existence of %d revisions of %d largefiles\n') | |
|
142 | % (numrevs, numlfiles)) | |
|
143 | return int(failed) | |
|
144 | ||
|
145 | def _getfile(self, tmpfile, filename, hash): | |
|
146 | '''Fetch one revision of one file from the store and write it | |
|
147 | to tmpfile. Compute the hash of the file on-the-fly as it | |
|
148 | downloads and return the hash. Close tmpfile. Raise | |
|
149 | StoreError if unable to download the file (e.g. it does not | |
|
150 | exist in the store).''' | |
|
151 | raise NotImplementedError('abstract method') | |
|
152 | ||
|
153 | def _verifyfile(self, cctx, cset, contents, standin, verified): | |
|
154 | '''Perform the actual verification of a file in the store. | |
|
155 | 'cset' is only used in warnings. | |
|
156 | 'contents' controls verification of content hash. | |
|
157 | 'standin' is the standin path of the largefile to verify. | |
|
158 | 'verified' is maintained as a set of already verified files. | |
|
159 | Returns _true_ if it is a standin and any problems are found! | |
|
160 | ''' | |
|
161 | raise NotImplementedError('abstract method') | |
|
162 | ||
|
163 | import localstore, wirestore | |
|
164 | ||
|
165 | _storeprovider = { | |
|
166 | 'file': [localstore.localstore], | |
|
167 | 'http': [wirestore.wirestore], | |
|
168 | 'https': [wirestore.wirestore], | |
|
169 | 'ssh': [wirestore.wirestore], | |
|
170 | } | |
|
171 | ||
|
172 | _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') | |
|
16 | from . import ( | |
|
17 | lfutil, | |
|
18 | localstore, | |
|
19 | wirestore, | |
|
20 | ) | |
|
173 | 21 | |
|
174 | 22 | # During clone this function is passed the src's ui object |
|
175 | 23 | # but it needs the dest's ui object so it can read out of |
|
176 | 24 | # the config file. Use repo.ui instead. |
|
177 |
def |
|
|
25 | def openstore(repo, remote=None, put=False): | |
|
178 | 26 | ui = repo.ui |
|
179 | 27 | |
|
180 | 28 | if not remote: |
@@ -219,3 +67,12 b' def _openstore(repo, remote=None, put=Fa' | |||
|
219 | 67 | |
|
220 | 68 | raise error.Abort(_('%s does not appear to be a largefile store') % |
|
221 | 69 | util.hidepassword(path)) |
|
70 | ||
|
71 | _storeprovider = { | |
|
72 | 'file': [localstore.localstore], | |
|
73 | 'http': [wirestore.wirestore], | |
|
74 | 'https': [wirestore.wirestore], | |
|
75 | 'ssh': [wirestore.wirestore], | |
|
76 | } | |
|
77 | ||
|
78 | _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') |
@@ -7,14 +7,36 b'' | |||
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''setup for largefiles extension: uisetup''' |
|
10 | from __future__ import absolute_import | |
|
10 | 11 | |
|
11 | from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \ | |
|
12 | httppeer, merge, scmutil, sshpeer, wireproto, subrepo, copies, exchange | |
|
13 | 12 | from mercurial.i18n import _ |
|
14 | from mercurial.hgweb import hgweb_mod, webcommands | |
|
13 | ||
|
14 | from mercurial.hgweb import ( | |
|
15 | hgweb_mod, | |
|
16 | webcommands, | |
|
17 | ) | |
|
15 | 18 | |
|
16 | import overrides | |
|
17 | import proto | |
|
19 | from mercurial import ( | |
|
20 | archival, | |
|
21 | cmdutil, | |
|
22 | commands, | |
|
23 | copies, | |
|
24 | exchange, | |
|
25 | extensions, | |
|
26 | filemerge, | |
|
27 | hg, | |
|
28 | httppeer, | |
|
29 | merge, | |
|
30 | scmutil, | |
|
31 | sshpeer, | |
|
32 | subrepo, | |
|
33 | wireproto, | |
|
34 | ) | |
|
35 | ||
|
36 | from . import ( | |
|
37 | overrides, | |
|
38 | proto, | |
|
39 | ) | |
|
18 | 40 | |
|
19 | 41 | def uisetup(ui): |
|
20 | 42 | # Disable auto-status for some commands which assume that all |
@@ -4,9 +4,12 b'' | |||
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | '''largefile store working over Mercurial's wire protocol''' |
|
7 | from __future__ import absolute_import | |
|
7 | 8 | |
|
8 | import lfutil | |
|
9 | import remotestore | |
|
9 | from . import ( | |
|
10 | lfutil, | |
|
11 | remotestore, | |
|
12 | ) | |
|
10 | 13 | |
|
11 | 14 | class wirestore(remotestore.remotestore): |
|
12 | 15 | def __init__(self, ui, repo, remote): |
@@ -92,7 +92,7 b' def uisetup(ui):' | |||
|
92 | 92 | Arguments are passed on as environment variables. |
|
93 | 93 | |
|
94 | 94 | """ |
|
95 |
script = |
|
|
95 | script = self.config('logtoprocess', event) | |
|
96 | 96 | if script: |
|
97 | 97 | if msg: |
|
98 | 98 | # try to format the log message given the remaining |
@@ -62,19 +62,39 b' This extension used to provide a strip c' | |||
|
62 | 62 | in the strip extension. |
|
63 | 63 | ''' |
|
64 | 64 | |
|
65 | from __future__ import absolute_import | |
|
66 | ||
|
67 | import errno | |
|
68 | import os | |
|
69 | import re | |
|
70 | import shutil | |
|
65 | 71 | from mercurial.i18n import _ |
|
66 |
from mercurial.node import |
|
|
67 | from mercurial.lock import release | |
|
68 | from mercurial import commands, cmdutil, hg, scmutil, util, revset | |
|
69 | from mercurial import dispatch | |
|
70 | from mercurial import extensions, error, phases | |
|
71 | from mercurial import patch as patchmod | |
|
72 | from mercurial import lock as lockmod | |
|
73 |
from mercurial import |
|
|
74 | from mercurial import registrar | |
|
75 | from mercurial import subrepo | |
|
76 | import os, re, errno, shutil | |
|
77 | ||
|
72 | from mercurial.node import ( | |
|
73 | bin, | |
|
74 | hex, | |
|
75 | nullid, | |
|
76 | nullrev, | |
|
77 | short, | |
|
78 | ) | |
|
79 | from mercurial import ( | |
|
80 | cmdutil, | |
|
81 | commands, | |
|
82 | dispatch, | |
|
83 | error, | |
|
84 | extensions, | |
|
85 | hg, | |
|
86 | localrepo, | |
|
87 | lock as lockmod, | |
|
88 | patch as patchmod, | |
|
89 | phases, | |
|
90 | registrar, | |
|
91 | revset, | |
|
92 | scmutil, | |
|
93 | subrepo, | |
|
94 | util, | |
|
95 | ) | |
|
96 | ||
|
97 | release = lockmod.release | |
|
78 | 98 | seriesopts = [('s', 'summary', None, _('print first line of patch header'))] |
|
79 | 99 | |
|
80 | 100 | cmdtable = {} |
@@ -139,6 +139,7 b' import fnmatch' | |||
|
139 | 139 | import socket |
|
140 | 140 | import time |
|
141 | 141 | |
|
142 | from mercurial.i18n import _ | |
|
142 | 143 | from mercurial import ( |
|
143 | 144 | cmdutil, |
|
144 | 145 | error, |
@@ -146,7 +147,6 b' from mercurial import (' | |||
|
146 | 147 | patch, |
|
147 | 148 | util, |
|
148 | 149 | ) |
|
149 | from mercurial.i18n import _ | |
|
150 | 150 | |
|
151 | 151 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
|
152 | 152 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
@@ -363,7 +363,7 b' class notifier(object):' | |||
|
363 | 363 | s = patch.diffstat(difflines) |
|
364 | 364 | # s may be nil, don't include the header if it is |
|
365 | 365 | if s: |
|
366 | self.ui.write('\ndiffstat:\n\n%s' % s) | |
|
366 | self.ui.write(_('\ndiffstat:\n\n%s') % s) | |
|
367 | 367 | |
|
368 | 368 | if maxdiff == 0: |
|
369 | 369 | return |
@@ -66,6 +66,7 b' import signal' | |||
|
66 | 66 | import subprocess |
|
67 | 67 | import sys |
|
68 | 68 | |
|
69 | from mercurial.i18n import _ | |
|
69 | 70 | from mercurial import ( |
|
70 | 71 | cmdutil, |
|
71 | 72 | commands, |
@@ -73,7 +74,6 b' from mercurial import (' | |||
|
73 | 74 | extensions, |
|
74 | 75 | util, |
|
75 | 76 | ) |
|
76 | from mercurial.i18n import _ | |
|
77 | 77 | |
|
78 | 78 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
|
79 | 79 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
@@ -71,6 +71,7 b' import os' | |||
|
71 | 71 | import socket |
|
72 | 72 | import tempfile |
|
73 | 73 | |
|
74 | from mercurial.i18n import _ | |
|
74 | 75 | from mercurial import ( |
|
75 | 76 | cmdutil, |
|
76 | 77 | commands, |
@@ -83,7 +84,6 b' from mercurial import (' | |||
|
83 | 84 | util, |
|
84 | 85 | ) |
|
85 | 86 | stringio = util.stringio |
|
86 | from mercurial.i18n import _ | |
|
87 | 87 | |
|
88 | 88 | cmdtable = {} |
|
89 | 89 | command = cmdutil.command(cmdtable) |
@@ -708,13 +708,7 b' def email(ui, repo, *revs, **opts):' | |||
|
708 | 708 | fp.close() |
|
709 | 709 | else: |
|
710 | 710 | if not sendmail: |
|
711 | verifycert = ui.config('smtp', 'verifycert', 'strict') | |
|
712 | if opts.get('insecure'): | |
|
713 | ui.setconfig('smtp', 'verifycert', 'loose', 'patchbomb') | |
|
714 | try: | |
|
715 | sendmail = mail.connect(ui, mbox=mbox) | |
|
716 | finally: | |
|
717 | ui.setconfig('smtp', 'verifycert', verifycert, 'patchbomb') | |
|
711 | sendmail = mail.connect(ui, mbox=mbox) | |
|
718 | 712 | ui.status(_('sending '), subj, ' ...\n') |
|
719 | 713 | ui.progress(_('sending'), i, item=subj, total=len(msgs), |
|
720 | 714 | unit=_('emails')) |
@@ -27,6 +27,7 b' from __future__ import absolute_import' | |||
|
27 | 27 | |
|
28 | 28 | import os |
|
29 | 29 | |
|
30 | from mercurial.i18n import _ | |
|
30 | 31 | from mercurial import ( |
|
31 | 32 | cmdutil, |
|
32 | 33 | commands, |
@@ -34,7 +35,6 b' from mercurial import (' | |||
|
34 | 35 | scmutil, |
|
35 | 36 | util, |
|
36 | 37 | ) |
|
37 | from mercurial.i18n import _ | |
|
38 | 38 | |
|
39 | 39 | cmdtable = {} |
|
40 | 40 | command = cmdutil.command(cmdtable) |
@@ -84,13 +84,13 b' def purge(ui, repo, *dirs, **opts):' | |||
|
84 | 84 | list of files that this program would delete, use the --print |
|
85 | 85 | option. |
|
86 | 86 | ''' |
|
87 |
act = not opts |
|
|
87 | act = not opts.get('print') | |
|
88 | 88 | eol = '\n' |
|
89 |
if opts |
|
|
89 | if opts.get('print0'): | |
|
90 | 90 | eol = '\0' |
|
91 | 91 | act = False # --print0 implies --print |
|
92 |
removefiles = opts |
|
|
93 |
removedirs = opts |
|
|
92 | removefiles = opts.get('files') | |
|
93 | removedirs = opts.get('dirs') | |
|
94 | 94 | if not removefiles and not removedirs: |
|
95 | 95 | removefiles = True |
|
96 | 96 | removedirs = True |
@@ -101,7 +101,7 b' def purge(ui, repo, *dirs, **opts):' | |||
|
101 | 101 | remove_func(repo.wjoin(name)) |
|
102 | 102 | except OSError: |
|
103 | 103 | m = _('%s cannot be removed') % name |
|
104 |
if opts |
|
|
104 | if opts.get('abort_on_err'): | |
|
105 | 105 | raise error.Abort(m) |
|
106 | 106 | ui.warn(_('warning: %s\n') % m) |
|
107 | 107 | else: |
@@ -111,7 +111,7 b' def purge(ui, repo, *dirs, **opts):' | |||
|
111 | 111 | if removedirs: |
|
112 | 112 | directories = [] |
|
113 | 113 | match.explicitdir = match.traversedir = directories.append |
|
114 |
status = repo.status(match=match, ignored=opts |
|
|
114 | status = repo.status(match=match, ignored=opts.get('all'), unknown=True) | |
|
115 | 115 | |
|
116 | 116 | if removefiles: |
|
117 | 117 | for f in sorted(status.unknown + status.ignored): |
@@ -14,14 +14,42 b' For more information:' | |||
|
14 | 14 | https://mercurial-scm.org/wiki/RebaseExtension |
|
15 | 15 | ''' |
|
16 | 16 | |
|
17 | from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks | |
|
18 | from mercurial import extensions, patch, scmutil, phases, obsolete, error | |
|
19 | from mercurial import copies, destutil, repoview, registrar, revset | |
|
20 | from mercurial.commands import templateopts | |
|
21 | from mercurial.node import nullrev, nullid, hex, short | |
|
22 | from mercurial.lock import release | |
|
17 | from __future__ import absolute_import | |
|
18 | ||
|
19 | import errno | |
|
20 | import os | |
|
21 | ||
|
23 | 22 | from mercurial.i18n import _ |
|
24 | import os, errno | |
|
23 | from mercurial.node import ( | |
|
24 | hex, | |
|
25 | nullid, | |
|
26 | nullrev, | |
|
27 | short, | |
|
28 | ) | |
|
29 | from mercurial import ( | |
|
30 | bookmarks, | |
|
31 | cmdutil, | |
|
32 | commands, | |
|
33 | copies, | |
|
34 | destutil, | |
|
35 | error, | |
|
36 | extensions, | |
|
37 | hg, | |
|
38 | lock, | |
|
39 | merge, | |
|
40 | obsolete, | |
|
41 | patch, | |
|
42 | phases, | |
|
43 | registrar, | |
|
44 | repair, | |
|
45 | repoview, | |
|
46 | revset, | |
|
47 | scmutil, | |
|
48 | util, | |
|
49 | ) | |
|
50 | ||
|
51 | release = lock.release | |
|
52 | templateopts = commands.templateopts | |
|
25 | 53 | |
|
26 | 54 | # The following constants are used throughout the rebase module. The ordering of |
|
27 | 55 | # their values must be maintained. |
@@ -91,6 +119,394 b' def _revsetdestrebase(repo, subset, x):' | |||
|
91 | 119 | sourceset = revset.getset(repo, revset.fullreposet(repo), x) |
|
92 | 120 | return subset & revset.baseset([_destrebase(repo, sourceset)]) |
|
93 | 121 | |
|
122 | class rebaseruntime(object): | |
|
123 | """This class is a container for rebase runtime state""" | |
|
124 | def __init__(self, repo, ui, opts=None): | |
|
125 | if opts is None: | |
|
126 | opts = {} | |
|
127 | ||
|
128 | self.repo = repo | |
|
129 | self.ui = ui | |
|
130 | self.opts = opts | |
|
131 | self.originalwd = None | |
|
132 | self.external = nullrev | |
|
133 | # Mapping between the old revision id and either what is the new rebased | |
|
134 | # revision or what needs to be done with the old revision. The state | |
|
135 | # dict will be what contains most of the rebase progress state. | |
|
136 | self.state = {} | |
|
137 | self.activebookmark = None | |
|
138 | self.currentbookmarks = None | |
|
139 | self.target = None | |
|
140 | self.skipped = set() | |
|
141 | self.targetancestors = set() | |
|
142 | ||
|
143 | self.collapsef = opts.get('collapse', False) | |
|
144 | self.collapsemsg = cmdutil.logmessage(ui, opts) | |
|
145 | self.date = opts.get('date', None) | |
|
146 | ||
|
147 | e = opts.get('extrafn') # internal, used by e.g. hgsubversion | |
|
148 | self.extrafns = [_savegraft] | |
|
149 | if e: | |
|
150 | self.extrafns = [e] | |
|
151 | ||
|
152 | self.keepf = opts.get('keep', False) | |
|
153 | self.keepbranchesf = opts.get('keepbranches', False) | |
|
154 | # keepopen is not meant for use on the command line, but by | |
|
155 | # other extensions | |
|
156 | self.keepopen = opts.get('keepopen', False) | |
|
157 | self.obsoletenotrebased = {} | |
|
158 | ||
|
159 | def restorestatus(self): | |
|
160 | """Restore a previously stored status""" | |
|
161 | repo = self.repo | |
|
162 | keepbranches = None | |
|
163 | target = None | |
|
164 | collapse = False | |
|
165 | external = nullrev | |
|
166 | activebookmark = None | |
|
167 | state = {} | |
|
168 | ||
|
169 | try: | |
|
170 | f = repo.vfs("rebasestate") | |
|
171 | for i, l in enumerate(f.read().splitlines()): | |
|
172 | if i == 0: | |
|
173 | originalwd = repo[l].rev() | |
|
174 | elif i == 1: | |
|
175 | target = repo[l].rev() | |
|
176 | elif i == 2: | |
|
177 | external = repo[l].rev() | |
|
178 | elif i == 3: | |
|
179 | collapse = bool(int(l)) | |
|
180 | elif i == 4: | |
|
181 | keep = bool(int(l)) | |
|
182 | elif i == 5: | |
|
183 | keepbranches = bool(int(l)) | |
|
184 | elif i == 6 and not (len(l) == 81 and ':' in l): | |
|
185 | # line 6 is a recent addition, so for backwards | |
|
186 | # compatibility check that the line doesn't look like the | |
|
187 | # oldrev:newrev lines | |
|
188 | activebookmark = l | |
|
189 | else: | |
|
190 | oldrev, newrev = l.split(':') | |
|
191 | if newrev in (str(nullmerge), str(revignored), | |
|
192 | str(revprecursor), str(revpruned)): | |
|
193 | state[repo[oldrev].rev()] = int(newrev) | |
|
194 | elif newrev == nullid: | |
|
195 | state[repo[oldrev].rev()] = revtodo | |
|
196 | # Legacy compat special case | |
|
197 | else: | |
|
198 | state[repo[oldrev].rev()] = repo[newrev].rev() | |
|
199 | ||
|
200 | except IOError as err: | |
|
201 | if err.errno != errno.ENOENT: | |
|
202 | raise | |
|
203 | cmdutil.wrongtooltocontinue(repo, _('rebase')) | |
|
204 | ||
|
205 | if keepbranches is None: | |
|
206 | raise error.Abort(_('.hg/rebasestate is incomplete')) | |
|
207 | ||
|
208 | skipped = set() | |
|
209 | # recompute the set of skipped revs | |
|
210 | if not collapse: | |
|
211 | seen = set([target]) | |
|
212 | for old, new in sorted(state.items()): | |
|
213 | if new != revtodo and new in seen: | |
|
214 | skipped.add(old) | |
|
215 | seen.add(new) | |
|
216 | repo.ui.debug('computed skipped revs: %s\n' % | |
|
217 | (' '.join(str(r) for r in sorted(skipped)) or None)) | |
|
218 | repo.ui.debug('rebase status resumed\n') | |
|
219 | _setrebasesetvisibility(repo, state.keys()) | |
|
220 | ||
|
221 | self.originalwd = originalwd | |
|
222 | self.target = target | |
|
223 | self.state = state | |
|
224 | self.skipped = skipped | |
|
225 | self.collapsef = collapse | |
|
226 | self.keepf = keep | |
|
227 | self.keepbranchesf = keepbranches | |
|
228 | self.external = external | |
|
229 | self.activebookmark = activebookmark | |
|
230 | ||
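For orientation, the parser above implies the following .hg/rebasestate layout (placeholders, not literal values; line 6 is the recent activebookmark addition that the length-81 check guards against):

    <originalwd hex>            # line 0: working directory parent before the rebase
    <target hex>                # line 1: rebase destination
    <external hex>              # line 2: external parent, or the null node
    0 or 1                      # line 3: collapse flag
    0 or 1                      # line 4: keep flag
    0 or 1                      # line 5: keepbranches flag
    <activebookmark name>       # line 6: optional; absent in older state files
    <oldrev hex>:<newrev hex>   # remaining lines: one mapping per rebased revision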
|
231 | def _handleskippingobsolete(self, rebaserevs, obsoleterevs, target): | |
|
232 | """Compute structures necessary for skipping obsolete revisions | |
|
233 | ||
|
234 | rebaserevs: iterable of all revisions that are to be rebased | |
|
235 | obsoleterevs: iterable of all obsolete revisions in rebaseset | |
|
236 | target: a destination revision for the rebase operation | |
|
237 | """ | |
|
238 | self.obsoletenotrebased = {} | |
|
239 | if not self.ui.configbool('experimental', 'rebaseskipobsolete', | |
|
240 | default=True): | |
|
241 | return | |
|
242 | rebaseset = set(rebaserevs) | |
|
243 | obsoleteset = set(obsoleterevs) | |
|
244 | self.obsoletenotrebased = _computeobsoletenotrebased(self.repo, | |
|
245 | obsoleteset, target) | |
|
246 | skippedset = set(self.obsoletenotrebased) | |
|
247 | _checkobsrebase(self.repo, self.ui, obsoleteset, rebaseset, skippedset) | |
|
248 | ||
|
249 | def _prepareabortorcontinue(self, isabort): | |
|
250 | try: | |
|
251 | self.restorestatus() | |
|
252 | self.collapsemsg = restorecollapsemsg(self.repo) | |
|
253 | except error.RepoLookupError: | |
|
254 | if isabort: | |
|
255 | clearstatus(self.repo) | |
|
256 | clearcollapsemsg(self.repo) | |
|
257 | self.repo.ui.warn(_('rebase aborted (no revision is removed,' | |
|
258 | ' only broken state is cleared)\n')) | |
|
259 | return 0 | |
|
260 | else: | |
|
261 | msg = _('cannot continue inconsistent rebase') | |
|
262 | hint = _('use "hg rebase --abort" to clear broken state') | |
|
263 | raise error.Abort(msg, hint=hint) | |
|
264 | if isabort: | |
|
265 | return abort(self.repo, self.originalwd, self.target, | |
|
266 | self.state, activebookmark=self.activebookmark) | |
|
267 | ||
|
268 | obsrevs = (r for r, st in self.state.items() if st == revprecursor) | |
|
269 | self._handleskippingobsolete(self.state.keys(), obsrevs, self.target) | |
|
270 | ||
|
271 | def _preparenewrebase(self, dest, rebaseset): | |
|
272 | if dest is None: | |
|
273 | return _nothingtorebase() | |
|
274 | ||
|
275 | allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt) | |
|
276 | if (not (self.keepf or allowunstable) | |
|
277 | and self.repo.revs('first(children(%ld) - %ld)', | |
|
278 | rebaseset, rebaseset)): | |
|
279 | raise error.Abort( | |
|
280 | _("can't remove original changesets with" | |
|
281 | " unrebased descendants"), | |
|
282 | hint=_('use --keep to keep original changesets')) | |
|
283 | ||
|
284 | obsrevs = _filterobsoleterevs(self.repo, rebaseset) | |
|
285 | self._handleskippingobsolete(rebaseset, obsrevs, dest) | |
|
286 | ||
|
287 | result = buildstate(self.repo, dest, rebaseset, self.collapsef, | |
|
288 | self.obsoletenotrebased) | |
|
289 | ||
|
290 | if not result: | |
|
291 | # Empty state built, nothing to rebase | |
|
292 | self.ui.status(_('nothing to rebase\n')) | |
|
293 | return _nothingtorebase() | |
|
294 | ||
|
295 | root = min(rebaseset) | |
|
296 | if not self.keepf and not self.repo[root].mutable(): | |
|
297 | raise error.Abort(_("can't rebase public changeset %s") | |
|
298 | % self.repo[root], | |
|
299 | hint=_('see "hg help phases" for details')) | |
|
300 | ||
|
301 | (self.originalwd, self.target, self.state) = result | |
|
302 | if self.collapsef: | |
|
303 | self.targetancestors = self.repo.changelog.ancestors( | |
|
304 | [self.target], | |
|
305 | inclusive=True) | |
|
306 | self.external = externalparent(self.repo, self.state, | |
|
307 | self.targetancestors) | |
|
308 | ||
|
309 | if dest.closesbranch() and not self.keepbranchesf: | |
|
310 | self.ui.status(_('reopening closed branch head %s\n') % dest) | |
|
311 | ||
|
312 | def _performrebase(self): | |
|
313 | repo, ui, opts = self.repo, self.ui, self.opts | |
|
314 | if self.keepbranchesf: | |
|
315 | # insert _savebranch at the start of extrafns so if | |
|
316 | # there's a user-provided extrafn it can clobber branch if | |
|
317 | # desired | |
|
318 | self.extrafns.insert(0, _savebranch) | |
|
319 | if self.collapsef: | |
|
320 | branches = set() | |
|
321 | for rev in self.state: | |
|
322 | branches.add(repo[rev].branch()) | |
|
323 | if len(branches) > 1: | |
|
324 | raise error.Abort(_('cannot collapse multiple named ' | |
|
325 | 'branches')) | |
|
326 | ||
|
327 | # Rebase | |
|
328 | if not self.targetancestors: | |
|
329 | self.targetancestors = repo.changelog.ancestors([self.target], | |
|
330 | inclusive=True) | |
|
331 | ||
|
332 | # Keep track of the current bookmarks in order to reset them later | |
|
333 | self.currentbookmarks = repo._bookmarks.copy() | |
|
334 | self.activebookmark = self.activebookmark or repo._activebookmark | |
|
335 | if self.activebookmark: | |
|
336 | bookmarks.deactivate(repo) | |
|
337 | ||
|
338 | sortedrevs = sorted(self.state) | |
|
339 | total = len(self.state) | |
|
340 | pos = 0 | |
|
341 | for rev in sortedrevs: | |
|
342 | ctx = repo[rev] | |
|
343 | desc = '%d:%s "%s"' % (ctx.rev(), ctx, | |
|
344 | ctx.description().split('\n', 1)[0]) | |
|
345 | names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node()) | |
|
346 | if names: | |
|
347 | desc += ' (%s)' % ' '.join(names) | |
|
348 | pos += 1 | |
|
349 | if self.state[rev] == revtodo: | |
|
350 | ui.status(_('rebasing %s\n') % desc) | |
|
351 | ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)), | |
|
352 | _('changesets'), total) | |
|
353 | p1, p2, base = defineparents(repo, rev, self.target, | |
|
354 | self.state, | |
|
355 | self.targetancestors, | |
|
356 | self.obsoletenotrebased) | |
|
357 | storestatus(repo, self.originalwd, self.target, | |
|
358 | self.state, self.collapsef, self.keepf, | |
|
359 | self.keepbranchesf, self.external, | |
|
360 | self.activebookmark) | |
|
361 | storecollapsemsg(repo, self.collapsemsg) | |
|
362 | if len(repo[None].parents()) == 2: | |
|
363 | repo.ui.debug('resuming interrupted rebase\n') | |
|
364 | else: | |
|
365 | try: | |
|
366 | ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), | |
|
367 | 'rebase') | |
|
368 | stats = rebasenode(repo, rev, p1, base, self.state, | |
|
369 | self.collapsef, self.target) | |
|
370 | if stats and stats[3] > 0: | |
|
371 | raise error.InterventionRequired( | |
|
372 | _('unresolved conflicts (see hg ' | |
|
373 | 'resolve, then hg rebase --continue)')) | |
|
374 | finally: | |
|
375 | ui.setconfig('ui', 'forcemerge', '', 'rebase') | |
|
376 | if not self.collapsef: | |
|
377 | merging = p2 != nullrev | |
|
378 | editform = cmdutil.mergeeditform(merging, 'rebase') | |
|
379 | editor = cmdutil.getcommiteditor(editform=editform, **opts) | |
|
380 | newnode = concludenode(repo, rev, p1, p2, | |
|
381 | extrafn=_makeextrafn(self.extrafns), | |
|
382 | editor=editor, | |
|
383 | keepbranches=self.keepbranchesf, | |
|
384 | date=self.date) | |
|
385 | else: | |
|
386 | # Skip commit if we are collapsing | |
|
387 | repo.dirstate.beginparentchange() | |
|
388 | repo.setparents(repo[p1].node()) | |
|
389 | repo.dirstate.endparentchange() | |
|
390 | newnode = None | |
|
391 | # Update the state | |
|
392 | if newnode is not None: | |
|
393 | self.state[rev] = repo[newnode].rev() | |
|
394 | ui.debug('rebased as %s\n' % short(newnode)) | |
|
395 | else: | |
|
396 | if not self.collapsef: | |
|
397 | ui.warn(_('note: rebase of %d:%s created no changes ' | |
|
398 | 'to commit\n') % (rev, ctx)) | |
|
399 | self.skipped.add(rev) | |
|
400 | self.state[rev] = p1 | |
|
401 | ui.debug('next revision set to %s\n' % p1) | |
|
402 | elif self.state[rev] == nullmerge: | |
|
403 | ui.debug('ignoring null merge rebase of %s\n' % rev) | |
|
404 | elif self.state[rev] == revignored: | |
|
405 | ui.status(_('not rebasing ignored %s\n') % desc) | |
|
406 | elif self.state[rev] == revprecursor: | |
|
407 | targetctx = repo[self.obsoletenotrebased[rev]] | |
|
408 | desctarget = '%d:%s "%s"' % (targetctx.rev(), targetctx, | |
|
409 | targetctx.description().split('\n', 1)[0]) | |
|
410 | msg = _('note: not rebasing %s, already in destination as %s\n') | |
|
411 | ui.status(msg % (desc, desctarget)) | |
|
412 | elif self.state[rev] == revpruned: | |
|
413 | msg = _('note: not rebasing %s, it has no successor\n') | |
|
414 | ui.status(msg % desc) | |
|
415 | else: | |
|
416 | ui.status(_('already rebased %s as %s\n') % | |
|
417 | (desc, repo[self.state[rev]])) | |
|
418 | ||
|
419 | ui.progress(_('rebasing'), None) | |
|
420 | ui.note(_('rebase merging completed\n')) | |
|
421 | ||
|
422 | def _finishrebase(self): | |
|
423 | repo, ui, opts = self.repo, self.ui, self.opts | |
|
424 | if self.collapsef and not self.keepopen: | |
|
425 | p1, p2, _base = defineparents(repo, min(self.state), | |
|
426 | self.target, self.state, | |
|
427 | self.targetancestors, | |
|
428 | self.obsoletenotrebased) | |
|
429 | editopt = opts.get('edit') | |
|
430 | editform = 'rebase.collapse' | |
|
431 | if self.collapsemsg: | |
|
432 | commitmsg = self.collapsemsg | |
|
433 | else: | |
|
434 | commitmsg = 'Collapsed revision' | |
|
435 | for rebased in self.state: | |
|
436 | if rebased not in self.skipped and\ | |
|
437 | self.state[rebased] > nullmerge: | |
|
438 | commitmsg += '\n* %s' % repo[rebased].description() | |
|
439 | editopt = True | |
|
440 | editor = cmdutil.getcommiteditor(edit=editopt, editform=editform) | |
|
441 | revtoreuse = max(self.state) | |
|
442 | newnode = concludenode(repo, revtoreuse, p1, self.external, | |
|
443 | commitmsg=commitmsg, | |
|
444 | extrafn=_makeextrafn(self.extrafns), | |
|
445 | editor=editor, | |
|
446 | keepbranches=self.keepbranchesf, | |
|
447 | date=self.date) | |
|
448 | if newnode is None: | |
|
449 | newrev = self.target | |
|
450 | else: | |
|
451 | newrev = repo[newnode].rev() | |
|
452 | for oldrev in self.state.iterkeys(): | |
|
453 | if self.state[oldrev] > nullmerge: | |
|
454 | self.state[oldrev] = newrev | |
|
455 | ||
|
456 | if 'qtip' in repo.tags(): | |
|
457 | updatemq(repo, self.state, self.skipped, **opts) | |
|
458 | ||
|
459 | if self.currentbookmarks: | |
|
460 | # Nodeids are needed to reset bookmarks | |
|
461 | nstate = {} | |
|
462 | for k, v in self.state.iteritems(): | |
|
463 | if v > nullmerge: | |
|
464 | nstate[repo[k].node()] = repo[v].node() | |
|
465 | elif v == revprecursor: | |
|
466 | succ = self.obsoletenotrebased[k] | |
|
467 | nstate[repo[k].node()] = repo[succ].node() | |
|
468 | # XXX this is the same as dest.node() for the non-continue path -- | |
|
469 | # this should probably be cleaned up | |
|
470 | targetnode = repo[self.target].node() | |
|
471 | ||
|
472 | # restore original working directory | |
|
473 | # (we do this before stripping) | |
|
474 | newwd = self.state.get(self.originalwd, self.originalwd) | |
|
475 | if newwd == revprecursor: | |
|
476 | newwd = self.obsoletenotrebased[self.originalwd] | |
|
477 | elif newwd < 0: | |
|
478 | # original directory is a parent of rebase set root or ignored | |
|
479 | newwd = self.originalwd | |
|
480 | if newwd not in [c.rev() for c in repo[None].parents()]: | |
|
481 | ui.note(_("update back to initial working directory parent\n")) | |
|
482 | hg.updaterepo(repo, newwd, False) | |
|
483 | ||
|
484 | if not self.keepf: | |
|
485 | collapsedas = None | |
|
486 | if self.collapsef: | |
|
487 | collapsedas = newnode | |
|
488 | clearrebased(ui, repo, self.state, self.skipped, collapsedas) | |
|
489 | ||
|
490 | with repo.transaction('bookmark') as tr: | |
|
491 | if self.currentbookmarks: | |
|
492 | updatebookmarks(repo, targetnode, nstate, | |
|
493 | self.currentbookmarks, tr) | |
|
494 | if self.activebookmark not in repo._bookmarks: | |
|
495 | # active bookmark was divergent one and has been deleted | |
|
496 | self.activebookmark = None | |
|
497 | clearstatus(repo) | |
|
498 | clearcollapsemsg(repo) | |
|
499 | ||
|
500 | ui.note(_("rebase completed\n")) | |
|
501 | util.unlinkpath(repo.sjoin('undo'), ignoremissing=True) | |
|
502 | if self.skipped: | |
|
503 | skippedlen = len(self.skipped) | |
|
504 | ui.note(_("%d revisions have been skipped\n") % skippedlen) | |
|
505 | ||
|
506 | if (self.activebookmark and | |
|
507 | repo['.'].node() == repo._bookmarks[self.activebookmark]): | |
|
508 | bookmarks.activate(repo, self.activebookmark) | |
|
509 | ||
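Taken together, the command function below now drives this class in four steps; condensed from the hunks that follow (error handling and locking omitted):

    rbsrt = rebaseruntime(repo, ui, opts)
    if opts.get('continue') or opts.get('abort'):
        retcode = rbsrt._prepareabortorcontinue(opts.get('abort'))
    else:
        retcode = rbsrt._preparenewrebase(dest, rebaseset)
    if retcode is not None:
        return retcode
    rbsrt._performrebase()
    rbsrt._finishrebase()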
|
94 | 510 | @command('rebase', |
|
95 | 511 | [('s', 'source', '', |
|
96 | 512 | _('rebase the specified changeset and descendants'), _('REV')), |
@@ -201,16 +617,7 b' def rebase(ui, repo, **opts):' | |||
|
201 | 617 | unresolved conflicts. |
|
202 | 618 | |
|
203 | 619 | """ |
|
204 | originalwd = target = None | |
|
205 | activebookmark = None | |
|
206 | external = nullrev | |
|
207 | # Mapping between the old revision id and either what is the new rebased | |
|
208 | # revision or what needs to be done with the old revision. The state dict | |
|
209 | # will be what contains most of the rebase progress state. | |
|
210 | state = {} | |
|
211 | skipped = set() | |
|
212 | targetancestors = set() | |
|
213 | ||
|
620 | rbsrt = rebaseruntime(repo, ui, opts) | |
|
214 | 621 | |
|
215 | 622 | lock = wlock = None |
|
216 | 623 | try: |
@@ -227,19 +634,6 b' def rebase(ui, repo, **opts):' | |||
|
227 | 634 | destspace = opts.get('_destspace') |
|
228 | 635 | contf = opts.get('continue') |
|
229 | 636 | abortf = opts.get('abort') |
|
230 | collapsef = opts.get('collapse', False) | |
|
231 | collapsemsg = cmdutil.logmessage(ui, opts) | |
|
232 | date = opts.get('date', None) | |
|
233 | e = opts.get('extrafn') # internal, used by e.g. hgsubversion | |
|
234 | extrafns = [_savegraft] | |
|
235 | if e: | |
|
236 | extrafns = [e] | |
|
237 | keepf = opts.get('keep', False) | |
|
238 | keepbranchesf = opts.get('keepbranches', False) | |
|
239 | # keepopen is not meant for use on the command line, but by | |
|
240 | # other extensions | |
|
241 | keepopen = opts.get('keepopen', False) | |
|
242 | ||
|
243 | 637 | if opts.get('interactive'): |
|
244 | 638 | try: |
|
245 | 639 | if extensions.find('histedit'): |
@@ -251,14 +645,14 b' def rebase(ui, repo, **opts):' | |||
|
251 | 645 | "'histedit' extension (see \"%s\")") % help |
|
252 | 646 | raise error.Abort(msg) |
|
253 | 647 | |
|
254 | if collapsemsg and not collapsef: | |
|
648 | if rbsrt.collapsemsg and not rbsrt.collapsef: | |
|
255 | 649 | raise error.Abort( |
|
256 | 650 | _('message can only be specified with collapse')) |
|
257 | 651 | |
|
258 | 652 | if contf or abortf: |
|
259 | 653 | if contf and abortf: |
|
260 | 654 | raise error.Abort(_('cannot use both abort and continue')) |
|
261 | if collapsef: | |
|
655 | if rbsrt.collapsef: | |
|
262 | 656 | raise error.Abort( |
|
263 | 657 | _('cannot use collapse with continue or abort')) |
|
264 | 658 | if srcf or basef or destf: |
@@ -267,265 +661,18 b' def rebase(ui, repo, **opts):' | |||
|
267 | 661 | if abortf and opts.get('tool', False): |
|
268 | 662 | ui.warn(_('tool option will be ignored\n')) |
|
269 | 663 | |
|
270 | try: | |
|
271 | (originalwd, target, state, skipped, collapsef, keepf, | |
|
272 | keepbranchesf, external, activebookmark) = restorestatus(repo) | |
|
273 | collapsemsg = restorecollapsemsg(repo) | |
|
274 | except error.RepoLookupError: | |
|
275 | if abortf: | |
|
276 | clearstatus(repo) | |
|
277 | clearcollapsemsg(repo) | |
|
278 | repo.ui.warn(_('rebase aborted (no revision is removed,' | |
|
279 | ' only broken state is cleared)\n')) | |
|
280 | return 0 | |
|
281 | else: | |
|
282 | msg = _('cannot continue inconsistent rebase') | |
|
283 | hint = _('use "hg rebase --abort" to clear broken state') | |
|
284 | raise error.Abort(msg, hint=hint) | |
|
285 | if abortf: | |
|
286 | return abort(repo, originalwd, target, state, | |
|
287 | activebookmark=activebookmark) | |
|
288 | ||
|
289 | obsoletenotrebased = {} | |
|
290 | if ui.configbool('experimental', 'rebaseskipobsolete', | |
|
291 | default=True): | |
|
292 | rebaseobsrevs = set([r for r, status in state.items() | |
|
293 | if status == revprecursor]) | |
|
294 | rebasesetrevs = set(state.keys()) | |
|
295 | obsoletenotrebased = _computeobsoletenotrebased(repo, | |
|
296 | rebaseobsrevs, | |
|
297 | target) | |
|
298 | rebaseobsskipped = set(obsoletenotrebased) | |
|
299 | _checkobsrebase(repo, ui, rebaseobsrevs, rebasesetrevs, | |
|
300 | rebaseobsskipped) | |
|
664 | retcode = rbsrt._prepareabortorcontinue(abortf) | |
|
665 | if retcode is not None: | |
|
666 | return retcode | |
|
301 | 667 | else: |
|
302 | 668 | dest, rebaseset = _definesets(ui, repo, destf, srcf, basef, revf, |
|
303 | 669 | destspace=destspace) |
|
304 | if dest is None: | |
|
305 | return _nothingtorebase() | |
|
306 | ||
|
307 | allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt) | |
|
308 | if (not (keepf or allowunstable) | |
|
309 | and repo.revs('first(children(%ld) - %ld)', | |
|
310 | rebaseset, rebaseset)): | |
|
311 | raise error.Abort( | |
|
312 | _("can't remove original changesets with" | |
|
313 | " unrebased descendants"), | |
|
314 | hint=_('use --keep to keep original changesets')) | |
|
315 | ||
|
316 | obsoletenotrebased = {} | |
|
317 | if ui.configbool('experimental', 'rebaseskipobsolete', | |
|
318 | default=True): | |
|
319 | rebasesetrevs = set(rebaseset) | |
|
320 | rebaseobsrevs = _filterobsoleterevs(repo, rebasesetrevs) | |
|
321 | obsoletenotrebased = _computeobsoletenotrebased(repo, | |
|
322 | rebaseobsrevs, | |
|
323 | dest) | |
|
324 | rebaseobsskipped = set(obsoletenotrebased) | |
|
325 | _checkobsrebase(repo, ui, rebaseobsrevs, | |
|
326 | rebasesetrevs, | |
|
327 | rebaseobsskipped) | |
|
328 | ||
|
329 | result = buildstate(repo, dest, rebaseset, collapsef, | |
|
330 | obsoletenotrebased) | |
|
331 | ||
|
332 | if not result: | |
|
333 | # Empty state built, nothing to rebase | |
|
334 | ui.status(_('nothing to rebase\n')) | |
|
335 | return _nothingtorebase() | |
|
336 | ||
|
337 | root = min(rebaseset) | |
|
338 | if not keepf and not repo[root].mutable(): | |
|
339 | raise error.Abort(_("can't rebase public changeset %s") | |
|
340 | % repo[root], | |
|
341 | hint=_('see "hg help phases" for details')) | |
|
342 | ||
|
343 | originalwd, target, state = result | |
|
344 | if collapsef: | |
|
345 | targetancestors = repo.changelog.ancestors([target], | |
|
346 | inclusive=True) | |
|
347 | external = externalparent(repo, state, targetancestors) | |
|
348 | ||
|
349 | if dest.closesbranch() and not keepbranchesf: | |
|
350 | ui.status(_('reopening closed branch head %s\n') % dest) | |
|
351 | ||
|
352 | if keepbranchesf: | |
|
353 | # insert _savebranch at the start of extrafns so if | |
|
354 | # there's a user-provided extrafn it can clobber branch if | |
|
355 | # desired | |
|
356 | extrafns.insert(0, _savebranch) | |
|
357 | if collapsef: | |
|
358 | branches = set() | |
|
359 | for rev in state: | |
|
360 | branches.add(repo[rev].branch()) | |
|
361 | if len(branches) > 1: | |
|
362 | raise error.Abort(_('cannot collapse multiple named ' | |
|
363 | 'branches')) | |
|
364 | ||
|
365 | # Rebase | |
|
366 | if not targetancestors: | |
|
367 | targetancestors = repo.changelog.ancestors([target], inclusive=True) | |
|
368 | ||
|
369 | # Keep track of the current bookmarks in order to reset them later | |
|
370 | currentbookmarks = repo._bookmarks.copy() | |
|
371 | activebookmark = activebookmark or repo._activebookmark | |
|
372 | if activebookmark: | |
|
373 | bookmarks.deactivate(repo) | |
|
374 | ||
|
375 | extrafn = _makeextrafn(extrafns) | |
|
670 | retcode = rbsrt._preparenewrebase(dest, rebaseset) | |
|
671 | if retcode is not None: | |
|
672 | return retcode | |
|
376 | 673 | |
|
377 | sortedstate = sorted(state) | |
|
378 | total = len(sortedstate) | |
|
379 | pos = 0 | |
|
380 | for rev in sortedstate: | |
|
381 | ctx = repo[rev] | |
|
382 | desc = '%d:%s "%s"' % (ctx.rev(), ctx, | |
|
383 | ctx.description().split('\n', 1)[0]) | |
|
384 | names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node()) | |
|
385 | if names: | |
|
386 | desc += ' (%s)' % ' '.join(names) | |
|
387 | pos += 1 | |
|
388 | if state[rev] == revtodo: | |
|
389 | ui.status(_('rebasing %s\n') % desc) | |
|
390 | ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)), | |
|
391 | _('changesets'), total) | |
|
392 | p1, p2, base = defineparents(repo, rev, target, state, | |
|
393 | targetancestors) | |
|
394 | storestatus(repo, originalwd, target, state, collapsef, keepf, | |
|
395 | keepbranchesf, external, activebookmark) | |
|
396 | storecollapsemsg(repo, collapsemsg) | |
|
397 | if len(repo[None].parents()) == 2: | |
|
398 | repo.ui.debug('resuming interrupted rebase\n') | |
|
399 | else: | |
|
400 | try: | |
|
401 | ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), | |
|
402 | 'rebase') | |
|
403 | stats = rebasenode(repo, rev, p1, base, state, | |
|
404 | collapsef, target) | |
|
405 | if stats and stats[3] > 0: | |
|
406 | raise error.InterventionRequired( | |
|
407 | _('unresolved conflicts (see hg ' | |
|
408 | 'resolve, then hg rebase --continue)')) | |
|
409 | finally: | |
|
410 | ui.setconfig('ui', 'forcemerge', '', 'rebase') | |
|
411 | if not collapsef: | |
|
412 | merging = p2 != nullrev | |
|
413 | editform = cmdutil.mergeeditform(merging, 'rebase') | |
|
414 | editor = cmdutil.getcommiteditor(editform=editform, **opts) | |
|
415 | newnode = concludenode(repo, rev, p1, p2, extrafn=extrafn, | |
|
416 | editor=editor, | |
|
417 | keepbranches=keepbranchesf, | |
|
418 | date=date) | |
|
419 | else: | |
|
420 | # Skip commit if we are collapsing | |
|
421 | repo.dirstate.beginparentchange() | |
|
422 | repo.setparents(repo[p1].node()) | |
|
423 | repo.dirstate.endparentchange() | |
|
424 | newnode = None | |
|
425 | # Update the state | |
|
426 | if newnode is not None: | |
|
427 | state[rev] = repo[newnode].rev() | |
|
428 | ui.debug('rebased as %s\n' % short(newnode)) | |
|
429 | else: | |
|
430 | if not collapsef: | |
|
431 | ui.warn(_('note: rebase of %d:%s created no changes ' | |
|
432 | 'to commit\n') % (rev, ctx)) | |
|
433 | skipped.add(rev) | |
|
434 | state[rev] = p1 | |
|
435 | ui.debug('next revision set to %s\n' % p1) | |
|
436 | elif state[rev] == nullmerge: | |
|
437 | ui.debug('ignoring null merge rebase of %s\n' % rev) | |
|
438 | elif state[rev] == revignored: | |
|
439 | ui.status(_('not rebasing ignored %s\n') % desc) | |
|
440 | elif state[rev] == revprecursor: | |
|
441 | targetctx = repo[obsoletenotrebased[rev]] | |
|
442 | desctarget = '%d:%s "%s"' % (targetctx.rev(), targetctx, | |
|
443 | targetctx.description().split('\n', 1)[0]) | |
|
444 | msg = _('note: not rebasing %s, already in destination as %s\n') | |
|
445 | ui.status(msg % (desc, desctarget)) | |
|
446 | elif state[rev] == revpruned: | |
|
447 | msg = _('note: not rebasing %s, it has no successor\n') | |
|
448 | ui.status(msg % desc) | |
|
449 | else: | |
|
450 | ui.status(_('already rebased %s as %s\n') % | |
|
451 | (desc, repo[state[rev]])) | |
|
452 | ||
|
453 | ui.progress(_('rebasing'), None) | |
|
454 | ui.note(_('rebase merging completed\n')) | |
|
455 | ||
|
456 | if collapsef and not keepopen: | |
|
457 | p1, p2, _base = defineparents(repo, min(state), target, | |
|
458 | state, targetancestors) | |
|
459 | editopt = opts.get('edit') | |
|
460 | editform = 'rebase.collapse' | |
|
461 | if collapsemsg: | |
|
462 | commitmsg = collapsemsg | |
|
463 | else: | |
|
464 | commitmsg = 'Collapsed revision' | |
|
465 | for rebased in state: | |
|
466 | if rebased not in skipped and state[rebased] > nullmerge: | |
|
467 | commitmsg += '\n* %s' % repo[rebased].description() | |
|
468 | editopt = True | |
|
469 | editor = cmdutil.getcommiteditor(edit=editopt, editform=editform) | |
|
470 | newnode = concludenode(repo, rev, p1, external, commitmsg=commitmsg, | |
|
471 | extrafn=extrafn, editor=editor, | |
|
472 | keepbranches=keepbranchesf, | |
|
473 | date=date) | |
|
474 | if newnode is None: | |
|
475 | newrev = target | |
|
476 | else: | |
|
477 | newrev = repo[newnode].rev() | |
|
478 | for oldrev in state.iterkeys(): | |
|
479 | if state[oldrev] > nullmerge: | |
|
480 | state[oldrev] = newrev | |
|
481 | ||
|
482 | if 'qtip' in repo.tags(): | |
|
483 | updatemq(repo, state, skipped, **opts) | |
|
484 | ||
|
485 | if currentbookmarks: | |
|
486 | # Nodeids are needed to reset bookmarks | |
|
487 | nstate = {} | |
|
488 | for k, v in state.iteritems(): | |
|
489 | if v > nullmerge: | |
|
490 | nstate[repo[k].node()] = repo[v].node() | |
|
491 | # XXX this is the same as dest.node() for the non-continue path -- | |
|
492 | # this should probably be cleaned up | |
|
493 | targetnode = repo[target].node() | |
|
494 | ||
|
495 | # restore original working directory | |
|
496 | # (we do this before stripping) | |
|
497 | newwd = state.get(originalwd, originalwd) | |
|
498 | if newwd < 0: | |
|
499 | # original directory is a parent of rebase set root or ignored | |
|
500 | newwd = originalwd | |
|
501 | if newwd not in [c.rev() for c in repo[None].parents()]: | |
|
502 | ui.note(_("update back to initial working directory parent\n")) | |
|
503 | hg.updaterepo(repo, newwd, False) | |
|
504 | ||
|
505 | if not keepf: | |
|
506 | collapsedas = None | |
|
507 | if collapsef: | |
|
508 | collapsedas = newnode | |
|
509 | clearrebased(ui, repo, state, skipped, collapsedas) | |
|
510 | ||
|
511 | with repo.transaction('bookmark') as tr: | |
|
512 | if currentbookmarks: | |
|
513 | updatebookmarks(repo, targetnode, nstate, currentbookmarks, tr) | |
|
514 | if activebookmark not in repo._bookmarks: | |
|
515 | # active bookmark was divergent one and has been deleted | |
|
516 | activebookmark = None | |
|
517 | clearstatus(repo) | |
|
518 | clearcollapsemsg(repo) | |
|
519 | ||
|
520 | ui.note(_("rebase completed\n")) | |
|
521 | util.unlinkpath(repo.sjoin('undo'), ignoremissing=True) | |
|
522 | if skipped: | |
|
523 | ui.note(_("%d revisions have been skipped\n") % len(skipped)) | |
|
524 | ||
|
525 | if (activebookmark and | |
|
526 | repo['.'].node() == repo._bookmarks[activebookmark]): | |
|
527 | bookmarks.activate(repo, activebookmark) | |
|
528 | ||
|
674 | rbsrt._performrebase() | |
|
675 | rbsrt._finishrebase() | |
|
529 | 676 | finally: |
|
530 | 677 | release(lock, wlock) |
|
531 | 678 | |
@@ -733,21 +880,12 b' def _checkobsrebase(repo, ui,' | |||
|
733 | 880 | "experimental.allowdivergence=True") |
|
734 | 881 | raise error.Abort(msg % (",".join(divhashes),), hint=h) |
|
735 | 882 | |
|
736 | # - plain prune (no successor) changesets are rebased | |
|
737 | # - split changesets are not rebased if at least one of the | |
|
738 | # changeset resulting from the split is an ancestor of dest | |
|
739 | rebaseset = rebasesetrevs - rebaseobsskipped | |
|
740 | if rebasesetrevs and not rebaseset: | |
|
741 | msg = _('all requested changesets have equivalents ' | |
|
742 | 'or were marked as obsolete') | |
|
743 | hint = _('to force the rebase, set the config ' | |
|
744 | 'experimental.rebaseskipobsolete to False') | |
|
745 | raise error.Abort(msg, hint=hint) | |
|
746 | ||
|
747 | def defineparents(repo, rev, target, state, targetancestors): | |
|
883 | def defineparents(repo, rev, target, state, targetancestors, | |
|
884 | obsoletenotrebased): | |
|
748 | 885 | 'Return the new parent relationship of the revision that will be rebased' |
|
749 | 886 | parents = repo[rev].parents() |
|
750 | 887 | p1 = p2 = nullrev |
|
888 | rp1 = None | |
|
751 | 889 | |
|
752 | 890 | p1n = parents[0].rev() |
|
753 | 891 | if p1n in targetancestors: |
@@ -771,6 +909,8 b' def defineparents(repo, rev, target, sta' | |||
|
771 | 909 | if p2n in state: |
|
772 | 910 | if p1 == target: # p1n in targetancestors or external |
|
773 | 911 | p1 = state[p2n] |
|
912 | if p1 == revprecursor: | |
|
913 | rp1 = obsoletenotrebased[p2n] | |
|
774 | 914 | elif state[p2n] in revskipped: |
|
775 | 915 | p2 = nearestrebased(repo, p2n, state) |
|
776 | 916 | if p2 is None: |
@@ -784,7 +924,7 b' def defineparents(repo, rev, target, sta' | |||
|
784 | 924 | 'would have 3 parents') % rev) |
|
785 | 925 | p2 = p2n |
|
786 | 926 | repo.ui.debug(" future parents are %d and %d\n" % |
|
787 | (repo[p1].rev(), repo[p2].rev())) | |
|
927 | (repo[rp1 or p1].rev(), repo[p2].rev())) | |
|
788 | 928 | |
|
789 | 929 | if not any(p.rev() in state for p in parents): |
|
790 | 930 | # Case (1) root changeset of a non-detaching rebase set. |
@@ -828,6 +968,8 b' def defineparents(repo, rev, target, sta' | |||
|
828 | 968 | # make it feasible to consider different cases separately. In these |
|
829 | 969 | # other cases we currently just leave it to the user to correctly |
|
830 | 970 | # resolve an impossible merge using a wrong ancestor. |
|
971 | # | |
|
972 | # xx, p1 could be -4, and both parents could probably be -4... | |
|
831 | 973 | for p in repo[rev].parents(): |
|
832 | 974 | if state.get(p.rev()) == p1: |
|
833 | 975 | base = p.rev() |
@@ -838,7 +980,7 b' def defineparents(repo, rev, target, sta' | |||
|
838 | 980 | # Raise because this function is called wrong (see issue 4106) |
|
839 | 981 | raise AssertionError('no base found to rebase on ' |
|
840 | 982 | '(defineparents called wrong)') |
|
841 | return p1, p2, base | |
|
983 | return rp1 or p1, p2, base | |
|
842 | 984 | |
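Note on the rp1 change above: rp1 stays None unless p1 resolved to the revprecursor sentinel (a negative constant), in which case it holds the rev of the successor already present in the destination, so `rp1 or p1` substitutes the successor exactly when the sentinel was hit. A condensed trace (values hypothetical):

    rp1 = None
    p1 = state[p2n]                    # may be the revprecursor sentinel, e.g. -4
    if p1 == revprecursor:
        rp1 = obsoletenotrebased[p2n]  # successor rev in the destination
    return rp1 or p1, p2, base         # appears to rely on the successor rev never being 0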
|
843 | 985 | def isagitpatch(repo, patchname): |
|
844 | 986 | 'Return true if the given patch is in git format' |
@@ -952,68 +1094,6 b' def clearstatus(repo):' | |||
|
952 | 1094 | _clearrebasesetvisibiliy(repo) |
|
953 | 1095 | util.unlinkpath(repo.join("rebasestate"), ignoremissing=True) |
|
954 | 1096 | |
|
955 | def restorestatus(repo): | |
|
956 | 'Restore a previously stored status' | |
|
957 | keepbranches = None | |
|
958 | target = None | |
|
959 | collapse = False | |
|
960 | external = nullrev | |
|
961 | activebookmark = None | |
|
962 | state = {} | |
|
963 | ||
|
964 | try: | |
|
965 | f = repo.vfs("rebasestate") | |
|
966 | for i, l in enumerate(f.read().splitlines()): | |
|
967 | if i == 0: | |
|
968 | originalwd = repo[l].rev() | |
|
969 | elif i == 1: | |
|
970 | target = repo[l].rev() | |
|
971 | elif i == 2: | |
|
972 | external = repo[l].rev() | |
|
973 | elif i == 3: | |
|
974 | collapse = bool(int(l)) | |
|
975 | elif i == 4: | |
|
976 | keep = bool(int(l)) | |
|
977 | elif i == 5: | |
|
978 | keepbranches = bool(int(l)) | |
|
979 | elif i == 6 and not (len(l) == 81 and ':' in l): | |
|
980 | # line 6 is a recent addition, so for backwards compatibility | |
|
981 | # check that the line doesn't look like the oldrev:newrev lines | |
|
982 | activebookmark = l | |
|
983 | else: | |
|
984 | oldrev, newrev = l.split(':') | |
|
985 | if newrev in (str(nullmerge), str(revignored), | |
|
986 | str(revprecursor), str(revpruned)): | |
|
987 | state[repo[oldrev].rev()] = int(newrev) | |
|
988 | elif newrev == nullid: | |
|
989 | state[repo[oldrev].rev()] = revtodo | |
|
990 | # Legacy compat special case | |
|
991 | else: | |
|
992 | state[repo[oldrev].rev()] = repo[newrev].rev() | |
|
993 | ||
|
994 | except IOError as err: | |
|
995 | if err.errno != errno.ENOENT: | |
|
996 | raise | |
|
997 | cmdutil.wrongtooltocontinue(repo, _('rebase')) | |
|
998 | ||
|
999 | if keepbranches is None: | |
|
1000 | raise error.Abort(_('.hg/rebasestate is incomplete')) | |
|
1001 | ||
|
1002 | skipped = set() | |
|
1003 | # recompute the set of skipped revs | |
|
1004 | if not collapse: | |
|
1005 | seen = set([target]) | |
|
1006 | for old, new in sorted(state.items()): | |
|
1007 | if new != revtodo and new in seen: | |
|
1008 | skipped.add(old) | |
|
1009 | seen.add(new) | |
|
1010 | repo.ui.debug('computed skipped revs: %s\n' % | |
|
1011 | (' '.join(str(r) for r in sorted(skipped)) or None)) | |
|
1012 | repo.ui.debug('rebase status resumed\n') | |
|
1013 | _setrebasesetvisibility(repo, state.keys()) | |
|
1014 | return (originalwd, target, state, skipped, | |
|
1015 | collapse, keep, keepbranches, external, activebookmark) | |
|
1016 | ||
|
1017 | 1097 | def needupdate(repo, state): |
|
1018 | 1098 | '''check whether we should `update --clean` away from a merge, or if |
|
1019 | 1099 | somehow the working dir got forcibly updated, e.g. by older hg''' |
@@ -1336,7 +1416,9 b' def summaryhook(ui, repo):' | |||
|
1336 | 1416 | if not os.path.exists(repo.join('rebasestate')): |
|
1337 | 1417 | return |
|
1338 | 1418 | try: |
|
1339 | state = restorestatus(repo)[2] | |
|
1419 | rbsrt = rebaseruntime(repo, ui, {}) | |
|
1420 | rbsrt.restorestatus() | |
|
1421 | state = rbsrt.state | |
|
1340 | 1422 | except error.RepoLookupError: |
|
1341 | 1423 | # i18n: column positioning for "hg summary" |
|
1342 | 1424 | msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n') |
@@ -12,13 +12,13 b' The feature provided by this extension h' | |||
|
12 | 12 | |
|
13 | 13 | from __future__ import absolute_import |
|
14 | 14 | |
|
15 | from mercurial.i18n import _ | |
|
15 | 16 | from mercurial import ( |
|
16 | 17 | cmdutil, |
|
17 | 18 | commands, |
|
18 | 19 | error, |
|
19 | 20 | extensions, |
|
20 | 21 | ) |
|
21 | from mercurial.i18n import _ | |
|
22 | 22 | |
|
23 | 23 | cmdtable = {} |
|
24 | 24 | command = cmdutil.command(cmdtable) |
@@ -11,13 +11,13 b' from __future__ import absolute_import' | |||
|
11 | 11 | import os |
|
12 | 12 | import stat |
|
13 | 13 | |
|
14 | from mercurial.i18n import _ | |
|
14 | 15 | from mercurial import ( |
|
15 | 16 | cmdutil, |
|
16 | 17 | error, |
|
17 | 18 | hg, |
|
18 | 19 | util, |
|
19 | 20 | ) |
|
20 | from mercurial.i18n import _ | |
|
21 | 21 | |
|
22 | 22 | cmdtable = {} |
|
23 | 23 | command = cmdutil.command(cmdtable) |
@@ -43,6 +43,8 b' from __future__ import absolute_import' | |||
|
43 | 43 | |
|
44 | 44 | import os |
|
45 | 45 | import re |
|
46 | ||
|
47 | from mercurial.i18n import _ | |
|
46 | 48 | from mercurial import ( |
|
47 | 49 | cmdutil, |
|
48 | 50 | error, |
@@ -51,7 +53,6 b' from mercurial import (' | |||
|
51 | 53 | templater, |
|
52 | 54 | util, |
|
53 | 55 | ) |
|
54 | from mercurial.i18n import _ | |
|
55 | 56 | |
|
56 | 57 | cmdtable = {} |
|
57 | 58 | command = cmdutil.command(cmdtable) |
@@ -37,10 +37,22 b' The following ``share.`` config options ' | |||
|
37 | 37 | The default naming mode is "identity." |
|
38 | 38 | ''' |
|
39 | 39 | |
|
40 | from __future__ import absolute_import | |
|
41 | ||
|
42 | import errno | |
|
40 | 43 | from mercurial.i18n import _ |
|
41 | from mercurial import cmdutil, commands, hg, util, extensions, bookmarks, error | |
|
42 | from mercurial.hg import repository, parseurl | |
|
43 | import errno | |
|
44 | from mercurial import ( | |
|
45 | bookmarks, | |
|
46 | cmdutil, | |
|
47 | commands, | |
|
48 | error, | |
|
49 | extensions, | |
|
50 | hg, | |
|
51 | util, | |
|
52 | ) | |
|
53 | ||
|
54 | repository = hg.repository | |
|
55 | parseurl = hg.parseurl | |
|
44 | 56 | |
|
45 | 57 | cmdtable = {} |
|
46 | 58 | command = cmdutil.command(cmdtable) |
@@ -135,7 +147,7 b' def _hassharedbookmarks(repo):' | |||
|
135 | 147 | if inst.errno != errno.ENOENT: |
|
136 | 148 | raise |
|
137 | 149 | return False |
|
138 | return 'bookmarks' in shared |

150 | return hg.sharedbookmarks in shared | |
|
139 | 151 | |
|
140 | 152 | def _getsrcrepo(repo): |
|
141 | 153 | """ |
@@ -145,10 +157,15 b' def _getsrcrepo(repo):' | |||
|
145 | 157 | if repo.sharedpath == repo.path: |
|
146 | 158 | return None |
|
147 | 159 | |
|
160 | if util.safehasattr(repo, 'srcrepo') and repo.srcrepo: | |
|
161 | return repo.srcrepo | |
|
162 | ||
|
148 | 163 | # the sharedpath always ends in the .hg; we want the path to the repo |
|
149 | 164 | source = repo.vfs.split(repo.sharedpath)[0] |
|
150 | 165 | srcurl, branches = parseurl(source) |

151 | return repository(repo.ui, srcurl) |

166 | srcrepo = repository(repo.ui, srcurl) | |
|
167 | repo.srcrepo = srcrepo | |
|
168 | return srcrepo | |
|
152 | 169 | |
|
153 | 170 | def getbkfile(orig, repo): |
|
154 | 171 | if _hassharedbookmarks(repo): |
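The srcrepo lines above add plain per-object memoization; a minimal sketch of the pattern under the same names (repository() and parseurl() as aliased at the top of the file; the function name here is hypothetical):

    def _getsrcrepo_sketch(repo):
        if getattr(repo, 'srcrepo', None):          # cached on the repo object
            return repo.srcrepo
        source = repo.vfs.split(repo.sharedpath)[0]
        srcurl, branches = parseurl(source)
        repo.srcrepo = repository(repo.ui, srcurl)  # construct once, then reuse
        return repo.srcrepo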
@@ -25,6 +25,8 b' from __future__ import absolute_import' | |||
|
25 | 25 | import collections |
|
26 | 26 | import errno |
|
27 | 27 | import itertools |
|
28 | ||
|
29 | from mercurial.i18n import _ | |
|
28 | 30 | from mercurial import ( |
|
29 | 31 | bundle2, |
|
30 | 32 | bundlerepo, |
@@ -45,7 +47,6 b' from mercurial import (' | |||
|
45 | 47 | templatefilters, |
|
46 | 48 | util, |
|
47 | 49 | ) |
|
48 | from mercurial.i18n import _ | |
|
49 | 50 | |
|
50 | 51 | from . import ( |
|
51 | 52 | rebase, |
@@ -164,21 +165,26 b' class shelvedstate(object):' | |||
|
164 | 165 | raise error.Abort(_('this version of shelve is incompatible ' |
|
165 | 166 | 'with the version used in this repo')) |
|
166 | 167 | name = fp.readline().strip() |
|
167 | wctx = fp.readline().strip() | |
|
168 | pendingctx = fp.readline().strip() | |
|
168 | wctx = nodemod.bin(fp.readline().strip()) | |
|
169 | pendingctx = nodemod.bin(fp.readline().strip()) | |
|
169 | 170 | parents = [nodemod.bin(h) for h in fp.readline().split()] |
|
170 | 171 | stripnodes = [nodemod.bin(h) for h in fp.readline().split()] |
|
171 | 172 | branchtorestore = fp.readline().strip() |
|
173 | except (ValueError, TypeError) as err: | |
|
174 | raise error.CorruptedState(str(err)) | |
|
172 | 175 | finally: |
|
173 | 176 | fp.close() |
|
174 | 177 | |
|
175 | obj = cls() | |
|
176 | obj.name = name |

177 | obj.wctx = repo[nodemod.bin(wctx)] | |

178 | obj.pendingctx = repo[nodemod.bin(pendingctx)] |

179 | obj.parents = parents |

180 | obj.stripnodes = stripnodes | |
|
181 | obj.branchtorestore = branchtorestore | |
|
178 | try: | |
|
179 | obj = cls() | |
|
180 | obj.name = name | |
|
181 | obj.wctx = repo[wctx] | |
|
182 | obj.pendingctx = repo[pendingctx] | |
|
183 | obj.parents = parents | |
|
184 | obj.stripnodes = stripnodes | |
|
185 | obj.branchtorestore = branchtorestore | |
|
186 | except error.RepoLookupError as err: | |
|
187 | raise error.CorruptedState(str(err)) | |
|
182 | 188 | |
|
183 | 189 | return obj |
|
184 | 190 | |
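The reordering above funnels both failure modes of a damaged state file into CorruptedState: nodemod.bin() raising while the file is still being read, and the repo[...] lookups raising once the nodes are resolved. Condensed from the hunk:

    try:
        wctx = nodemod.bin(fp.readline().strip())   # ValueError/TypeError on bad hex
    except (ValueError, TypeError) as err:
        raise error.CorruptedState(str(err))
    try:
        obj.wctx = repo[wctx]                       # node unknown to this repo
    except error.RepoLookupError as err:
        raise error.CorruptedState(str(err))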
@@ -225,28 +231,10 b' def cleanupoldbackups(repo):' | |||
|
225 | 231 | def _aborttransaction(repo): |
|
226 | 232 | '''Abort current transaction for shelve/unshelve, but keep dirstate |
|
227 | 233 | ''' |
|
228 | backupname = 'dirstate.shelve' | |
|
229 | dirstatebackup = None | |
|
230 | try: | |
|
231 | # create backup of (un)shelved dirstate, because aborting transaction | |
|
232 | # should restore dirstate to one at the beginning of the | |
|
233 | # transaction, which doesn't include the result of (un)shelving | |
|
234 | fp = repo.vfs.open(backupname, "w") | |
|
235 | dirstatebackup = backupname | |
|
236 | # clearing _dirty/_dirtypl of dirstate by _writedirstate below | |
|
237 | # is unintentional. but it doesn't cause problem in this case, | |
|
238 | # because no code path refers them until transaction is aborted. | |
|
239 | repo.dirstate._writedirstate(fp) # write in-memory changes forcibly | |
|
240 | ||
|
241 | tr = repo.currenttransaction() | |
|
242 | tr.abort() | |
|
243 | ||
|
244 | # restore to backuped dirstate | |
|
245 | repo.vfs.rename(dirstatebackup, 'dirstate') | |
|
246 | dirstatebackup = None | |
|
247 | finally: | |
|
248 | if dirstatebackup: | |
|
249 | repo.vfs.unlink(dirstatebackup) | |
|
234 | tr = repo.currenttransaction() | |
|
235 | repo.dirstate.savebackup(tr, suffix='.shelve') | |
|
236 | tr.abort() | |
|
237 | repo.dirstate.restorebackup(None, suffix='.shelve') | |
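The dirstate backup API replaces the hand-rolled file juggling removed above; annotated, the new sequence is (savebackup/restorebackup are the dirstate methods this series assumes):

    tr = repo.currenttransaction()
    repo.dirstate.savebackup(tr, suffix='.shelve')       # snapshot dirstate first
    tr.abort()                                           # roll back everything else
    repo.dirstate.restorebackup(None, suffix='.shelve')  # reinstate the snapshot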
|
250 | 238 | |
|
251 | 239 | def createcmd(ui, repo, pats, opts): |
|
252 | 240 | """subcommand that creates a new shelve""" |
@@ -683,6 +671,20 b' def _dounshelve(ui, repo, *shelved, **op' | |||
|
683 | 671 | if err.errno != errno.ENOENT: |
|
684 | 672 | raise |
|
685 | 673 | cmdutil.wrongtooltocontinue(repo, _('unshelve')) |
|
674 | except error.CorruptedState as err: | |
|
675 | ui.debug(str(err) + '\n') | |
|
676 | if continuef: | |
|
677 | msg = _('corrupted shelved state file') | |
|
678 | hint = _('please run hg unshelve --abort to abort unshelve ' | |
|
679 | 'operation') | |
|
680 | raise error.Abort(msg, hint=hint) | |
|
681 | elif abortf: | |
|
682 | msg = _('could not read shelved state file, your working copy ' | |
|
683 | 'may be in an unexpected state\nplease update to some ' | |
|
684 | 'commit\n') | |
|
685 | ui.warn(msg) | |
|
686 | shelvedstate.clear(repo) | |
|
687 | return | |
|
686 | 688 | |
|
687 | 689 | if abortf: |
|
688 | 690 | return unshelveabort(ui, repo, state, opts) |
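The new except-branch above amounts to a recovery matrix for a corrupted state file, roughly:

    # corrupted + --continue -> Abort('corrupted shelved state file',
    #                                 hint='please run hg unshelve --abort ...')
    # corrupted + --abort    -> ui.warn(...); shelvedstate.clear(repo); return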
@@ -5,6 +5,7 b' repository. See the command help for det' | |||
|
5 | 5 | """ |
|
6 | 6 | from __future__ import absolute_import |
|
7 | 7 | |
|
8 | from mercurial.i18n import _ | |
|
8 | 9 | from mercurial import ( |
|
9 | 10 | bookmarks as bookmarksmod, |
|
10 | 11 | cmdutil, |
@@ -17,7 +18,6 b' from mercurial import (' | |||
|
17 | 18 | scmutil, |
|
18 | 19 | util, |
|
19 | 20 | ) |
|
20 | from mercurial.i18n import _ | |
|
21 | 21 | nullid = nodemod.nullid |
|
22 | 22 | release = lockmod.release |
|
23 | 23 |
@@ -49,11 +49,11 b' from __future__ import absolute_import' | |||
|
49 | 49 | import os |
|
50 | 50 | import sys |
|
51 | 51 | |
|
52 | from mercurial.i18n import _ | |
|
52 | 53 | from mercurial import ( |
|
53 | 54 | encoding, |
|
54 | 55 | error, |
|
55 | 56 | ) |
|
56 | from mercurial.i18n import _ | |
|
57 | 57 | |
|
58 | 58 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
|
59 | 59 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
@@ -192,5 +192,5 b' def extsetup(ui):' | |||
|
192 | 192 | # command line options is not yet applied when |
|
193 | 193 | # extensions.loadall() is called. |
|
194 | 194 | if '--debug' in sys.argv: |
|
195 | ui.write("[win32mbcs] activated with encoding: %s\n" | |
|
195 | ui.write(("[win32mbcs] activated with encoding: %s\n") | |
|
196 | 196 | % _encoding) |
@@ -41,10 +41,16 b' pushed or pulled::' | |||
|
41 | 41 | # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr |
|
42 | 42 | ''' |
|
43 | 43 | |
|
44 | from __future__ import absolute_import | |
|
45 | ||
|
46 | import re | |
|
44 | 47 | from mercurial.i18n import _ |
|
45 | from mercurial.node import short |

46 | from mercurial import util | |
|
47 | import re | |
|
48 | from mercurial.node import ( | |
|
49 | short, | |
|
50 | ) | |
|
51 | from mercurial import ( | |
|
52 | util, | |
|
53 | ) | |
|
48 | 54 | |
|
49 | 55 | # Note for extension authors: ONLY specify testedwith = 'internal' for |
|
50 | 56 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
@@ -20,7 +20,11 b' Use xgettext like normal to extract stri' | |||
|
20 | 20 | join the message cataloges to get the final catalog. |
|
21 | 21 | """ |
|
22 | 22 | |
|
23 | import os, sys, inspect | |
|
23 | from __future__ import absolute_import, print_function | |
|
24 | ||
|
25 | import inspect | |
|
26 | import os | |
|
27 | import sys | |
|
24 | 28 | |
|
25 | 29 | |
|
26 | 30 | def escape(s): |
@@ -95,7 +99,7 b' def docstrings(path):' | |||
|
95 | 99 | if mod.__doc__: |
|
96 | 100 | src = open(path).read() |
|
97 | 101 | lineno = 1 + offset(src, mod.__doc__, path, 7) |
|
98 | print poentry(path, lineno, mod.__doc__) |

102 | print(poentry(path, lineno, mod.__doc__)) | |
|
99 | 103 | |
|
100 | 104 | functions = list(getattr(mod, 'i18nfunctions', [])) |
|
101 | 105 | functions = [(f, True) for f in functions] |
@@ -115,12 +119,12 b' def docstrings(path):' | |||
|
115 | 119 | if rstrip: |
|
116 | 120 | doc = doc.rstrip() |
|
117 | 121 | lineno += offset(src, doc, name, 1) |
|
118 | print poentry(path, lineno, doc) |

122 | print(poentry(path, lineno, doc)) | |
|
119 | 123 | |
|
120 | 124 | |
|
121 | 125 | def rawtext(path): |
|
122 | 126 | src = open(path).read() |
|
123 | print poentry(path, 1, src) |

127 | print(poentry(path, 1, src)) | |
|
124 | 128 | |
|
125 | 129 | |
|
126 | 130 | if __name__ == "__main__": |
@@ -13,6 +13,8 b' modify entries, comments or metadata, et' | |||
|
13 | 13 | :func:`~polib.mofile` convenience functions. |
|
14 | 14 | """ |
|
15 | 15 | |
|
16 | from __future__ import absolute_import | |
|
17 | ||
|
16 | 18 | __author__ = 'David Jean Louis <izimobil@gmail.com>' |
|
17 | 19 | __version__ = '0.6.4' |
|
18 | 20 | __all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry', |
@@ -5,9 +5,11 b'' | |||
|
5 | 5 | # license: MIT/X11/Expat |
|
6 | 6 | # |
|
7 | 7 | |
|
8 | from __future__ import absolute_import, print_function | |
|
9 | ||
|
10 | import polib | |
|
8 | 11 | import re |
|
9 | 12 | import sys |
|
10 | import polib | |
|
11 | 13 | |
|
12 | 14 | def addentry(po, entry, cache): |
|
13 | 15 | e = cache.get(entry.msgid) |
@@ -67,8 +69,8 b' if __name__ == "__main__":' | |||
|
67 | 69 | continue |
|
68 | 70 | else: |
|
69 | 71 | # lines following directly, unexpected |
|
70 | print 'Warning: text follows line with directive' \ |

71 | ' %s' % directive | |
|
72 | print('Warning: text follows line with directive' \ | |
|
73 | ' %s' % directive) | |
|
72 | 74 | comment = 'do not translate: .. %s::' % directive |
|
73 | 75 | if not newentry.comment: |
|
74 | 76 | newentry.comment = comment |
@@ -12,36 +12,13 b' import os' | |||
|
12 | 12 | import sys |
|
13 | 13 | import zipimport |
|
14 | 14 | |
|
15 | from . import ( | |
|
16 | policy | |
|
17 | ) | |
|
18 | ||
|
15 | 19 | __all__ = [] |
|
16 | 20 | |
|
17 | # Rules for how modules can be loaded. Values are: | |
|
18 | # | |
|
19 | # c - require C extensions | |
|
20 | # allow - allow pure Python implementation when C loading fails | |
|
21 | # py - only load pure Python modules | |
|
22 | # | |
|
23 | # By default, require the C extensions for performance reasons. | |
|
24 | modulepolicy = 'c' | |
|
25 | try: | |
|
26 | from . import __modulepolicy__ | |
|
27 | modulepolicy = __modulepolicy__.modulepolicy | |
|
28 | except ImportError: | |
|
29 | pass | |
|
30 | ||
|
31 | # PyPy doesn't load C extensions. | |
|
32 | # | |
|
33 | # The canonical way to do this is to test platform.python_implementation(). | |
|
34 | # But we don't import platform and don't bloat for it here. | |
|
35 | if '__pypy__' in sys.builtin_module_names: | |
|
36 | modulepolicy = 'py' | |
|
37 | ||
|
38 | # Our C extensions aren't yet compatible with Python 3. So use pure Python | |
|
39 | # on Python 3 for now. | |
|
40 | if sys.version_info[0] >= 3: | |
|
41 | modulepolicy = 'py' | |
|
42 | ||
|
43 | # Environment variable can always force settings. | |
|
44 | modulepolicy = os.environ.get('HGMODULEPOLICY', modulepolicy) | |
|
21 | modulepolicy = policy.policy | |
|
45 | 22 | |
|
46 | 23 | # Modules that have both Python and C implementations. See also the |
|
47 | 24 | # set of .py files under mercurial/pure/. |
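For reference, the resolution the removed block performed, now assumed to sit behind policy.policy in the new mercurial/policy.py:

    modulepolicy = 'c'                      # default: require C extensions
    try:
        from . import __modulepolicy__      # generated at build time
        modulepolicy = __modulepolicy__.modulepolicy
    except ImportError:
        pass
    if '__pypy__' in sys.builtin_module_names:
        modulepolicy = 'py'                 # PyPy cannot load the C extensions
    if sys.version_info[0] >= 3:
        modulepolicy = 'py'                 # C extensions not yet Python 3 ready
    modulepolicy = os.environ.get('HGMODULEPOLICY', modulepolicy)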
@@ -82,7 +59,7 b' class hgimporter(object):' | |||
|
82 | 59 | return zl |
|
83 | 60 | |
|
84 | 61 | try: |
|
85 | if modulepolicy == 'py': |

62 | if modulepolicy in policy.policynoc: | |
|
86 | 63 | raise ImportError() |
|
87 | 64 | |
|
88 | 65 | zl = ziploader('mercurial') |
@@ -109,7 +86,7 b' class hgimporter(object):' | |||
|
109 | 86 | stem = name.split('.')[-1] |
|
110 | 87 | |
|
111 | 88 | try: |
|
112 | if modulepolicy == 'py': |

89 | if modulepolicy in policy.policynoc: | |
|
113 | 90 | raise ImportError() |
|
114 | 91 | |
|
115 | 92 | modinfo = imp.find_module(stem, mercurial.__path__) |
@@ -144,9 +121,238 b' class hgimporter(object):' | |||
|
144 | 121 | sys.modules[name] = mod |
|
145 | 122 | return mod |
|
146 | 123 | |
|
124 | # Python 3 uses a custom module loader that transforms source code between | |
|
125 | # source file reading and compilation. This is done by registering a custom | |
|
126 | # finder that changes the spec for Mercurial modules to use a custom loader. | |
|
127 | if sys.version_info[0] >= 3: | |
|
128 | from . import pure | |
|
129 | import importlib | |
|
130 | import io | |
|
131 | import token | |
|
132 | import tokenize | |
|
133 | ||
|
134 | class hgpathentryfinder(importlib.abc.MetaPathFinder): | |
|
135 | """A sys.meta_path finder that uses a custom module loader.""" | |
|
136 | def find_spec(self, fullname, path, target=None): | |
|
137 | # Only handle Mercurial-related modules. | |
|
138 | if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')): | |
|
139 | return None | |
|
140 | ||
|
141 | # This assumes Python 3 doesn't support loading C modules. | |
|
142 | if fullname in _dualmodules: | |
|
143 | stem = fullname.split('.')[-1] | |
|
144 | fullname = 'mercurial.pure.%s' % stem | |
|
145 | target = pure | |
|
146 | assert len(path) == 1 | |
|
147 | path = [os.path.join(path[0], 'pure')] | |
|
148 | ||
|
149 | # Try to find the module using other registered finders. | |
|
150 | spec = None | |
|
151 | for finder in sys.meta_path: | |
|
152 | if finder == self: | |
|
153 | continue | |
|
154 | ||
|
155 | spec = finder.find_spec(fullname, path, target=target) | |
|
156 | if spec: | |
|
157 | break | |
|
158 | ||
|
159 | # This is a Mercurial-related module but we couldn't find it | |
|
160 | # using the previously-registered finders. This likely means | |
|
161 | # the module doesn't exist. | |
|
162 | if not spec: | |
|
163 | return None | |
|
164 | ||
|
165 | if fullname.startswith('mercurial.pure.'): | |
|
166 | spec.name = spec.name.replace('.pure.', '.') | |
|
167 | ||
|
168 | # TODO need to support loaders from alternate specs, like zip | |
|
169 | # loaders. | |
|
170 | spec.loader = hgloader(spec.name, spec.origin) | |
|
171 | return spec | |
|
172 | ||
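To trace the rerouting above for one of the dual C/Python modules (the concrete module name is an assumed example):

    # find_spec('mercurial.parsers', ['.../mercurial']) with 'parsers' in _dualmodules:
    #   fullname    -> 'mercurial.pure.parsers'   # load the pure-Python source
    #   path        -> ['.../mercurial/pure']
    #   spec.name   -> 'mercurial.parsers'        # rewritten back to the public name
    #   spec.loader -> hgloader(spec.name, spec.origin)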
|
173 | def replacetokens(tokens): | |
|
174 | """Transform a stream of tokens from raw to Python 3. | |
|
175 | ||
|
176 | It is called by the custom module loading machinery to rewrite | |
|
177 | source/tokens between source decoding and compilation. | |
|
178 | ||
|
179 | Returns a generator of possibly rewritten tokens. | |
|
180 | ||
|
181 | The input token list may be mutated as part of processing. However, | |
|
182 | its changes do not necessarily match the output token stream. | |
|
183 | ||
|
184 | REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION | |
|
185 | OR CACHED FILES WON'T GET INVALIDATED PROPERLY. | |
|
186 | """ | |
|
187 | for i, t in enumerate(tokens): | |
|
188 | # Convert most string literals to byte literals. String literals | |
|
189 | # in Python 2 are bytes. String literals in Python 3 are unicode. | |
|
190 | # Most strings in Mercurial are bytes and unicode strings are rare. | |
|
191 | # Rather than rewrite all string literals to use ``b''`` to indicate | |
|
192 | # byte strings, we apply this token transformer to insert the ``b`` | |
|
193 | # prefix nearly everywhere. | |
|
194 | if t.type == token.STRING: | |
|
195 | s = t.string | |
|
196 | ||
|
197 | # Preserve docstrings as string literals. This is inconsistent | |
|
198 | # with regular unprefixed strings. However, the | |
|
199 | # "from __future__" parsing (which allows a module docstring to | |
|
200 | # exist before it) doesn't properly handle the docstring if it | |
|
201 | # is b''' prefixed, leading to a SyntaxError. We leave all | |
|
202 | # docstrings as unprefixed to avoid this. This means Mercurial | |
|
203 | # components touching docstrings need to handle unicode, | |
|
204 | # unfortunately. | |
|
205 | if s[0:3] in ("'''", '"""'): | |
|
206 | yield t | |
|
207 | continue | |
|
208 | ||
|
209 | # If the first character isn't a quote, it is likely a string | |
|
210 | # prefixing character (such as 'b', 'u', or 'r'. Ignore. | |
|
211 | if s[0] not in ("'", '"'): | |
|
212 | yield t | |
|
213 | continue | |
|
214 | ||
|
215 | # String literal. Prefix to make a b'' string. | |
|
216 | yield tokenize.TokenInfo(t.type, 'b%s' % s, t.start, t.end, | |
|
217 | t.line) | |
|
218 | continue | |
|
219 | ||
|
220 | try: | |
|
221 | nexttoken = tokens[i + 1] | |
|
222 | except IndexError: | |
|
223 | nexttoken = None | |
|
224 | ||
|
225 | try: | |
|
226 | prevtoken = tokens[i - 1] | |
|
227 | except IndexError: | |
|
228 | prevtoken = None | |
|
229 | ||
|
230 | # This looks like a function call. | |
|
231 | if (t.type == token.NAME and nexttoken and | |
|
232 | nexttoken.type == token.OP and nexttoken.string == '('): | |
|
233 | fn = t.string | |
|
234 | ||
|
235 | # *attr() builtins don't accept byte strings to 2nd argument. | |
|
236 | # Rewrite the token to include the unicode literal prefix so | |
|
237 | # the string transformer above doesn't add the byte prefix. | |
|
238 | if fn in ('getattr', 'setattr', 'hasattr', 'safehasattr'): | |
|
239 | try: | |
|
240 | # (NAME, 'getattr') | |
|
241 | # (OP, '(') | |
|
242 | # (NAME, 'foo') | |
|
243 | # (OP, ',') | |
|
244 | # (NAME|STRING, foo) | |
|
245 | st = tokens[i + 4] | |
|
246 | if (st.type == token.STRING and | |
|
247 | st.string[0] in ("'", '"')): | |
|
248 | rt = tokenize.TokenInfo(st.type, 'u%s' % st.string, | |
|
249 | st.start, st.end, st.line) | |
|
250 | tokens[i + 4] = rt | |
|
251 | except IndexError: | |
|
252 | pass | |
|
253 | ||
|
254 | # .encode() and .decode() on str/bytes/unicode don't accept | |
|
255 | # byte strings on Python 3. Rewrite the token to include the | |
|
256 | # unicode literal prefix so the string transformer above doesn't | |
|
257 | # add the byte prefix. | |
|
258 | if (fn in ('encode', 'decode') and | |
|
259 | prevtoken.type == token.OP and prevtoken.string == '.'): | |
|
260 | # (OP, '.') | |
|
261 | # (NAME, 'encode') | |
|
262 | # (OP, '(') | |
|
263 | # (STRING, 'utf-8') | |
|
264 | # (OP, ')') | |
|
265 | try: | |
|
266 | st = tokens[i + 2] | |
|
267 | if (st.type == token.STRING and | |
|
268 | st.string[0] in ("'", '"')): | |
|
269 | rt = tokenize.TokenInfo(st.type, 'u%s' % st.string, | |
|
270 | st.start, st.end, st.line) | |
|
271 | tokens[i + 2] = rt | |
|
272 | except IndexError: | |
|
273 | pass | |
|
274 | ||
|
275 | # Emit unmodified token. | |
|
276 | yield t | |
|
277 | ||
|
278 | # Header to add to bytecode files. This MUST be changed when | |
|
279 | # ``replacetoken`` or any mechanism that changes semantics of module | |
|
280 | # loading is changed. Otherwise cached bytecode may get loaded without | |
|
281 | # the new transformation mechanisms applied. | |
|
282 | BYTECODEHEADER = b'HG\x00\x01' | |
|
283 | ||
|
284 | class hgloader(importlib.machinery.SourceFileLoader): | |
|
285 | """Custom module loader that transforms source code. | |
|
286 | ||
|
287 | When the source code is converted to a code object, we transform | |
|
288 | certain patterns to be Python 3 compatible. This allows us to write code | |
|
289 | that is natively Python 2 and compatible with Python 3 without | |
|
290 | making the code excessively ugly. | |
|
291 | ||
|
292 | We do this by transforming the token stream between parse and compile. | |
|
293 | ||
|
294 | Implementing transformations invalidates caching assumptions made | |
|
295 | by the built-in importer. The built-in importer stores a header on | |
|
296 | saved bytecode files indicating the Python/bytecode version. If the | |
|
297 | version changes, the cached bytecode is ignored. The Mercurial | |
|
298 | transformations could change at any time. This means we need to check | |
|
299 | that cached bytecode was generated with the current transformation | |
|
300 | code or there could be a mismatch between cached bytecode and what | |
|
301 | would be generated from this class. | |
|
302 | ||
|
303 | We supplement the bytecode caching layer by wrapping ``get_data`` | |
|
304 | and ``set_data``. These functions are called when the | |
|
305 | ``SourceFileLoader`` retrieves and saves bytecode cache files, | |
|
306 | respectively. We simply add an additional header on the file. As | |
|
307 | long as the version in this file is changed when semantics change, | |
|
308 | cached bytecode should be invalidated when transformations change. | |
|
309 | ||
|
310 | The added header has the form ``HG<VERSION>``. That is a literal | |
|
311 | ``HG`` with 2 binary bytes indicating the transformation version. | |
|
312 | """ | |
|
313 | def get_data(self, path): | |
|
314 | data = super(hgloader, self).get_data(path) | |
|
315 | ||
|
316 | if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)): | |
|
317 | return data | |
|
318 | ||
|
319 | # There should be a header indicating the Mercurial transformation | |
|
320 | # version. If it doesn't exist or doesn't match the current version, | |
|
321 | # we raise an OSError because that is what | |
|
322 | # ``SourceFileLoader.get_code()`` expects when loading bytecode | |
|
323 | # paths to indicate the cached file is "bad." | |
|
324 | if data[0:2] != b'HG': | |
|
325 | raise OSError('no hg header') | |
|
326 | if data[0:4] != BYTECODEHEADER: | |
|
327 | raise OSError('hg header version mismatch') | |
|
328 | ||
|
329 | return data[4:] | |
|
330 | ||
|
331 | def set_data(self, path, data, *args, **kwargs): | |
|
332 | if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)): | |
|
333 | data = BYTECODEHEADER + data | |
|
334 | ||
|
335 | return super(hgloader, self).set_data(path, data, *args, **kwargs) | |
|
336 | ||
|
337 | def source_to_code(self, data, path): | |
|
338 | """Perform token transformation before compilation.""" | |
|
339 | buf = io.BytesIO(data) | |
|
340 | tokens = tokenize.tokenize(buf.readline) | |
|
341 | data = tokenize.untokenize(replacetokens(list(tokens))) | |
|
342 | # Python's built-in importer strips frames from exceptions raised | |
|
343 | # for this code. Unfortunately, that mechanism isn't extensible | |
|
344 | # and our frame will be blamed for the import failure. There | |
|
345 | # are extremely hacky ways to do frame stripping. We haven't | |
|
346 | # implemented them because they are very ugly. | |
|
347 | return super(hgloader, self).source_to_code(data, path) | |
|
348 | ||
|
147 | 349 | # We automagically register our custom importer as a side-effect of loading. |
|
148 | 350 | # This is necessary to ensure that any entry points are able to import |
|
149 | 351 | # mercurial.* modules without having to perform this registration themselves. |
|
150 | if not any(isinstance(x, hgimporter) for x in sys.meta_path): | |
|
352 | if sys.version_info[0] >= 3: | |
|
353 | _importercls = hgpathentryfinder | |
|
354 | else: | |
|
355 | _importercls = hgimporter | |
|
356 | if not any(isinstance(x, _importercls) for x in sys.meta_path): | |
|
151 | 357 | # meta_path is used before any implicit finders and before sys.path. |
|
152 |
sys.meta_path.insert(0, |
|
|
358 | sys.meta_path.insert(0, _importercls()) |
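The effect of the token transformer is easiest to see on a tiny input. The following standalone Python 3 sketch uses the same stdlib machinery; ``rewrite`` is a hypothetical, heavily simplified stand-in for ``replacetokens`` that only handles the plain-literal case::

    import io
    import token
    import tokenize

    def rewrite(toks):
        # Simplified version of the loader's transform: give unprefixed
        # string literals a b'' prefix, leave everything else alone.
        for t in toks:
            if t.type == token.STRING and t.string[0] in ("'", '"'):
                yield t._replace(string='b%s' % t.string)
            else:
                yield t

    src = b"x = 'abc'\n"
    toks = tokenize.tokenize(io.BytesIO(src).readline)
    print(tokenize.untokenize(rewrite(toks)))  # b"x = b'abc'\n"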
@@ -291,7 +291,7 @@ class lazyancestors(object):
     def __nonzero__(self):
         """False if the set is empty, True otherwise."""
         try:
-            iter(self)
+            next(iter(self))
             return True
         except StopIteration:
             return False
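The one-line fix matters because ``iter()`` alone never raises: building the iterator for an empty lazy set succeeds, so the old code reported every set as non-empty. Only ``next()`` forces the first element out and lets ``StopIteration`` surface. A minimal illustration in plain Python::

    it = iter([])      # constructing the iterator raises nothing
    try:
        next(it)       # consuming it raises StopIteration when empty
        print(True)
    except StopIteration:
        print(False)   # this branch runs: the set is empty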
@@ -9,37 +9,25 @@
  Based roughly on Python difflib
 */

-#define PY_SSIZE_T_CLEAN
-#include <Python.h>
 #include <stdlib.h>
 #include <string.h>
 #include <limits.h>

-#include "util.h"
-
-struct line {
-	int hash, n, e;
-	Py_ssize_t len;
-	const char *l;
-};
+#include "compat.h"
+#include "bitmanipulation.h"
+#include "bdiff.h"

 struct pos {
 	int pos, len;
 };

-struct hunk;
-struct hunk {
-	int a1, a2, b1, b2;
-	struct hunk *next;
-};
-
-static int splitlines(const char *a, Py_ssize_t len, struct line **lr)
+int bdiff_splitlines(const char *a, ssize_t len, struct bdiff_line **lr)
 {
 	unsigned hash;
 	int i;
 	const char *p, *b = a;
 	const char * const plast = a + len - 1;
-	struct line *l;
+	struct bdiff_line *l;

 	/* count the lines */
 	i = 1; /* extra line for sentinel */
@@ -47,7 +35,7 @@ static int splitlines(const char *a, Py_
 		if (*p == '\n' || p == plast)
 			i++;

-	*lr = l = (struct line *)malloc(sizeof(struct line) * i);
+	*lr = l = (struct bdiff_line *)malloc(sizeof(struct bdiff_line) * i);
 	if (!l)
 		return -1;

@@ -75,12 +63,13 @@ static int splitlines(const char *a, Py_
 	return i - 1;
 }

-static inline int cmp(struct line *a, struct line *b)
+static inline int cmp(struct bdiff_line *a, struct bdiff_line *b)
 {
 	return a->hash != b->hash || a->len != b->len || memcmp(a->l, b->l, a->len);
 }

-static int equatelines(struct line *a, int an, struct line *b, int bn)
+static int equatelines(struct bdiff_line *a, int an, struct bdiff_line *b,
+		       int bn)
 {
 	int i, j, buckets = 1, t, scale;
 	struct pos *h = NULL;
@@ -145,7 +134,8 @@ static int equatelines(struct line *a, i
 	return 1;
 }

-static int longest_match(struct line *a, struct line *b, struct pos *pos,
+static int longest_match(struct bdiff_line *a, struct bdiff_line *b,
+			 struct pos *pos,
 	int a1, int a2, int b1, int b2, int *omi, int *omj)
 {
 	int mi = a1, mj = b1, mk = 0, i, j, k, half;
@@ -206,8 +196,9 @@ static int longest_match(struct line *a,
 	return mk;
 }

-static struct hunk *recurse(struct line *a, struct line *b, struct pos *pos,
-	int a1, int a2, int b1, int b2, struct hunk *l)
+static struct bdiff_hunk *recurse(struct bdiff_line *a, struct bdiff_line *b,
+				  struct pos *pos,
+				  int a1, int a2, int b1, int b2, struct bdiff_hunk *l)
 {
 	int i, j, k;
@@ -222,7 +213,7 @@ static struct hunk *recurse(struct line 
 	if (!l)
 		return NULL;

-	l->next = (struct hunk *)malloc(sizeof(struct hunk));
+	l->next = (struct bdiff_hunk *)malloc(sizeof(struct bdiff_hunk));
 	if (!l->next)
 		return NULL;

@@ -239,10 +230,10 @@ static struct hunk *recurse(struct line 
 	}
 }

-static int diff(struct line *a, int an, struct line *b, int bn,
-	struct hunk *base)
+int bdiff_diff(struct bdiff_line *a, int an, struct bdiff_line *b,
+	       int bn, struct bdiff_hunk *base)
 {
-	struct hunk *curr;
+	struct bdiff_hunk *curr;
 	struct pos *pos;
 	int t, count = 0;

@@ -258,7 +249,7 @@ static int diff(struct line *a, int an, 
 		return -1;

 	/* sentinel end hunk */
-	curr->next = (struct hunk *)malloc(sizeof(struct hunk));
+	curr->next = (struct bdiff_hunk *)malloc(sizeof(struct bdiff_hunk));
 	if (!curr->next)
 		return -1;
 	curr = curr->next;
@@ -271,7 +262,7 @@ static int diff(struct line *a, int an, 

 	/* normalize the hunk list, try to push each hunk towards the end */
 	for (curr = base->next; curr; curr = curr->next) {
-		struct hunk *next = curr->next;
+		struct bdiff_hunk *next = curr->next;

 		if (!next)
 			break;
@@ -293,195 +284,13 @@ static int diff(struct line *a, int an, 
 	return count;
 }

-static void freehunks(struct hunk *l)
+void bdiff_freehunks(struct bdiff_hunk *l)
 {
-	struct hunk *n;
+	struct bdiff_hunk *n;
 	for (; l; l = n) {
 		n = l->next;
 		free(l);
 	}
 }

-static PyObject *blocks(PyObject *self, PyObject *args)
-{
-	PyObject *sa, *sb, *rl = NULL, *m;
-	struct line *a, *b;
-	struct hunk l, *h;
-	int an, bn, count, pos = 0;

-	l.next = NULL;
-
-	if (!PyArg_ParseTuple(args, "SS:bdiff", &sa, &sb))
-		return NULL;
-
-	an = splitlines(PyBytes_AsString(sa), PyBytes_Size(sa), &a);
-	bn = splitlines(PyBytes_AsString(sb), PyBytes_Size(sb), &b);
-
-	if (!a || !b)
-		goto nomem;
-
-	count = diff(a, an, b, bn, &l);
-	if (count < 0)
-		goto nomem;
-
-	rl = PyList_New(count);
-	if (!rl)
-		goto nomem;
-
-	for (h = l.next; h; h = h->next) {
-		m = Py_BuildValue("iiii", h->a1, h->a2, h->b1, h->b2);
-		PyList_SetItem(rl, pos, m);
-		pos++;
-	}
-
-nomem:
-	free(a);
-	free(b);
-	freehunks(l.next);
-	return rl ? rl : PyErr_NoMemory();
-}
-
-static PyObject *bdiff(PyObject *self, PyObject *args)
-{
-	char *sa, *sb, *rb;
-	PyObject *result = NULL;
-	struct line *al, *bl;
-	struct hunk l, *h;
-	int an, bn, count;
-	Py_ssize_t len = 0, la, lb;
-	PyThreadState *_save;
-
-	l.next = NULL;
-
-	if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb))
-		return NULL;
-
-	if (la > UINT_MAX || lb > UINT_MAX) {
-		PyErr_SetString(PyExc_ValueError, "bdiff inputs too large");
-		return NULL;
-	}
-
-	_save = PyEval_SaveThread();
-	an = splitlines(sa, la, &al);
-	bn = splitlines(sb, lb, &bl);
-	if (!al || !bl)
-		goto nomem;
-
-	count = diff(al, an, bl, bn, &l);
-	if (count < 0)
-		goto nomem;
-
-	/* calculate length of output */
-	la = lb = 0;
-	for (h = l.next; h; h = h->next) {
-		if (h->a1 != la || h->b1 != lb)
-			len += 12 + bl[h->b1].l - bl[lb].l;
-		la = h->a2;
-		lb = h->b2;
-	}
-	PyEval_RestoreThread(_save);
-	_save = NULL;
-
-	result = PyBytes_FromStringAndSize(NULL, len);
-
-	if (!result)
-		goto nomem;
-
-	/* build binary patch */
-	rb = PyBytes_AsString(result);
-	la = lb = 0;
-
-	for (h = l.next; h; h = h->next) {
-		if (h->a1 != la || h->b1 != lb) {
-			len = bl[h->b1].l - bl[lb].l;
-			putbe32((uint32_t)(al[la].l - al->l), rb);
-			putbe32((uint32_t)(al[h->a1].l - al->l), rb + 4);
-			putbe32((uint32_t)len, rb + 8);
-			memcpy(rb + 12, bl[lb].l, len);
-			rb += 12 + len;
-		}
-		la = h->a2;
-		lb = h->b2;
-	}
-
-nomem:
-	if (_save)
-		PyEval_RestoreThread(_save);
-	free(al);
-	free(bl);
-	freehunks(l.next);
-	return result ? result : PyErr_NoMemory();
-}
-
-/*
- * If allws != 0, remove all whitespace (' ', \t and \r). Otherwise,
- * reduce whitespace sequences to a single space and trim remaining whitespace
- * from end of lines.
- */
-static PyObject *fixws(PyObject *self, PyObject *args)
-{
-	PyObject *s, *result = NULL;
-	char allws, c;
-	const char *r;
-	Py_ssize_t i, rlen, wlen = 0;
-	char *w;
-
-	if (!PyArg_ParseTuple(args, "Sb:fixws", &s, &allws))
-		return NULL;
-	r = PyBytes_AsString(s);
-	rlen = PyBytes_Size(s);
-
-	w = (char *)malloc(rlen ? rlen : 1);
-	if (!w)
-		goto nomem;
-
-	for (i = 0; i != rlen; i++) {
-		c = r[i];
-		if (c == ' ' || c == '\t' || c == '\r') {
-			if (!allws && (wlen == 0 || w[wlen - 1] != ' '))
-				w[wlen++] = ' ';
-		} else if (c == '\n' && !allws
-			  && wlen > 0 && w[wlen - 1] == ' ') {
-			w[wlen - 1] = '\n';
-		} else {
-			w[wlen++] = c;
-		}
-	}
-
-	result = PyBytes_FromStringAndSize(w, wlen);
-
-nomem:
-	free(w);
-	return result ? result : PyErr_NoMemory();
-}
-
-
-static char mdiff_doc[] = "Efficient binary diff.";
-
-static PyMethodDef methods[] = {
-	{"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
-	{"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
-	{"fixws", fixws, METH_VARARGS, "normalize diff whitespaces\n"},
-	{NULL, NULL}
-};
-
-#ifdef IS_PY3K
-static struct PyModuleDef bdiff_module = {
-	PyModuleDef_HEAD_INIT,
-	"bdiff",
-	mdiff_doc,
-	-1,
-	methods
-};
-
-PyMODINIT_FUNC PyInit_bdiff(void)
-{
-	return PyModule_Create(&bdiff_module);
-}
-#else
-PyMODINIT_FUNC initbdiff(void)
-{
-	Py_InitModule3("bdiff", methods, mdiff_doc);
-}
-#endif
-
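For reference while reading the removed ``bdiff()`` binding: the binary patch it emitted is a flat sequence of records, each a 12-byte big-endian header (start offset, end offset, data length) followed by the replacement bytes, as written by the ``putbe32()``/``memcpy()`` calls above. A small decoding sketch (my own illustration, not code from this change)::

    import struct

    def iterpatch(patch):
        # Yield (start, end, data) records from a bdiff binary patch.
        pos = 0
        while pos < len(patch):
            start, end, length = struct.unpack('>III', patch[pos:pos + 12])
            pos += 12
            yield start, end, patch[pos:pos + length]
            pos += length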
@@ -17,6 +17,7 @@ from .node import (
 )
 from . import (
     encoding,
+    error,
     lock as lockmod,
     obsolete,
     util,
@@ -109,39 +110,6 @@ class bmstore(dict):
                         location='plain')
         tr.hookargs['bookmark_moved'] = '1'

-    def write(self):
-        '''Write bookmarks
-
-        Write the given bookmark => hash dictionary to the .hg/bookmarks file
-        in a format equal to those of localtags.
-
-        We also store a backup of the previous state in undo.bookmarks that
-        can be copied back on rollback.
-        '''
-        msg = 'bm.write() is deprecated, use bm.recordchange(transaction)'
-        self._repo.ui.deprecwarn(msg, '3.7')
-        # TODO: writing the active bookmark should probably also use a
-        # transaction.
-        self._writeactive()
-        if self._clean:
-            return
-        repo = self._repo
-        if (repo.ui.configbool('devel', 'all-warnings')
-                or repo.ui.configbool('devel', 'check-locks')):
-            l = repo._wlockref and repo._wlockref()
-            if l is None or not l.held:
-                repo.ui.develwarn('bookmarks write with no wlock')
-
-        tr = repo.currenttransaction()
-        if tr:
-            self.recordchange(tr)
-            # invalidatevolatilesets() is omitted because this doesn't
-            # write changes out actually
-            return
-
-        self._writerepo(repo)
-        repo.invalidatevolatilesets()
-
     def _writerepo(self, repo):
         """Factored out for extensibility"""
         rbm = repo._bookmarks
@@ -150,7 +118,8 @@ class bmstore(dict):
         rbm._writeactive()

         with repo.wlock():
-            file_ = repo.vfs('bookmarks', 'w', atomictemp=True)
+            file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
+                             checkambig=True)
             try:
                 self._write(file_)
             except: # re-raises
@@ -164,7 +133,8 @@ class bmstore(dict):
             return
         with self._repo.wlock():
             if self._active is not None:
-                f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True)
+                f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
+                                   checkambig=True)
                 try:
                     f.write(encoding.fromlocal(self._active))
                 finally:
@@ -185,7 +155,10 @@ class bmstore(dict):

     def expandname(self, bname):
         if bname == '.':
-            return self.active
+            if self.active:
+                return self.active
+            else:
+                raise error.Abort(_("no active bookmark"))
         return bname

 def _readactive(repo, marks):
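The new ``checkambig=True`` flag guards against stat ambiguity: two writes that land within the same second can leave a file with identical (mtime, size), so a validator that relies on stat alone can miss the second change. A plain-Python demonstration of the underlying race (no Mercurial APIs involved)::

    import os
    import tempfile

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'wb') as fp:
        fp.write(b'aa')
    st1 = os.stat(path)
    with open(path, 'wb') as fp:
        fp.write(b'bb')              # same size, often the same mtime
    st2 = os.stat(path)
    # True here means a stat-based cache could not tell the writes apart.
    print(st1.st_mtime == st2.st_mtime and st1.st_size == st2.st_size)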
@@ -363,7 +363,7 @@ class revbranchcache(object):
             bndata = repo.vfs.read(_rbcnames)
             self._rbcsnameslen = len(bndata) # for verification before writing
             self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
-        except (IOError, OSError) as inst:
+        except (IOError, OSError):
             if readonly:
                 # don't try to use cache - fall back to the slow path
                 self.branchinfo = self._branchinfo
@@ -402,10 +402,9 @@ class revbranchcache(object):
         if rev == nullrev:
             return changelog.branchinfo(rev)

-        # if requested rev is missing, add and populate all missing revs
+        # if requested rev isn't allocated, grow and cache the rev info
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
-            self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
-                                         len(self._rbcrevs)))
+            return self._branchinfo(rev)

         # fast path: extract data from cache, use it if node is matching
         reponode = changelog.node(rev)[:_rbcnodelen]
@@ -452,6 +451,10 @@ class revbranchcache(object):
         rbcrevidx = rev * _rbcrecsize
         rec = array('c')
         rec.fromstring(pack(_rbcrecfmt, node, branchidx))
+        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
+            self._rbcrevs.extend('\0' *
+                                 (len(self._repo.changelog) * _rbcrecsize -
+                                  len(self._rbcrevs)))
         self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
         self._rbcrevslen = min(self._rbcrevslen, rev)

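The growth logic moves from the read path into the write path because records are fixed size, so the cache only needs to be extended when a record is actually stored. Assuming the record layout this file uses in this era (``_rbcrecfmt = '>4sI'``: a 4-byte node-hash prefix plus a 4-byte branch index), the arithmetic looks like this::

    from struct import calcsize, pack

    recfmt = '>4sI'                # assumed value of _rbcrecfmt
    recsize = calcsize(recfmt)     # 8 bytes per revision
    rev = 5
    rbcrevidx = rev * recsize      # byte offset of rev's record
    rec = pack(recfmt, b'\xde\xad\xbe\xef', 3)
    # a cache holding revs 0..5 must be at least this many bytes:
    print(rbcrevidx + recsize)     # 48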
@@ -690,7 +690,7 @@ class unbundle20(unpackermixin):

     def _processallparams(self, paramsblock):
         """"""
-        params = {}
+        params = util.sortdict()
         for p in paramsblock.split(' '):
             p = p.split('=', 1)
             p = [urlreq.unquote(i) for i in p]
@@ -1115,8 +1115,8 @@ class unbundlepart(unpackermixin):
         self.mandatoryparams = tuple(mandatoryparams)
         self.advisoryparams = tuple(advisoryparams)
         # user friendly UI
-        self.params = dict(self.mandatoryparams)
-        self.params.update(dict(self.advisoryparams))
+        self.params = util.sortdict(self.mandatoryparams)
+        self.params.update(self.advisoryparams)
         self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)

     def _payloadchunks(self, chunknum=0):
@@ -1294,6 +1294,9 @@ def writebundle(ui, cg, filename, bundle
         bundle.setcompression(compression)
         part = bundle.newpart('changegroup', data=cg.getchunks())
         part.addparam('version', cg.version)
+        if 'clcount' in cg.extras:
+            part.addparam('nbchanges', str(cg.extras['clcount']),
+                          mandatory=False)
         chunkiter = bundle.getchunks()
     else:
         # compression argument is only for the bundle2 case
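``util.sortdict`` is Mercurial's insertion-ordered mapping, so swapping it in makes parameter iteration deterministic (plain ``dict`` ordering was arbitrary on the Pythons of this era). For illustration, ``collections.OrderedDict`` behaves the same way here::

    from collections import OrderedDict  # stand-in for util.sortdict

    params = OrderedDict([('version', '02'), ('nbchanges', '3')])
    params.update([('targetphase', 'draft')])
    # Iteration follows insertion order, so any output listing part
    # parameters is stable from run to run.
    print(list(params))  # ['version', 'nbchanges', 'targetphase']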
@@ -291,7 +291,7 @@ class bundlerepository(localrepo.localre
                                          ".cg%sun" % version)

         if cgstream is None:
-            raise error.Abort('No changegroups found')
+            raise error.Abort(_('No changegroups found'))
         cgstream.seek(0)

         self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
@@ -135,7 +135,7 @@ class cg1unpacker(object):
     version = '01'
     _grouplistcount = 1 # One list of files after the manifests

-    def __init__(self, fh, alg):
+    def __init__(self, fh, alg, extras=None):
         if alg == 'UN':
             alg = None # get more modern without breaking too much
         if not alg in util.decompressors:
@@ -145,6 +145,7 @@ class cg1unpacker(object):
             alg = '_truncatedBZ'
         self._stream = util.decompressors[alg](fh)
         self._type = alg
+        self.extras = extras or {}
         self.callback = None

     # These methods (compressed, read, seek, tell) all appear to only
@@ -530,6 +531,17 @@ class cg1packer(object):
     def fileheader(self, fname):
         return chunkheader(len(fname)) + fname

+    # Extracted both for clarity and for overriding in extensions.
+    def _sortgroup(self, revlog, nodelist, lookup):
+        """Sort nodes for change group and turn them into revnums."""
+        # for generaldelta revlogs, we linearize the revs; this will both be
+        # much quicker and generate a much smaller bundle
+        if (revlog._generaldelta and self._reorder is None) or self._reorder:
+            dag = dagutil.revlogdag(revlog)
+            return dag.linearize(set(revlog.rev(n) for n in nodelist))
+        else:
+            return sorted([revlog.rev(n) for n in nodelist])
+
     def group(self, nodelist, revlog, lookup, units=None):
         """Calculate a delta group, yielding a sequence of changegroup chunks
         (strings).
@@ -549,14 +561,7 @@ class cg1packer(object):
             yield self.close()
             return

-        # for generaldelta revlogs, we linearize the revs; this will both be
-        # much quicker and generate a much smaller bundle
-        if (revlog._generaldelta and self._reorder is None) or self._reorder:
-            dag = dagutil.revlogdag(revlog)
-            revs = set(revlog.rev(n) for n in nodelist)
-            revs = dag.linearize(revs)
-        else:
-            revs = sorted([revlog.rev(n) for n in nodelist])
+        revs = self._sortgroup(revlog, nodelist, lookup)

         # add the parent of the first rev
         p = revlog.parentrevs(revs[0])[0]
@@ -724,10 +729,11 @@ class cg1packer(object):
             dir = min(tmfnodes)
             nodes = tmfnodes[dir]
             prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
-            for x in self._packmanifests(dir, prunednodes,
-                                         makelookupmflinknode(dir)):
-                size += len(x)
-                yield x
+            if not dir or prunednodes:
+                for x in self._packmanifests(dir, prunednodes,
+                                             makelookupmflinknode(dir)):
+                    size += len(x)
+                    yield x
             del tmfnodes[dir]
         self._verbosenote(_('%8.i (manifests)\n') % size)
         yield self._manifestsdone()
@@ -895,8 +901,8 @@ def getbundler(version, repo, bundlecaps
     assert version in supportedoutgoingversions(repo)
     return _packermap[version][0](repo, bundlecaps)

-def getunbundler(version, fh, alg):
-    return _packermap[version][1](fh, alg)
+def getunbundler(version, fh, alg, extras=None):
+    return _packermap[version][1](fh, alg, extras=extras)

 def _changegroupinfo(repo, nodes, source):
     if repo.ui.verbose or source == 'bundle':
@@ -924,7 +930,8 @@ def getsubsetraw(repo, outgoing, bundler

 def getsubset(repo, outgoing, bundler, source, fastpath=False):
     gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
-    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None)
+    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
+                        {'clcount': len(outgoing.missing)})

 def changegroupsubset(repo, roots, heads, source, version='01'):
     """Compute a changegroup consisting of all the nodes that are
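The thread through these hunks: ``getsubset()`` counts the outgoing changesets, the unpacker stores that count in ``extras``, and ``writebundle()`` forwards it as the advisory ``nbchanges`` bundle2 parameter so consumers can size progress output before unpacking anything. A toy model of the plumbing (deliberately not the real classes)::

    class toyunpacker(object):
        def __init__(self, fh, alg, extras=None):
            self.extras = extras or {}

    def toygetunbundler(fh, alg, extras=None):
        return toyunpacker(fh, alg, extras=extras)

    cg = toygetunbundler(None, None, extras={'clcount': 42})
    print(cg.extras['clcount'])  # 42 -> emitted as 'nbchanges'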
@@ -83,7 +83,7 @@ def filterchunks(ui, originalhunks, usec
         else:
             recordfn = crecordmod.chunkselector

-        return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
+        return crecordmod.filterpatch(ui, originalhunks, recordfn)

     else:
         return patch.filterpatch(ui, originalhunks, operation)
@@ -91,9 +91,9 @@ def filterchunks(ui, originalhunks, usec
 def recordfilter(ui, originalhunks, operation=None):
     """ Prompts the user to filter the originalhunks and return a list of
     selected hunks.
-    *operation* is used for ui purposes to indicate the user
-    what kind of filtering they are doing: reverting, committing, shelving, etc.
-    *operation* has to be a translated string.
+    *operation* is used for to build ui messages to indicate the user what
+    kind of filtering they are doing: reverting, committing, shelving, etc.
+    (see patch.filterpatch).
     """
     usecurses = crecordmod.checkcurses(ui)
     testfile = ui.config('experimental', 'crecordtest', None)
@@ -532,7 +532,7 @@ def openrevlog(repo, cmd, file_, opts):
         msg = _('cannot specify --changelog and --manifest at the same time')
     elif cl and dir:
         msg = _('cannot specify --changelog and --dir at the same time')
-    elif cl or mf:
+    elif cl or mf or dir:
         if file_:
             msg = _('cannot specify filename with --changelog or --manifest')
         elif not repo:
@@ -549,7 +549,7 @@ def openrevlog(repo, cmd, file_, opts):
         if 'treemanifest' not in repo.requirements:
            raise error.Abort(_("--dir can only be used on repos with "
                                "treemanifest enabled"))
-        dirlog = repo.dirlog(file_)
+        dirlog = repo.dirlog(dir)
         if len(dirlog):
             r = dirlog
     elif mf:
@@ -1405,24 +1405,24 @@ class jsonchangeset(changeset_printer):
         self.ui.write(",\n {")

         if self.ui.quiet:
-            self.ui.write('\n "rev": %s' % jrev)
-            self.ui.write(',\n "node": %s' % jnode)
+            self.ui.write(('\n "rev": %s') % jrev)
+            self.ui.write((',\n "node": %s') % jnode)
             self.ui.write('\n }')
             return

-        self.ui.write('\n "rev": %s' % jrev)
-        self.ui.write(',\n "node": %s' % jnode)
-        self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
-        self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
-        self.ui.write(',\n "user": "%s"' % j(ctx.user()))
-        self.ui.write(',\n "date": [%d, %d]' % ctx.date())
-        self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
-
-        self.ui.write(',\n "bookmarks": [%s]' %
+        self.ui.write(('\n "rev": %s') % jrev)
+        self.ui.write((',\n "node": %s') % jnode)
+        self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
+        self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
+        self.ui.write((',\n "user": "%s"') % j(ctx.user()))
+        self.ui.write((',\n "date": [%d, %d]') % ctx.date())
+        self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
+
+        self.ui.write((',\n "bookmarks": [%s]') %
                       ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
-        self.ui.write(',\n "tags": [%s]' %
+        self.ui.write((',\n "tags": [%s]') %
                       ", ".join('"%s"' % j(t) for t in ctx.tags()))
-        self.ui.write(',\n "parents": [%s]' %
+        self.ui.write((',\n "parents": [%s]') %
                       ", ".join('"%s"' % c.hex() for c in ctx.parents()))

         if self.ui.debugflag:
@@ -1430,26 +1430,26 @@ class jsonchangeset(changeset_printer):
                 jmanifestnode = 'null'
             else:
                 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
-            self.ui.write(',\n "manifest": %s' % jmanifestnode)
-
-            self.ui.write(',\n "extra": {%s}' %
+            self.ui.write((',\n "manifest": %s') % jmanifestnode)
+
+            self.ui.write((',\n "extra": {%s}') %
                           ", ".join('"%s": "%s"' % (j(k), j(v))
                                     for k, v in ctx.extra().items()))

             files = ctx.p1().status(ctx)
-            self.ui.write(',\n "modified": [%s]' %
+            self.ui.write((',\n "modified": [%s]') %
                           ", ".join('"%s"' % j(f) for f in files[0]))
-            self.ui.write(',\n "added": [%s]' %
+            self.ui.write((',\n "added": [%s]') %
                           ", ".join('"%s"' % j(f) for f in files[1]))
-            self.ui.write(',\n "removed": [%s]' %
+            self.ui.write((',\n "removed": [%s]') %
                           ", ".join('"%s"' % j(f) for f in files[2]))

         elif self.ui.verbose:
-            self.ui.write(',\n "files": [%s]' %
+            self.ui.write((',\n "files": [%s]') %
                           ", ".join('"%s"' % j(f) for f in ctx.files()))

             if copies:
-                self.ui.write(',\n "copies": {%s}' %
+                self.ui.write((',\n "copies": {%s}') %
                               ", ".join('"%s": "%s"' % (j(k), j(v))
                                         for k, v in copies))

@@ -1463,12 +1463,13 @@ class jsonchangeset(changeset_printer):
                 self.ui.pushbuffer()
                 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                                match=matchfn, stat=True)
-                self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
+                self.ui.write((',\n "diffstat": "%s"')
+                              % j(self.ui.popbuffer()))
             if diff:
                 self.ui.pushbuffer()
                 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                                match=matchfn, stat=False)
-                self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
+                self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))

         self.ui.write("\n }")

@@ -1998,7 +1999,7 @@ def _makelogrevset(repo, pats, opts, rev
         followfirst = 0
     # --follow with FILE behavior depends on revs...
     it = iter(revs)
-    startrev = it.next()
+    startrev = next(it)
     followdescendants = startrev < next(it, startrev)

     # branch and only_branch are really aliases and must be handled at
@@ -2147,7 +2148,8 @@ def getgraphlogrevs(repo, pats, opts):
     if opts.get('rev'):
         # User-specified revs might be unsorted, but don't sort before
         # _makelogrevset because it might depend on the order of revs
-        revs.sort(reverse=True)
+        if not (revs.isdescending() or revs.istopo()):
+            revs.sort(reverse=True)
     if expr:
         # Revset matchers often operate faster on revisions in changelog
         # order, because most filters deal with the changelog.
@@ -3071,7 +3073,7 @@ def revert(ui, repo, ctx, parents, *pats

     # tell newly modified apart.
     dsmodified &= modified
-    dsmodified |= modified & dsadded # dirstate added may needs backup
+    dsmodified |= modified & dsadded # dirstate added may need backup
     modified -= dsmodified

     # We need to wait for some post-processing to update this set
@@ -3141,11 +3143,17 @@ def revert(ui, repo, ctx, parents, *pats
     # All set to `discard` if `no-backup` is set do avoid checking
     # no_backup lower in the code.
     # These values are ordered for comparison purposes
+    backupinteractive = 3 # do backup if interactively modified
     backup = 2  # unconditionally do backup
     check = 1   # check if the existing file differs from target
     discard = 0 # never do backup
     if opts.get('no_backup'):
-        backup = check = discard
+        backupinteractive = backup = check = discard
+    if interactive:
+        dsmodifiedbackup = backupinteractive
+    else:
+        dsmodifiedbackup = backup
+    tobackup = set()

     backupanddel = actions['remove']
     if not opts.get('no_backup'):
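The constants read as an escalating backup policy, and the new ``backupinteractive`` level slots in above ``backup``. A compact, runnable restatement of the selection logic from this hunk::

    backupinteractive = 3   # back up only files touched interactively
    backup = 2              # unconditionally back up
    check = 1               # back up when content differs from target
    discard = 0             # never back up

    def choose(interactive, no_backup):
        if no_backup:
            return discard
        return backupinteractive if interactive else backup

    print(choose(True, False))   # 3
    print(choose(False, False))  # 2
    print(choose(True, True))    # 0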
@@ -3163,7 +3171,7 @@ def revert(ui, repo, ctx, parents, *pats
         # Modified compared to target, but local file is deleted
         (deleted, actions['revert'], discard),
         # Modified compared to target, local change
-        (dsmodified, actions['revert'], backup),
+        (dsmodified, actions['revert'], dsmodifiedbackup),
         # Added since target
         (added, actions['remove'], discard),
         # Added in working directory
@@ -3198,8 +3206,12 @@ def revert(ui, repo, ctx, parents, *pats
             continue
         if xlist is not None:
             xlist.append(abs)
-            if dobackup and (backup <= dobackup
-                             or wctx[abs].cmp(ctx[abs])):
+            if dobackup:
+                # If in interactive mode, don't automatically create
+                # .orig files (issue4793)
+                if dobackup == backupinteractive:
+                    tobackup.add(abs)
+                elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                    bakname = scmutil.origpath(ui, repo, rel)
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
@@ -3219,7 +3231,7 @@ def revert(ui, repo, ctx, parents, *pats
     if not opts.get('dry_run'):
         needdata = ('revert', 'add', 'undelete')
         _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
-        _performrevert(repo, parents, ctx, actions, interactive)
+        _performrevert(repo, parents, ctx, actions, interactive, tobackup)

     if targetsubs:
         # Revert the subrepos on the revert list
@@ -3234,7 +3246,8 @@ def _revertprefetch(repo, ctx, *files):
     """Let extension changing the storage layer prefetch content"""
     pass

-def _performrevert(repo, parents, ctx, actions, interactive=False):
+def _performrevert(repo, parents, ctx, actions, interactive=False,
+                   tobackup=None):
     """function that actually perform all the actions computed for revert

     This is an independent function to let extension to plug in and react to
@@ -3301,10 +3314,12 @@ def _performrevert(repo, parents, ctx, a
     else:
         diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
         originalchunks = patch.parsepatch(diff)
+        operation = 'discard' if node == parent else 'revert'

     try:

-        chunks, opts = recordfilter(repo.ui, originalchunks)
+        chunks, opts = recordfilter(repo.ui, originalchunks,
+                                    operation=operation)
         if reversehunks:
             chunks = patch.reversehunks(chunks)

@@ -3312,9 +3327,18 @@ def _performrevert(repo, parents, ctx, a
         raise error.Abort(_('error parsing patch: %s') % err)

     newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
+    if tobackup is None:
+        tobackup = set()
     # Apply changes
     fp = stringio()
     for c in chunks:
+        # Create a backup file only if this hunk should be backed up
+        if ishunk(c) and c.header.filename() in tobackup:
+            abs = c.header.filename()
+            target = repo.wjoin(abs)
+            bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
+            util.copyfile(target, bakname)
+            tobackup.remove(abs)
         c.write(fp)
     dopatch = fp.tell()
     fp.seek(0)
@@ -3518,7 +3542,7 @@ class dirstateguard(object):
     def __init__(self, repo, name):
         self._repo = repo
         self._suffix = '.backup.%s.%d' % (name, id(self))
-        repo.dirstate._savebackup(repo.currenttransaction(), self._suffix)
+        repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
         self._active = True
         self._closed = False

@@ -3536,13 +3560,13 @@ class dirstateguard(object):
                    % self._suffix)
             raise error.Abort(msg)

-        self._repo.dirstate._clearbackup(self._repo.currenttransaction(),
+        self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
                                         self._suffix)
         self._active = False
         self._closed = True

     def _abort(self):
-        self._repo.dirstate._restorebackup(self._repo.currenttransaction(),
+        self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
                                           self._suffix)
         self._active = False

@@ -59,6 +59,7 @@ from . import (
     obsolete,
     patch,
     phases,
+    policy,
     pvec,
     repair,
     revlog,
@@ -215,7 +216,7 @@ subrepoopts = [
 debugrevlogopts = [
     ('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest')),
-    ('', 'dir', False, _('open directory manifest')),
+    ('', 'dir', '', _('open directory manifest')),
 ]

 # Commands start here, listed alphabetically
@@ -468,26 +469,27 @@ def annotate(ui, repo, *pats, **opts):

         lines = fctx.annotate(follow=follow, linenumber=linenumber,
                               diffopts=diffopts)
+        if not lines:
+            continue
         formats = []
         pieces = []

         for f, sep in funcmap:
             l = [f(n) for n, dummy in lines]
-            if l:
-                if fm:
-                    formats.append(['%s' for x in l])
-                else:
-                    sizes = [encoding.colwidth(x) for x in l]
-                    ml = max(sizes)
-                    formats.append([sep + ' ' * (ml - w) + '%s' for w in sizes])
-                pieces.append(l)
+            if fm:
+                formats.append(['%s' for x in l])
+            else:
+                sizes = [encoding.colwidth(x) for x in l]
+                ml = max(sizes)
+                formats.append([sep + ' ' * (ml - w) + '%s' for w in sizes])
+            pieces.append(l)

         for f, p, l in zip(zip(*formats), zip(*pieces), lines):
             fm.startitem()
             fm.write(fields, "".join(f), *p)
             fm.write('line', ": %s", l[1])

-        if lines and not lines[-1][1].endswith('\n'):
+        if not lines[-1][1].endswith('\n'):
             fm.plain('\n')

     fm.end()
@@ -2089,51 +2091,56 @@ def debugbundle(ui, bundlepath, all=None
         gen = exchange.readbundle(ui, f, bundlepath)
         if isinstance(gen, bundle2.unbundle20):
             return _debugbundle2(ui, gen, all=all, **opts)
-        if all:
-            ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))
-
-            def showchunks(named):
-                ui.write("\n%s\n" % named)
-                chain = None
-                while True:
-                    chunkdata = gen.deltachunk(chain)
-                    if not chunkdata:
-                        break
-                    node = chunkdata['node']
-                    p1 = chunkdata['p1']
-                    p2 = chunkdata['p2']
-                    cs = chunkdata['cs']
-                    deltabase = chunkdata['deltabase']
-                    delta = chunkdata['delta']
-                    ui.write("%s %s %s %s %s %s\n" %
-                             (hex(node), hex(p1), hex(p2),
-                              hex(cs), hex(deltabase), len(delta)))
-                    chain = node
-
-            chunkdata = gen.changelogheader()
-            showchunks("changelog")
-            chunkdata = gen.manifestheader()
-            showchunks("manifest")
-            while True:
-                chunkdata = gen.filelogheader()
-                if not chunkdata:
-                    break
-                fname = chunkdata['filename']
-                showchunks(fname)
-        else:
-            if isinstance(gen, bundle2.unbundle20):
-                raise error.Abort(_('use debugbundle2 for this file'))
-            chunkdata = gen.changelogheader()
+        _debugchangegroup(ui, gen, all=all, **opts)
+
+def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
+    indent_string = ' ' * indent
+    if all:
+        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
+                 % indent_string)
+
+        def showchunks(named):
+            ui.write("\n%s%s\n" % (indent_string, named))
             chain = None
             while True:
                 chunkdata = gen.deltachunk(chain)
                 if not chunkdata:
                     break
                 node = chunkdata['node']
-                ui.write("%s\n" % hex(node))
+                p1 = chunkdata['p1']
+                p2 = chunkdata['p2']
+                cs = chunkdata['cs']
+                deltabase = chunkdata['deltabase']
+                delta = chunkdata['delta']
+                ui.write("%s%s %s %s %s %s %s\n" %
+                         (indent_string, hex(node), hex(p1), hex(p2),
+                          hex(cs), hex(deltabase), len(delta)))
                 chain = node
-
-def _debugbundle2(ui, gen, **opts):
+
+        chunkdata = gen.changelogheader()
+        showchunks("changelog")
+        chunkdata = gen.manifestheader()
+        showchunks("manifest")
+        while True:
+            chunkdata = gen.filelogheader()
+            if not chunkdata:
+                break
+            fname = chunkdata['filename']
+            showchunks(fname)
+    else:
+        if isinstance(gen, bundle2.unbundle20):
+            raise error.Abort(_('use debugbundle2 for this file'))
+        chunkdata = gen.changelogheader()
+        chain = None
+        while True:
+            chunkdata = gen.deltachunk(chain)
+            if not chunkdata:
+                break
+            node = chunkdata['node']
+            ui.write("%s%s\n" % (indent_string, hex(node)))
+            chain = node
+
+def _debugbundle2(ui, gen, all=None, **opts):
     """lists the contents of a bundle2"""
     if not isinstance(gen, bundle2.unbundle20):
         raise error.Abort(_('not a bundle2 file'))
@@ -2143,15 +2150,7 @@ def _debugbundle2(ui, gen, **opts):
         if part.type == 'changegroup':
             version = part.params.get('version', '01')
             cg = changegroup.getunbundler(version, part, 'UN')
-            chunkdata = cg.changelogheader()
-            chain = None
-            while True:
-                chunkdata = cg.deltachunk(chain)
-                if not chunkdata:
-                    break
-                node = chunkdata['node']
-                ui.write("    %s\n" % hex(node))
-                chain = node
+            _debugchangegroup(ui, cg, all=all, indent=4, **opts)

 @command('debugcreatestreamclonebundle', [], 'FILE')
 def debugcreatestreamclonebundle(ui, repo, fname):
|
2301 | 2300 | @command('debugdata', debugrevlogopts, _('-c|-m|FILE REV')) |
|
2302 | 2301 | def debugdata(ui, repo, file_, rev=None, **opts): |
|
2303 | 2302 | """dump the contents of a data file revision""" |
|
2304 | if opts.get('changelog') or opts.get('manifest'): | |
|
2303 | if opts.get('changelog') or opts.get('manifest') or opts.get('dir'): | |
|
2304 | if rev is not None: | |
|
2305 | raise error.CommandError('debugdata', _('invalid arguments')) | |
|
2305 | 2306 | file_, rev = None, file_ |
|
2306 | 2307 | elif rev is None: |
|
2307 | 2308 | raise error.CommandError('debugdata', _('invalid arguments')) |
@@ -2524,15 +2525,16 @@ def debugignore(ui, repo, *files, **opts
                 break
         if ignored:
             if ignored == nf:
-                ui.write("%s is ignored\n" % f)
+                ui.write(_("%s is ignored\n") % f)
             else:
-                ui.write("%s is ignored because of containing folder %s\n"
+                ui.write(_("%s is ignored because of "
+                           "containing folder %s\n")
                          % (f, ignored))
             ignorefile, lineno, line = ignoredata
-            ui.write("(ignore rule in %s, line %d: '%s')\n"
+            ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                      % (ignorefile, lineno, line))
         else:
-            ui.write("%s is not ignored\n" % f)
+            ui.write(_("%s is not ignored\n") % f)

 @command('debugindex', debugrevlogopts +
     [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
@@ -2563,12 +2565,12 @@ def debugindex(ui, repo, file_=None, **o
             break

     if format == 0:
-        ui.write("   rev    offset  length " + basehdr + " linkrev"
-                 " %s %s p2\n" % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
+        ui.write(("   rev    offset  length " + basehdr + " linkrev"
+                 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
     elif format == 1:
-        ui.write("   rev flag   offset   length"
+        ui.write(("   rev flag   offset   length"
                  "     size " + basehdr + "   link     p1     p2"
-                 " %s\n" % "nodeid".rjust(idlen))
+                 " %s\n") % "nodeid".rjust(idlen))

     for i in r:
         node = r.node(i)
@@ -2743,7 +2745,16 @@ def debuginstall(ui, **opts):
     fm.write('pythonlib', _("checking Python lib (%s)...\n"),
              os.path.dirname(os.__file__))

+    # hg version
+    hgver = util.version()
+    fm.write('hgver', _("checking Mercurial version (%s)\n"),
+             hgver.split('+')[0])
+    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
+             '+'.join(hgver.split('+')[1:]))
+
     # compiled modules
+    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
+             policy.policy)
     fm.write('hgmodules', _("checking installed modules (%s)...\n"),
              os.path.dirname(__file__))

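The version handling splits on ``+`` because custom builds append local build information after a plus sign. With a hypothetical ``util.version()`` result::

    hgver = '3.9.1+20160907'               # hypothetical custom build
    print(hgver.split('+')[0])             # 3.9.1  (release version)
    print('+'.join(hgver.split('+')[1:]))  # 20160907  (custom build info)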
@@ -3022,13 +3033,13 @@ def debuglocks(ui, repo, **opts):
             else:
                 locker = 'user %s, process %s, host %s' \
                     % (user, pid, host)
-            ui.write("%-6s %s (%ds)\n" % (name + ":", locker, age))
+            ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
             return 1
         except OSError as e:
             if e.errno != errno.ENOENT:
                 raise

-        ui.write("%-6s free\n" % (name + ":"))
+        ui.write(("%-6s free\n") % (name + ":"))
         return 0

     held += report(repo.svfs, "lock", repo.lock)
@@ -3321,8 +3332,8 @@ def debugrevlog(ui, repo, file_=None, **

     if opts.get("dump"):
         numrevs = len(r)
-        ui.write("# rev p1rev p2rev start   end deltastart base   p1   p2"
-                 " rawsize totalsize compression heads chainlen\n")
+        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
+                 " rawsize totalsize compression heads chainlen\n"))
         ts = 0
         heads = set()

@@ -3511,18 +3522,19 @@ def debugrevspec(ui, repo, expr, **opts)
     ui.note(revset.prettyformat(tree), "\n")
     newtree = revset.expandaliases(ui, tree)
     if newtree != tree:
-        ui.note("* expanded:\n", revset.prettyformat(newtree), "\n")
+        ui.note(("* expanded:\n"), revset.prettyformat(newtree), "\n")
     tree = newtree
     newtree = revset.foldconcat(tree)
     if newtree != tree:
-        ui.note("* concatenated:\n", revset.prettyformat(newtree), "\n")
+        ui.note(("* concatenated:\n"), revset.prettyformat(newtree), "\n")
     if opts["optimize"]:
-        weight, optimizedtree = revset.optimize(newtree, True)
-        ui.note("* optimized:\n",
-                revset.prettyformat(optimizedtree), "\n")
+        optimizedtree = revset.optimize(newtree)
+        ui.note(("* optimized:\n"),
+                revset.prettyformat(optimizedtree), "\n")
     func = revset.match(ui, expr, repo)
     revs = func(repo)
     if ui.verbose:
-        ui.note("* set:\n", revset.prettyformatset(revs), "\n")
+        ui.note(("* set:\n"), revset.prettyformatset(revs), "\n")
     for c in revs:
         ui.write("%s\n" % c)

@@ -3677,7 +3689,7 @@ def debugtemplate(ui, repo, tmpl, **opts
     ui.note(templater.prettyformat(tree), '\n')
     newtree = templater.expandaliases(tree, aliases)
     if newtree != tree:
-        ui.note("* expanded:\n", templater.prettyformat(newtree), '\n')
+        ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

     mapfile = None
     if revs is None:
|
4406 | 4418 | if not opts.get('files_with_matches'): |
|
4407 | 4419 | ui.write(sep, label='grep.sep') |
|
4408 | 4420 | if not opts.get('text') and binary(): |
|
4409 | ui.write(" Binary file matches") | |
|
4421 | ui.write(_(" Binary file matches")) | |
|
4410 | 4422 | else: |
|
4411 | 4423 | for s, label in l: |
|
4412 | 4424 | ui.write(s, label=label) |
@@ -4570,7 +4582,10 b' def help_(ui, name=None, **opts):' | |||
|
4570 | 4582 | Returns 0 if successful. |
|
4571 | 4583 | """ |
|
4572 | 4584 | |
|
4573 |
textwidth = |
|
|
4585 | textwidth = ui.configint('ui', 'textwidth', 78) | |
|
4586 | termwidth = ui.termwidth() - 2 | |
|
4587 | if textwidth <= 0 or termwidth < textwidth: | |
|
4588 | textwidth = termwidth | |
|
4574 | 4589 | |
|
4575 | 4590 | keep = opts.get('system') or [] |
|
4576 | 4591 | if len(keep) == 0: |
@@ -5773,6 +5788,9 @@ def pull(ui, repo, source="default", **o
     If SOURCE is omitted, the 'default' path will be used.
     See :hg:`help urls` for more information.

+    Specifying bookmark as ``.`` is equivalent to specifying the active
+    bookmark's name.
+
     Returns 0 on success, 1 if an update had unresolved files.
     """
     source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
@@ -5794,6 +5812,7 @@ def pull(ui, repo, source="default", **o
         remotebookmarks = other.listkeys('bookmarks')
         pullopargs['remotebookmarks'] = remotebookmarks
         for b in opts['bookmark']:
+            b = repo._bookmarks.expandname(b)
             if b not in remotebookmarks:
                 raise error.Abort(_('remote bookmark %s not found!') % b)
             revs.append(remotebookmarks[b])
@@ -5926,6 +5945,15 b' def push(ui, repo, dest=None, **opts):' | |||
|
5926 | 5945 | if not revs: |
|
5927 | 5946 | raise error.Abort(_("specified revisions evaluate to an empty set"), |
|
5928 | 5947 | hint=_("use different revision arguments")) |
|
5948 | elif path.pushrev: | |
|
5949 | # It doesn't make any sense to specify ancestor revisions. So limit | |
|
5950 | # to DAG heads to make discovery simpler. | |
|
5951 | expr = revset.formatspec('heads(%r)', path.pushrev) | |
|
5952 | revs = scmutil.revrange(repo, [expr]) | |
|
5953 | revs = [repo[rev].node() for rev in revs] | |
|
5954 | if not revs: | |
|
5955 | raise error.Abort(_('default push revset for path evaluates to an ' | |
|
5956 | 'empty set')) | |
|
5929 | 5957 | |
|
5930 | 5958 | repo._subtoppath = dest |
|
5931 | 5959 | try: |
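
The `path.pushrev` branch above wraps the configured push revset in `heads(%r)` so ancestor revisions are dropped before discovery. A toy model of why reducing to DAG heads loses nothing (assumed simplification; plain set logic, not Mercurial's revset engine):

    def dagheads(revs, parents):
        # drop every member that is an ancestor of another member,
        # walking parent links transitively (stand-in for heads(...))
        revs = set(revs)
        ancestors = set()
        for r in revs:
            stack = list(parents.get(r, []))
            while stack:
                p = stack.pop()
                if p not in ancestors:
                    ancestors.add(p)
                    stack.extend(parents.get(p, []))
        return revs - ancestors

    # 0 <- 1 <- 2 and 0 <- 3: pushing {0,1,2,3} reduces to heads {2, 3};
    # push sends ancestors anyway, so nothing is lost by the reduction.
    assert dagheads({0, 1, 2, 3}, {1: [0], 2: [1], 3: [0]}) == {2, 3}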
@@ -6300,7 +6328,10 b' def revert(ui, repo, *pats, **opts):' | |||
|
6300 | 6328 | related method. |
|
6301 | 6329 | |
|
6302 | 6330 | Modified files are saved with a .orig suffix before reverting. |
|
6303 | To disable these backups, use --no-backup. | |
|
6331 | To disable these backups, use --no-backup. It is possible to store | |
|
6332 | the backup files in a custom directory relative to the root of the | |
|
6333 | repository by setting the ``ui.origbackuppath`` configuration | |
|
6334 | option. | |
|
6304 | 6335 | |
|
6305 | 6336 | See :hg:`help dates` for a list of formats valid for -d/--date. |
|
6306 | 6337 | |
@@ -6380,6 +6411,11 b' def rollback(ui, repo, **opts):' | |||
|
6380 | 6411 | commit transaction if it isn't checked out. Use --force to |
|
6381 | 6412 | override this protection. |
|
6382 | 6413 | |
|
6414 | The rollback command can be entirely disabled by setting the | |
|
6415 | ``ui.rollback`` configuration setting to false. If you're here | |
|
6416 | because you want to use rollback and it's disabled, you can | |
|
6417 | re-enable the command by setting ``ui.rollback`` to true. | |
|
6418 | ||
|
6383 | 6419 | This command is not intended for use on public repositories. Once |
|
6384 | 6420 | changes are visible for pull by other users, rolling a transaction |
|
6385 | 6421 | back locally is ineffective (someone else may already have pulled |
@@ -6389,6 +6425,9 b' def rollback(ui, repo, **opts):' | |||
|
6389 | 6425 | |
|
6390 | 6426 | Returns 0 on success, 1 if no rollback data is available. |
|
6391 | 6427 | """ |
|
6428 | if not ui.configbool('ui', 'rollback', True): | |
|
6429 | raise error.Abort(_('rollback is disabled because it is unsafe'), | |
|
6430 | hint=('see `hg help -v rollback` for information')) | |
|
6392 | 6431 | return repo.rollback(dryrun=opts.get('dry_run'), |
|
6393 | 6432 | force=opts.get('force')) |
|
6394 | 6433 |
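
The new guard reads a single boolean from the `[ui]` section before doing any work. The same gate pattern outside Mercurial, with a plain dict standing in for `ui.configbool` (hypothetical names):

    class Abort(Exception):
        def __init__(self, message, hint=None):
            super(Abort, self).__init__(message)
            self.hint = hint

    def rollback(config):
        # mirror of the guard above: a site-wide switch disables the
        # command outright, with a hint pointing at the help text
        if not config.get('ui.rollback', True):
            raise Abort('rollback is disabled because it is unsafe',
                        hint='see `hg help -v rollback` for information')
        return 0  # the real command would undo the last transaction here

    rollback({})  # enabled by default
    try:
        rollback({'ui.rollback': False})
    except Abort as inst:
        print('abort: %s (%s)' % (inst, inst.hint))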
@@ -7,9 +7,13 b'' | |||
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | import SocketServer | |
|
11 | 10 | import errno |
|
11 | import gc | |
|
12 | 12 | import os |
|
13 | import random | |
|
14 | import select | |
|
15 | import signal | |
|
16 | import socket | |
|
13 | 17 | import struct |
|
14 | 18 | import sys |
|
15 | 19 | import traceback |
@@ -178,6 +182,10 b' class server(object):' | |||
|
178 | 182 | |
|
179 | 183 | self.client = fin |
|
180 | 184 | |
|
185 | def cleanup(self): | |
|
186 | """release and restore resources taken during server session""" | |
|
187 | pass | |
|
188 | ||
|
181 | 189 | def _read(self, size): |
|
182 | 190 | if not size: |
|
183 | 191 | return '' |
@@ -229,12 +237,8 b' class server(object):' | |||
|
229 | 237 | self.repo.ui = self.repo.dirstate._ui = repoui |
|
230 | 238 | self.repo.invalidateall() |
|
231 | 239 | |
|
232 | # reset last-print time of progress bar per command | |
|
233 | # (progbar is singleton, we don't have to do for all uis) | |
|
234 | if copiedui._progbar: | |
|
235 | copiedui._progbar.resetstate() | |
|
236 | ||
|
237 | 240 | for ui in uis: |
|
241 | ui.resetstate() | |
|
238 | 242 | # any kind of interaction must use server channels, but chg may |
|
239 | 243 | # replace channels by fully functional tty files. so nontty is |
|
240 | 244 | # enforced only if cin is a channel. |
@@ -278,6 +282,9 b' class server(object):' | |||
|
278 | 282 | hellomsg += 'encoding: ' + encoding.encoding |
|
279 | 283 | hellomsg += '\n' |
|
280 | 284 | hellomsg += 'pid: %d' % util.getpid() |
|
285 | if util.safehasattr(os, 'getpgid'): | |
|
286 | hellomsg += '\n' | |
|
287 | hellomsg += 'pgid: %d' % os.getpgid(0) | |
|
281 | 288 | |
|
282 | 289 | # write the hello msg in -one- chunk |
|
283 | 290 | self.cout.write(hellomsg) |
@@ -332,66 +339,193 b' class pipeservice(object):' | |||
|
332 | 339 | sv = server(ui, self.repo, fin, fout) |
|
333 | 340 | return sv.serve() |
|
334 | 341 | finally: |
|
342 | sv.cleanup() | |
|
335 | 343 | _restoreio(ui, fin, fout) |
|
336 | 344 | |
|
337 | class _requesthandler(SocketServer.StreamRequestHandler): | |
|
338 | def handle(self): | |
|
339 | ui = self.server.ui | |
|
340 | repo = self.server.repo | |
|
341 | sv = None | |
|
345 | def _initworkerprocess(): | |
|
346 | # use a different process group from the master process, making this | |
|
347 | # process pass kernel "is_current_pgrp_orphaned" check so signals like | |
|
348 | # SIGTSTP, SIGTTIN, SIGTTOU are not ignored. | |
|
349 | os.setpgid(0, 0) | |
|
350 | # change random state otherwise forked request handlers would have a | |
|
351 | # same state inherited from parent. | |
|
352 | random.seed() | |
|
353 | ||
|
354 | def _serverequest(ui, repo, conn, createcmdserver): | |
|
355 | fin = conn.makefile('rb') | |
|
356 | fout = conn.makefile('wb') | |
|
357 | sv = None | |
|
358 | try: | |
|
359 | sv = createcmdserver(repo, conn, fin, fout) | |
|
360 | try: | |
|
361 | sv.serve() | |
|
362 | # handle exceptions that may be raised by command server. most of | |
|
363 | # known exceptions are caught by dispatch. | |
|
364 | except error.Abort as inst: | |
|
365 | ui.warn(_('abort: %s\n') % inst) | |
|
366 | except IOError as inst: | |
|
367 | if inst.errno != errno.EPIPE: | |
|
368 | raise | |
|
369 | except KeyboardInterrupt: | |
|
370 | pass | |
|
371 | finally: | |
|
372 | sv.cleanup() | |
|
373 | except: # re-raises | |
|
374 | # also write traceback to error channel. otherwise client cannot | |
|
375 | # see it because it is written to server's stderr by default. | |
|
376 | if sv: | |
|
377 | cerr = sv.cerr | |
|
378 | else: | |
|
379 | cerr = channeledoutput(fout, 'e') | |
|
380 | traceback.print_exc(file=cerr) | |
|
381 | raise | |
|
382 | finally: | |
|
383 | fin.close() | |
|
342 | 384 | try: |
|
343 | sv = server(ui, repo, self.rfile, self.wfile) | |
|
344 | try: | |
|
345 | sv.serve() | |
|
346 | # handle exceptions that may be raised by command server. most of | |
|
347 | # known exceptions are caught by dispatch. | |
|
348 | except error.Abort as inst: | |
|
349 | ui.warn(_('abort: %s\n') % inst) | |
|
350 | except IOError as inst: | |
|
351 | if inst.errno != errno.EPIPE: | |
|
352 | raise | |
|
353 | except KeyboardInterrupt: | |
|
354 | pass | |
|
355 | except: # re-raises | |
|
356 | # also write traceback to error channel. otherwise client cannot | |
|
357 | # see it because it is written to server's stderr by default. | |
|
358 | if sv: | |
|
359 | cerr = sv.cerr | |
|
360 | else: | |
|
361 | cerr = channeledoutput(self.wfile, 'e') | |
|
362 | traceback.print_exc(file=cerr) | |
|
363 | raise | |
|
385 | fout.close() # implicit flush() may cause another EPIPE | |
|
386 | except IOError as inst: | |
|
387 | if inst.errno != errno.EPIPE: | |
|
388 | raise | |
|
389 | ||
|
390 | class unixservicehandler(object): | |
|
391 | """Set of pluggable operations for unix-mode services | |
|
392 | ||
|
393 | Almost all methods except for createcmdserver() are called in the main | |
|
394 | process. You can't pass mutable resource back from createcmdserver(). | |
|
395 | """ | |
|
396 | ||
|
397 | pollinterval = None | |
|
398 | ||
|
399 | def __init__(self, ui): | |
|
400 | self.ui = ui | |
|
401 | ||
|
402 | def bindsocket(self, sock, address): | |
|
403 | util.bindunixsocket(sock, address) | |
|
364 | 404 | |
|
365 | class unixservice(object): | |
|
405 | def unlinksocket(self, address): | |
|
406 | os.unlink(address) | |
|
407 | ||
|
408 | def printbanner(self, address): | |
|
409 | self.ui.status(_('listening at %s\n') % address) | |
|
410 | self.ui.flush() # avoid buffering of status message | |
|
411 | ||
|
412 | def shouldexit(self): | |
|
413 | """True if server should shut down; checked per pollinterval""" | |
|
414 | return False | |
|
415 | ||
|
416 | def newconnection(self): | |
|
417 | """Called when main process notices new connection""" | |
|
418 | pass | |
|
419 | ||
|
420 | def createcmdserver(self, repo, conn, fin, fout): | |
|
421 | """Create new command server instance; called in the process that | |
|
422 | serves for the current connection""" | |
|
423 | return server(self.ui, repo, fin, fout) | |
|
424 | ||
|
425 | class unixforkingservice(object): | |
|
366 | 426 | """ |
|
367 | 427 | Listens on unix domain socket and forks server per connection |
|
368 | 428 | """ |
|
369 | def __init__(self, ui, repo, opts): | |
|
429 | ||
|
430 | def __init__(self, ui, repo, opts, handler=None): | |
|
370 | 431 | self.ui = ui |
|
371 | 432 | self.repo = repo |
|
372 | 433 | self.address = opts['address'] |
|
373 | if not util.safehasattr(SocketServer, 'UnixStreamServer'): | |
|
434 | if not util.safehasattr(socket, 'AF_UNIX'): | |
|
374 | 435 | raise error.Abort(_('unsupported platform')) |
|
375 | 436 | if not self.address: |
|
376 | 437 | raise error.Abort(_('no socket path specified with --address')) |
|
438 | self._servicehandler = handler or unixservicehandler(ui) | |
|
439 | self._sock = None | |
|
440 | self._oldsigchldhandler = None | |
|
441 | self._workerpids = set() # updated by signal handler; do not iterate | |
|
377 | 442 | |
|
378 | 443 | def init(self): |
|
379 | class cls(SocketServer.ForkingMixIn, SocketServer.UnixStreamServer): | |
|
380 | ui = self.ui | |
|
381 | repo = self.repo | |
|
382 | self.server = cls(self.address, _requesthandler) | |
|
383 | self.ui.status(_('listening at %s\n') % self.address) | |
|
384 | self.ui.flush() # avoid buffering of status message | |
|
444 | self._sock = socket.socket(socket.AF_UNIX) | |
|
445 | self._servicehandler.bindsocket(self._sock, self.address) | |
|
446 | self._sock.listen(socket.SOMAXCONN) | |
|
447 | o = signal.signal(signal.SIGCHLD, self._sigchldhandler) | |
|
448 | self._oldsigchldhandler = o | |
|
449 | self._servicehandler.printbanner(self.address) | |
|
450 | ||
|
451 | def _cleanup(self): | |
|
452 | signal.signal(signal.SIGCHLD, self._oldsigchldhandler) | |
|
453 | self._sock.close() | |
|
454 | self._servicehandler.unlinksocket(self.address) | |
|
455 | # don't kill child processes as they have active clients, just wait | |
|
456 | self._reapworkers(0) | |
|
385 | 457 | |
|
386 | 458 | def run(self): |
|
387 | 459 | try: |
|
388 | self.server.serve_forever() | |
|
460 | self._mainloop() | |
|
389 | 461 | finally: |
|
390 | self.server.server_close() | |
|
462 | self._cleanup() | |
|
463 | ||
|
464 | def _mainloop(self): | |
|
465 | h = self._servicehandler | |
|
466 | while not h.shouldexit(): | |
|
467 | try: | |
|
468 | ready = select.select([self._sock], [], [], h.pollinterval)[0] | |
|
469 | if not ready: | |
|
470 | continue | |
|
471 | conn, _addr = self._sock.accept() | |
|
472 | except (select.error, socket.error) as inst: | |
|
473 | if inst.args[0] == errno.EINTR: | |
|
474 | continue | |
|
475 | raise | |
|
476 | ||
|
477 | pid = os.fork() | |
|
478 | if pid: | |
|
479 | try: | |
|
480 | self.ui.debug('forked worker process (pid=%d)\n' % pid) | |
|
481 | self._workerpids.add(pid) | |
|
482 | h.newconnection() | |
|
483 | finally: | |
|
484 | conn.close() # release handle in parent process | |
|
485 | else: | |
|
486 | try: | |
|
487 | self._runworker(conn) | |
|
488 | conn.close() | |
|
489 | os._exit(0) | |
|
490 | except: # never return, hence no re-raises | |
|
491 | try: | |
|
492 | self.ui.traceback(force=True) | |
|
493 | finally: | |
|
494 | os._exit(255) | |
|
495 | ||
|
496 | def _sigchldhandler(self, signal, frame): | |
|
497 | self._reapworkers(os.WNOHANG) | |
|
498 | ||
|
499 | def _reapworkers(self, options): | |
|
500 | while self._workerpids: | |
|
501 | try: | |
|
502 | pid, _status = os.waitpid(-1, options) | |
|
503 | except OSError as inst: | |
|
504 | if inst.errno == errno.EINTR: | |
|
505 | continue | |
|
506 | if inst.errno != errno.ECHILD: | |
|
507 | raise | |
|
508 | # no child processes at all (reaped by other waitpid()?) | |
|
509 | self._workerpids.clear() | |
|
510 | return | |
|
511 | if pid == 0: | |
|
512 | # no waitable child processes | |
|
513 | return | |
|
514 | self.ui.debug('worker process exited (pid=%d)\n' % pid) | |
|
515 | self._workerpids.discard(pid) | |
|
516 | ||
|
517 | def _runworker(self, conn): | |
|
518 | signal.signal(signal.SIGCHLD, self._oldsigchldhandler) | |
|
519 | _initworkerprocess() | |
|
520 | h = self._servicehandler | |
|
521 | try: | |
|
522 | _serverequest(self.ui, self.repo, conn, h.createcmdserver) | |
|
523 | finally: | |
|
524 | gc.collect() # trigger __del__ since worker process uses os._exit | |
|
391 | 525 | |
|
392 | 526 | _servicemap = { |
|
393 | 527 | 'pipe': pipeservice, |
|
394 | 'unix': unixservice, | |
|
528 | 'unix': unixforkingservice, | |
|
395 | 529 | } |
|
396 | 530 | |
|
397 | 531 | def createservice(ui, repo, opts): |
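
The rewrite above trades SocketServer.ForkingMixIn for an explicit accept/fork loop with SIGCHLD reaping. A condensed standalone sketch of the same pattern (an echo service instead of a command server, error handling trimmed; assumes a Unix platform):

    import errno, os, signal, socket

    workers = set()  # pids; touched from the signal handler, never iterated

    def reap(options):
        # collect exited children so they do not linger as zombies
        while workers:
            try:
                pid, _status = os.waitpid(-1, options)
            except OSError as inst:
                if inst.errno == errno.ECHILD:
                    workers.clear()  # no child processes left at all
                return
            if pid == 0:
                return  # WNOHANG: children exist but none has exited yet
            workers.discard(pid)

    def serve(path):
        sock = socket.socket(socket.AF_UNIX)
        sock.bind(path)
        sock.listen(socket.SOMAXCONN)
        signal.signal(signal.SIGCHLD, lambda sig, frame: reap(os.WNOHANG))
        while True:
            try:
                conn, _addr = sock.accept()
            except socket.error as inst:
                if inst.args[0] == errno.EINTR:
                    continue  # interrupted by SIGCHLD; just retry
                raise
            pid = os.fork()
            if pid:
                workers.add(pid)
                conn.close()  # parent releases its copy of the handle
            else:
                os.setpgid(0, 0)  # own process group, as in the change above
                conn.sendall(conn.recv(4096))  # stand-in for the service
                conn.close()
                os._exit(0)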
@@ -918,28 +918,25 b' class basefilectx(object):' | |||
|
918 | 918 | return p[1] |
|
919 | 919 | return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog) |
|
920 | 920 | |
|
921 | def annotate(self, follow=False, linenumber=None, diffopts=None): | |
|
922 | '''returns a list of tuples of (ctx, line) for each line | |
|
921 | def annotate(self, follow=False, linenumber=False, diffopts=None): | |
|
922 | '''returns a list of tuples of ((ctx, number), line) for each line | |
|
923 | 923 | in the file, where ctx is the filectx of the node where |
|
924 | that line was last changed. | |
|
925 | This returns tuples of ((ctx, linenumber), line) for each line, | |
|
926 | if "linenumber" parameter is NOT "None". | |
|
927 | In such tuples, linenumber means one at the first appearance | |
|
928 | in the managed file. | |
|
929 | To reduce annotation cost, | |
|
930 | this returns fixed value(False is used) as linenumber, | |
|
931 | if "linenumber" parameter is "False".''' | |
|
924 | that line was last changed; if linenumber parameter is true, number is | |
|
925 | the line number at the first appearance in the managed file, otherwise, | |
|
926 | number has a fixed value of False. | |
|
927 | ''' | |
|
932 | 928 | |
|
933 | if linenumber is None: | |
|
929 | def lines(text): | |
|
930 | if text.endswith("\n"): | |
|
931 | return text.count("\n") | |
|
932 | return text.count("\n") + 1 | |
|
933 | ||
|
934 | if linenumber: | |
|
934 | 935 | def decorate(text, rev): |
|
935 | return ([rev] * len(text.splitlines()), text) | |
|
936 | elif linenumber: | |
|
937 | def decorate(text, rev): | |
|
938 | size = len(text.splitlines()) | |
|
939 | return ([(rev, i) for i in xrange(1, size + 1)], text) | |
|
936 | return ([(rev, i) for i in xrange(1, lines(text) + 1)], text) | |
|
940 | 937 | else: |
|
941 | 938 | def decorate(text, rev): |
|
942 | return ([(rev, False)] * len(text.splitlines()), text) | |
|
939 | return ([(rev, False)] * lines(text), text) | |
|
943 | 940 | |
|
944 | 941 | def pair(parent, child): |
|
945 | 942 | blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts, |
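
The decorate variants above differ only in what they attach to each line before blocks are paired. A small self-contained model of the two kept behaviours, including the trailing-newline rule of the new `lines()` helper:

    def lines(text):
        # count logical lines; a trailing newline does not open a new one
        if text.endswith("\n"):
            return text.count("\n")
        return text.count("\n") + 1

    def decorate_plain(text, rev):
        # linenumber=False: every line carries (rev, False)
        return ([(rev, False)] * lines(text), text)

    def decorate_numbered(text, rev):
        # linenumber=True: lines carry (rev, 1), (rev, 2), ...
        return ([(rev, i) for i in range(1, lines(text) + 1)], text)

    text = "a\nb\n"
    assert lines(text) == 2
    assert decorate_numbered(text, 7)[0] == [(7, 1), (7, 2)]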
@@ -484,16 +484,16 b' def checkcopies(ctx, f, m1, m2, ca, limi' | |||
|
484 | 484 | f1r, f2r = f1.linkrev(), f2.linkrev() |
|
485 | 485 | |
|
486 | 486 | if f1r is None: |
|
487 | f1 = g1.next() | |
|
487 | f1 = next(g1) | |
|
488 | 488 | if f2r is None: |
|
489 | f2 = g2.next() | |
|
489 | f2 = next(g2) | |
|
490 | 490 | |
|
491 | 491 | while True: |
|
492 | 492 | f1r, f2r = f1.linkrev(), f2.linkrev() |
|
493 | 493 | if f1r > f2r: |
|
494 | f1 = g1.next() | |
|
494 | f1 = next(g1) | |
|
495 | 495 | elif f2r > f1r: |
|
496 | f2 = g2.next() | |
|
496 | f2 = next(g2) | |
|
497 | 497 | elif f1 == f2: |
|
498 | 498 | return f1 # a match |
|
499 | 499 | elif f1r == f2r or f1r < limit or f2r < limit: |
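
The replacements above swap the Python 2-only generator method `g.next()` for the builtin `next()`, which exists on both major versions. For reference:

    def revisions():
        # stand-in for the filectx ancestor generators g1/g2 above
        for rev in (3, 2, 1):
            yield rev

    g = revisions()
    assert next(g) == 3  # portable across Python 2.6+ and 3.x
    assert next(g) == 2  # g.next() would raise AttributeError on Python 3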
@@ -91,6 +91,7 b' class patchnode(object):' | |||
|
91 | 91 | def allchildren(self): |
|
92 | 92 | "Return a list of all of the direct children of this node" |
|
93 | 93 | raise NotImplementedError("method must be implemented by subclass") |
|
94 | ||
|
94 | 95 | def nextsibling(self): |
|
95 | 96 | """ |
|
96 | 97 | Return the closest next item of the same type where there are no items |
@@ -110,18 +111,12 b' class patchnode(object):' | |||
|
110 | 111 | def parentitem(self): |
|
111 | 112 | raise NotImplementedError("method must be implemented by subclass") |
|
112 | 113 | |
|
113 | ||
|
114 | def nextitem(self, constrainlevel=True, skipfolded=True): | |
|
114 | def nextitem(self, skipfolded=True): | |
|
115 | 115 | """ |
|
116 | If constrainLevel == True, return the closest next item | |
|
117 | of the same type where there are no items of different types between | |
|
118 | the current item and this closest item. | |
|
116 | Try to return the next item closest to this item, regardless of item's | |
|
117 | type (header, hunk, or hunkline). | |
|
119 | 118 | |
|
120 | If constrainLevel == False, then try to return the next item | |
|
121 | closest to this item, regardless of item's type (header, hunk, or | |
|
122 | HunkLine). | |
|
123 | ||
|
124 | If skipFolded == True, and the current item is folded, then the child | |
|
119 | If skipfolded == True, and the current item is folded, then the child | |
|
125 | 120 | items that are hidden due to folding will be skipped when determining |
|
126 | 121 | the next item. |
|
127 | 122 | |
@@ -131,9 +126,7 b' class patchnode(object):' | |||
|
131 | 126 | itemfolded = self.folded |
|
132 | 127 | except AttributeError: |
|
133 | 128 | itemfolded = False |
|
134 | if constrainlevel: | |
|
135 | return self.nextsibling() | |
|
136 | elif skipfolded and itemfolded: | |
|
129 | if skipfolded and itemfolded: | |
|
137 | 130 | nextitem = self.nextsibling() |
|
138 | 131 | if nextitem is None: |
|
139 | 132 | try: |
@@ -164,43 +157,31 b' class patchnode(object):' | |||
|
164 | 157 | except AttributeError: # parent and/or grandparent was None |
|
165 | 158 | return None |
|
166 | 159 | |
|
167 | def previtem(self, constrainlevel=True, skipfolded=True): | |
|
160 | def previtem(self): | |
|
168 | 161 | """ |
|
169 | If constrainLevel == True, return the closest previous item | |
|
170 | of the same type where there are no items of different types between | |
|
171 | the current item and this closest item. | |
|
172 | ||
|
173 | If constrainLevel == False, then try to return the previous item | |
|
174 | closest to this item, regardless of item's type (header, hunk, or | |
|
175 | HunkLine). | |
|
176 | ||
|
177 | If skipFolded == True, and the current item is folded, then the items | |
|
178 | that are hidden due to folding will be skipped when determining the | |
|
179 | next item. | |
|
162 | Try to return the previous item closest to this item, regardless of | |
|
163 | item's type (header, hunk, or hunkline). | |
|
180 | 164 | |
|
181 | 165 | If it is not possible to get the previous item, return None. |
|
182 | 166 | """ |
|
183 | if constrainlevel: | |
|
184 | return self.prevsibling() | |
|
185 | else: | |
|
186 | # try previous sibling's last child's last child, | |
|
187 |
|
|
|
188 | prevsibling = self.prevsibling() | |
|
189 | if prevsibling is not None: | |
|
190 | prevsiblinglastchild = prevsibling.lastchild() | |
|
191 | if ((prevsiblinglastchild is not None) and | |
|
192 | not prevsibling.folded): | |
|
193 | prevsiblinglclc = prevsiblinglastchild.lastchild() | |
|
194 | if ((prevsiblinglclc is not None) and | |
|
195 | not prevsiblinglastchild.folded): | |
|
196 | return prevsiblinglclc | |
|
197 | else: | |
|
198 | return prevsiblinglastchild | |
|
167 | # try previous sibling's last child's last child, | |
|
168 | # else try previous sibling's last child, else try previous sibling | |
|
169 | prevsibling = self.prevsibling() | |
|
170 | if prevsibling is not None: | |
|
171 | prevsiblinglastchild = prevsibling.lastchild() | |
|
172 | if ((prevsiblinglastchild is not None) and | |
|
173 | not prevsibling.folded): | |
|
174 | prevsiblinglclc = prevsiblinglastchild.lastchild() | |
|
175 | if ((prevsiblinglclc is not None) and | |
|
176 | not prevsiblinglastchild.folded): | |
|
177 | return prevsiblinglclc | |
|
199 | 178 | else: |
|
200 | return prevsibling | |
|
179 | return prevsiblinglastchild | |
|
180 | else: | |
|
181 | return prevsibling | |
|
201 | 182 | |
|
202 | # try parent (or None) | |
|
203 | return self.parentitem() | |
|
183 | # try parent (or None) | |
|
184 | return self.parentitem() | |
|
204 | 185 | |
|
205 | 186 | class patch(patchnode, list): # todo: rename patchroot |
|
206 | 187 | """ |
@@ -236,7 +217,6 b' class uiheader(patchnode):' | |||
|
236 | 217 | self.neverunfolded = True |
|
237 | 218 | self.hunks = [uihunk(h, self) for h in self.hunks] |
|
238 | 219 | |
|
239 | ||
|
240 | 220 | def prettystr(self): |
|
241 | 221 | x = stringio() |
|
242 | 222 | self.pretty(x) |
@@ -392,6 +372,7 b' class uihunk(patchnode):' | |||
|
392 | 372 | def allchildren(self): |
|
393 | 373 | "return a list of all of the direct children of this node" |
|
394 | 374 | return self.changedlines |
|
375 | ||
|
395 | 376 | def countchanges(self): |
|
396 | 377 | """changedlines -> (n+,n-)""" |
|
397 | 378 | add = len([l for l in self.changedlines if l.applied |
@@ -455,14 +436,12 b' class uihunk(patchnode):' | |||
|
455 | 436 | |
|
456 | 437 | def __getattr__(self, name): |
|
457 | 438 | return getattr(self._hunk, name) |
|
439 | ||
|
458 | 440 | def __repr__(self): |
|
459 | 441 | return '<hunk %r@%d>' % (self.filename(), self.fromline) |
|
460 | 442 | |
|
461 | def filterpatch(ui, chunks, chunkselector, operation=None): | |
|
443 | def filterpatch(ui, chunks, chunkselector): | |
|
462 | 444 | """interactively filter patch chunks into applied-only chunks""" |
|
463 | ||
|
464 | if operation is None: | |
|
465 | operation = _('confirm') | |
|
466 | 445 | chunks = list(chunks) |
|
467 | 446 | # convert chunks list into structure suitable for displaying/modifying |
|
468 | 447 | # with curses. create a list of headers only. |
@@ -603,13 +582,10 b' class curseschunkselector(object):' | |||
|
603 | 582 | the last hunkline of the hunk prior to the selected hunk. or, if |
|
604 | 583 | the first hunkline of a hunk is currently selected, then select the |
|
605 | 584 | hunk itself. |
|
606 | ||
|
607 | if the currently selected item is already at the top of the screen, | |
|
608 | scroll the screen down to show the new-selected item. | |
|
609 | 585 | """ |
|
610 | 586 | currentitem = self.currentselecteditem |
|
611 | 587 | |
|
612 | nextitem = currentitem.previtem(constrainlevel=False) | |
|
588 | nextitem = currentitem.previtem() | |
|
613 | 589 | |
|
614 | 590 | if nextitem is None: |
|
615 | 591 | # if no parent item (i.e. currentitem is the first header), then |
@@ -623,13 +599,10 b' class curseschunkselector(object):' | |||
|
623 | 599 | select (if possible) the previous item on the same level as the |
|
624 | 600 | currently selected item. otherwise, select (if possible) the |
|
625 | 601 | parent-item of the currently selected item. |
|
626 | ||
|
627 | if the currently selected item is already at the top of the screen, | |
|
628 | scroll the screen down to show the new-selected item. | |
|
629 | 602 | """ |
|
630 | 603 | currentitem = self.currentselecteditem |
|
631 | nextitem = currentitem.previtem() | |
|
632 | # if there's no previous item on this level, try choosing the parent | |
|
604 | nextitem = currentitem.prevsibling() | |
|
605 | # if there's no previous sibling, try choosing the parent | |
|
633 | 606 | if nextitem is None: |
|
634 | 607 | nextitem = currentitem.parentitem() |
|
635 | 608 | if nextitem is None: |
@@ -646,14 +619,11 b' class curseschunkselector(object):' | |||
|
646 | 619 | the first hunkline of the selected hunk. or, if the last hunkline of |
|
647 | 620 | a hunk is currently selected, then select the next hunk, if one exists, |
|
648 | 621 | or if not, the next header if one exists. |
|
649 | ||
|
650 | if the currently selected item is already at the bottom of the screen, | |
|
651 | scroll the screen up to show the new-selected item. | |
|
652 | 622 | """ |
|
653 | 623 | #self.startprintline += 1 #debug |
|
654 | 624 | currentitem = self.currentselecteditem |
|
655 | 625 | |
|
656 | nextitem = currentitem.nextitem(constrainlevel=False) | |
|
626 | nextitem = currentitem.nextitem() | |
|
657 | 627 | # if there's no next item, keep the selection as-is |
|
658 | 628 | if nextitem is None: |
|
659 | 629 | nextitem = currentitem |
@@ -662,24 +632,21 b' class curseschunkselector(object):' | |||
|
662 | 632 | |
|
663 | 633 | def downarrowshiftevent(self): |
|
664 | 634 | """ |
|
665 | if the cursor is already at the bottom chunk, scroll the screen up and | |
|
666 | move the cursor-position to the subsequent chunk. otherwise, only move | |
|
667 | the cursor position down one chunk. | |
|
635 | select (if possible) the next item on the same level as the currently | |
|
636 | selected item. otherwise, select (if possible) the next item on the | |
|
637 | same level as the parent item of the currently selected item. | |
|
668 | 638 | """ |
|
669 | # todo: update docstring | |
|
670 | ||
|
671 | 639 | currentitem = self.currentselecteditem |
|
672 | nextitem = currentitem.nextitem() | |
|
673 | # if there's no next item on this level, try choosing the parent's | |
|
674 | # nextitem. | |
|
640 | nextitem = currentitem.nextsibling() | |
|
641 | # if there's no next sibling, try choosing the parent's nextsibling | |
|
675 | 642 | if nextitem is None: |
|
676 | 643 | try: |
|
677 | nextitem = currentitem.parentitem().nextitem() | |
|
644 | nextitem = currentitem.parentitem().nextsibling() | |
|
678 | 645 | except AttributeError: |
|
679 | # parentitem returned None, so nextitem() can't be called | |
|
646 | # parentitem returned None, so nextsibling() can't be called | |
|
680 | 647 | nextitem = None |
|
681 | 648 | if nextitem is None: |
|
682 | # if no next item on parent-level, then no change... | |
|
649 | # if parent has no next sibling, then no change... | |
|
683 | 650 | nextitem = currentitem |
|
684 | 651 | |
|
685 | 652 | self.currentselecteditem = nextitem |
@@ -766,7 +733,6 b' class curseschunkselector(object):' | |||
|
766 | 733 | # negative values scroll in pgup direction |
|
767 | 734 | self.scrolllines(selstart - padstartbuffered) |
|
768 | 735 | |
|
769 | ||
|
770 | 736 | def scrolllines(self, numlines): |
|
771 | 737 | "scroll the screen up (down) by numlines when numlines >0 (<0)." |
|
772 | 738 | self.firstlineofpadtoprint += numlines |
@@ -894,7 +860,6 b' class curseschunkselector(object):' | |||
|
894 | 860 | if isinstance(item, (uiheader, uihunk)): |
|
895 | 861 | item.folded = not item.folded |
|
896 | 862 | |
|
897 | ||
|
898 | 863 | def alignstring(self, instr, window): |
|
899 | 864 | """ |
|
900 | 865 | add whitespace to the end of a string in order to make it fill |
@@ -1133,7 +1098,6 b' class curseschunkselector(object):' | |||
|
1133 | 1098 | lineprefix = " "*self.hunkindentnumchars + checkbox |
|
1134 | 1099 | frtoline = " " + hunk.getfromtoline().strip("\n") |
|
1135 | 1100 | |
|
1136 | ||
|
1137 | 1101 | outstr += self.printstring(self.chunkpad, lineprefix, towin=towin, |
|
1138 | 1102 | align=False) # add uncolored checkbox/indent |
|
1139 | 1103 | outstr += self.printstring(self.chunkpad, frtoline, pair=colorpair, |
@@ -1377,7 +1341,7 b' the following are valid keystrokes:' | |||
|
1377 | 1341 | F : fold / unfold parent item and all of its ancestors |
|
1378 | 1342 | m : edit / resume editing the commit message |
|
1379 | 1343 | e : edit the currently selected hunk |
|
1380 | a : toggle amend mode | |
|
1344 | a : toggle amend mode, only with commit -i | |
|
1381 | 1345 | c : confirm selected changes |
|
1382 | 1346 | r : review/edit and confirm selected changes |
|
1383 | 1347 | q : quit without confirming (no changes will be made) |
@@ -188,15 +188,23 b' def _demandimport(name, globals=None, lo' | |||
|
188 | 188 | if globalname and isinstance(symbol, _demandmod): |
|
189 | 189 | symbol._addref(globalname) |
|
190 | 190 | |
|
191 | def chainmodules(rootmod, modname): | |
|
192 | # recurse down the module chain, and return the leaf module | |
|
193 | mod = rootmod | |
|
194 | for comp in modname.split('.')[1:]: | |
|
195 | if getattr(mod, comp, nothing) is nothing: | |
|
196 | setattr(mod, comp, | |
|
197 | _demandmod(comp, mod.__dict__, mod.__dict__)) | |
|
198 | mod = getattr(mod, comp) | |
|
199 | return mod | |
|
200 | ||
|
191 | 201 | if level >= 0: |
|
192 | # The "from a import b,c,d" or "from .a import b,c,d" | |
|
193 | # syntax gives errors with some modules for unknown | |
|
194 | # reasons. Work around the problem. | |
|
195 | 202 | if name: |
|
196 | return _hgextimport(_origimport, name, globals, locals, | |
|
197 | fromlist, level) | |
|
198 | ||
|
199 | if _pypy: | |
|
203 | # "from a import b" or "from .a import b" style | |
|
204 | rootmod = _hgextimport(_origimport, name, globals, locals, | |
|
205 | level=level) | |
|
206 | mod = chainmodules(rootmod, name) | |
|
207 | elif _pypy: | |
|
200 | 208 | # PyPy's __import__ throws an exception if invoked |
|
201 | 209 | # with an empty name and no fromlist. Recreate the |
|
202 | 210 | # desired behaviour by hand. |
@@ -220,12 +228,7 b' def _demandimport(name, globals=None, lo' | |||
|
220 | 228 | # But, we still need to support lazy loading of standard library and 3rd |
|
221 | 229 | # party modules. So handle level == -1. |
|
222 | 230 | mod = _hgextimport(_origimport, name, globals, locals) |
|
223 | # recurse down the module chain | |
|
224 | for comp in name.split('.')[1:]: | |
|
225 | if getattr(mod, comp, nothing) is nothing: | |
|
226 | setattr(mod, comp, | |
|
227 | _demandmod(comp, mod.__dict__, mod.__dict__)) | |
|
228 | mod = getattr(mod, comp) | |
|
231 | mod = chainmodules(mod, name) | |
|
229 | 232 | |
|
230 | 233 | for x in fromlist: |
|
231 | 234 | processfromitem(mod, x) |
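
`chainmodules` factors out the getattr walk that resolves a dotted name like 'a.b.c' from its root package, inserting a lazy `_demandmod` wherever an attribute is missing. Stripped of the demand-loading, the traversal looks like this (eager import used where demandimport would insert a placeholder):

    import importlib

    def leafmodule(rootmod, modname):
        # walk 'a.b.c' from the already-imported root module down to c
        mod = rootmod
        for comp in modname.split('.')[1:]:
            if not hasattr(mod, comp):
                importlib.import_module(mod.__name__ + '.' + comp)
            mod = getattr(mod, comp)
        return mod

    import os
    assert leafmodule(os, 'os.path') is os.path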
@@ -95,6 +95,10 b' def _destupdatebranch(repo, clean, check' | |||
|
95 | 95 | wc = repo[None] |
|
96 | 96 | movemark = node = None |
|
97 | 97 | currentbranch = wc.branch() |
|
98 | ||
|
99 | if clean: | |
|
100 | currentbranch = repo['.'].branch() | |
|
101 | ||
|
98 | 102 | if currentbranch in repo.branchmap(): |
|
99 | 103 | heads = repo.branchheads(currentbranch) |
|
100 | 104 | if heads: |
@@ -74,6 +74,8 b' def _trypending(root, vfs, filename):' | |||
|
74 | 74 | raise |
|
75 | 75 | return (vfs(filename), False) |
|
76 | 76 | |
|
77 | _token = object() | |
|
78 | ||
|
77 | 79 | class dirstate(object): |
|
78 | 80 | |
|
79 | 81 | def __init__(self, opener, ui, root, validate): |
@@ -365,7 +367,7 b' class dirstate(object):' | |||
|
365 | 367 | |
|
366 | 368 | def setbranch(self, branch): |
|
367 | 369 | self._branch = encoding.fromlocal(branch) |
|
368 | f = self._opener('branch', 'w', atomictemp=True) | |
|
370 | f = self._opener('branch', 'w', atomictemp=True, checkambig=True) | |
|
369 | 371 | try: |
|
370 | 372 | f.write(self._branch + '\n') |
|
371 | 373 | f.close() |
@@ -580,6 +582,8 b' class dirstate(object):' | |||
|
580 | 582 | del self._map[f] |
|
581 | 583 | if f in self._nonnormalset: |
|
582 | 584 | self._nonnormalset.remove(f) |
|
585 | if f in self._copymap: | |
|
586 | del self._copymap[f] | |
|
583 | 587 | |
|
584 | 588 | def _discoverpath(self, path, normed, ignoremissing, exists, storemap): |
|
585 | 589 | if exists is None: |
@@ -688,16 +692,15 b' class dirstate(object):' | |||
|
688 | 692 | self._pl = (parent, nullid) |
|
689 | 693 | self._dirty = True |
|
690 | 694 | |
|
691 | def write(self, tr=False): | |
|
695 | def write(self, tr=_token): | |
|
692 | 696 | if not self._dirty: |
|
693 | 697 | return |
|
694 | 698 | |
|
695 | 699 | filename = self._filename |
|
696 | if tr is False: # not explicitly specified | |
|
697 | if (self._ui.configbool('devel', 'all-warnings') | |
|
698 | or self._ui.configbool('devel', 'check-dirstate-write')): | |
|
699 | self._ui.develwarn('use dirstate.write with ' | |
|
700 | 'repo.currenttransaction()') | |
|
700 | if tr is _token: # not explicitly specified | |
|
701 | self._ui.deprecwarn('use dirstate.write with ' | |
|
702 | 'repo.currenttransaction()', | |
|
703 | '3.9') | |
|
701 | 704 | |
|
702 | 705 | if self._opener.lexists(self._pendingfilename): |
|
703 | 706 | # if pending file already exists, in-memory changes |
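
`_token = object()` (added near the top of this file) is a sentinel default: unlike `None` or the previous `False`, no caller can pass it by accident, so `write()` can distinguish "argument omitted" from every real transaction value. The idiom in isolation:

    _token = object()  # unique, unforgeable default marker

    def write(tr=_token):
        if tr is _token:  # the caller omitted the argument entirely
            print('deprecated: call write() with a transaction')
            tr = None
        return tr

    write()      # triggers the deprecation path
    write(None)  # explicit None is a legitimate, distinct value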
@@ -727,7 +730,7 b' class dirstate(object):' | |||
|
727 | 730 | self._writedirstate, location='plain') |
|
728 | 731 | return |
|
729 | 732 | |
|
730 | st = self._opener(filename, "w", atomictemp=True) | |
|
733 | st = self._opener(filename, "w", atomictemp=True, checkambig=True) | |
|
731 | 734 | self._writedirstate(st) |
|
732 | 735 | |
|
733 | 736 | def _writedirstate(self, st): |
@@ -1206,14 +1209,16 b' class dirstate(object):' | |||
|
1206 | 1209 | else: |
|
1207 | 1210 | return self._filename |
|
1208 | 1211 | |
|
1209 | def _savebackup(self, tr, suffix): | |
|
1212 | def savebackup(self, tr, suffix='', prefix=''): | |
|
1210 | 1213 | '''Save current dirstate into backup file with suffix''' |
|
1214 | assert len(suffix) > 0 or len(prefix) > 0 | |
|
1211 | 1215 | filename = self._actualfilename(tr) |
|
1212 | 1216 | |
|
1213 | 1217 | # use '_writedirstate' instead of 'write' to write changes certainly, |
|
1214 | 1218 | # because the latter omits writing out if transaction is running. |
|
1215 | 1219 | # output file will be used to create backup of dirstate at this point. |
|
1216 | self._writedirstate(self._opener(filename, "w", atomictemp=True)) | |
|
1220 | self._writedirstate(self._opener(filename, "w", atomictemp=True, | |
|
1221 | checkambig=True)) | |
|
1217 | 1222 | |
|
1218 | 1223 | if tr: |
|
1219 | 1224 | # ensure that subsequent tr.writepending returns True for |
@@ -1227,17 +1232,22 b' class dirstate(object):' | |||
|
1227 | 1232 | # end of this transaction |
|
1228 | 1233 | tr.registertmp(filename, location='plain') |
|
1229 | 1234 | |
|
1230 | self._opener.write(filename + suffix, self._opener.tryread(filename)) | |
|
1235 | self._opener.write(prefix + self._filename + suffix, | |
|
1236 | self._opener.tryread(filename)) | |
|
1231 | 1237 | |
|
1232 | def _restorebackup(self, tr, suffix): | |
|
1238 | def restorebackup(self, tr, suffix='', prefix=''): | |
|
1233 | 1239 | '''Restore dirstate by backup file with suffix''' |
|
1240 | assert len(suffix) > 0 or len(prefix) > 0 | |
|
1234 | 1241 | # this "invalidate()" prevents "wlock.release()" from writing |
|
1235 | 1242 | # changes of dirstate out after restoring from backup file |
|
1236 | 1243 | self.invalidate() |
|
1237 | 1244 | filename = self._actualfilename(tr) |
|
1238 | self._opener.rename(filename + suffix, filename) | |
|
1245 | # using self._filename to avoid having "pending" in the backup filename | |
|
1246 | self._opener.rename(prefix + self._filename + suffix, filename, | |
|
1247 | checkambig=True) | |
|
1239 | 1248 | |
|
1240 | def _clearbackup(self, tr, suffix): | |
|
1249 | def clearbackup(self, tr, suffix='', prefix=''): | |
|
1241 | 1250 | '''Clear backup file with suffix''' |
|
1242 | filename = self._actualfilename(tr) | |
|
1243 | self._opener.unlink(filename + suffix) | |
|
1251 | assert len(suffix) > 0 or len(prefix) > 0 | |
|
1252 | # using self._filename to avoid having "pending" in the backup filename | |
|
1253 | self._opener.unlink(prefix + self._filename + suffix) |
@@ -384,7 +384,7 b' class cmdalias(object):' | |||
|
384 | 384 | self.cmdname = '' |
|
385 | 385 | self.definition = definition |
|
386 | 386 | self.fn = None |
|
387 | self.args = [] | |
|
387 | self.givenargs = [] | |
|
388 | 388 | self.opts = [] |
|
389 | 389 | self.help = '' |
|
390 | 390 | self.badalias = None |
@@ -432,7 +432,7 b' class cmdalias(object):' | |||
|
432 | 432 | % (self.name, inst)) |
|
433 | 433 | return |
|
434 | 434 | self.cmdname = cmd = args.pop(0) |
|
435 | args = map(util.expandpath, args) | |
|
435 | self.givenargs = args | |
|
436 | 436 | |
|
437 | 437 | for invalidarg in ("--cwd", "-R", "--repository", "--repo", "--config"): |
|
438 | 438 | if _earlygetopt([invalidarg], args): |
@@ -448,7 +448,6 b' class cmdalias(object):' | |||
|
448 | 448 | else: |
|
449 | 449 | self.fn, self.opts = tableentry |
|
450 | 450 | |
|
451 | self.args = aliasargs(self.fn, args) | |
|
452 | 451 | if self.help.startswith("hg " + cmd): |
|
453 | 452 | # drop prefix in old-style help lines so hg shows the alias |
|
454 | 453 | self.help = self.help[4 + len(cmd):] |
@@ -462,6 +461,11 b' class cmdalias(object):' | |||
|
462 | 461 | self.badalias = (_("alias '%s' resolves to ambiguous command '%s'") |
|
463 | 462 | % (self.name, cmd)) |
|
464 | 463 | |
|
464 | @property | |
|
465 | def args(self): | |
|
466 | args = map(util.expandpath, self.givenargs) | |
|
467 | return aliasargs(self.fn, args) | |
|
468 | ||
|
465 | 469 | def __getattr__(self, name): |
|
466 | 470 | adefaults = {'norepo': True, 'optionalrepo': False, 'inferrepo': False} |
|
467 | 471 | if name not in adefaults: |
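
Making `args` a read-only property defers the `util.expandpath`/`aliasargs` work until the alias is actually invoked, by which point `self.fn` has been resolved. The lazy-attribute shape, reduced to plain Python (the expansion helper is a stand-in):

    import os

    def aliasargs(fn, givenargs):
        # stand-in for dispatch.aliasargs: merge fn's defaults with args
        return list(getattr(fn, 'defaultargs', [])) + givenargs

    class cmdalias(object):
        def __init__(self, definition):
            self.fn = None  # resolved later, while parsing the definition
            self.givenargs = definition.split()[1:]

        @property
        def args(self):
            # recomputed on each access, so it always reflects self.fn
            args = [os.path.expanduser(a) for a in self.givenargs]
            return aliasargs(self.fn, args)

    a = cmdalias('log -l 5')
    print(a.args)  # ['-l', '5'] although fn was unknown at __init__ time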
@@ -629,10 +633,16 b' def runcommand(lui, repo, cmd, fullargs,' | |||
|
629 | 633 | # run pre-hook, and abort if it fails |
|
630 | 634 | hook.hook(lui, repo, "pre-%s" % cmd, True, args=" ".join(fullargs), |
|
631 | 635 | pats=cmdpats, opts=cmdoptions) |
|
632 | ret = _runcommand(ui, options, cmd, d) | |
|
633 | # run post-hook, passing command result | |
|
634 | hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs), | |
|
635 | result=ret, pats=cmdpats, opts=cmdoptions) | |
|
636 | try: | |
|
637 | ret = _runcommand(ui, options, cmd, d) | |
|
638 | # run post-hook, passing command result | |
|
639 | hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs), | |
|
640 | result=ret, pats=cmdpats, opts=cmdoptions) | |
|
641 | except Exception: | |
|
642 | # run failure hook and re-raise | |
|
643 | hook.hook(lui, repo, "fail-%s" % cmd, False, args=" ".join(fullargs), | |
|
644 | pats=cmdpats, opts=cmdoptions) | |
|
645 | raise | |
|
636 | 646 | return ret |
|
637 | 647 | |
|
638 | 648 | def _getlocal(ui, rpath, wd=None): |
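
The try/except above guarantees that exactly one of `post-CMD` or `fail-CMD` fires per command, and that the failure hook never swallows the original exception. The control flow in isolation (the hook runner is a stub):

    def runhook(name, **kwargs):
        print('hook: %s %r' % (name, kwargs))

    def runcommand(cmd, d):
        runhook('pre-%s' % cmd)
        try:
            ret = d()
            # success path: the post-hook sees the command's result
            runhook('post-%s' % cmd, result=ret)
        except Exception:
            # failure path: the fail-hook runs, then the error propagates
            runhook('fail-%s' % cmd)
            raise
        return ret

    runcommand('update', lambda: 0)
    try:
        runcommand('update', lambda: 1 // 0)
    except ZeroDivisionError:
        pass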
@@ -660,12 +670,8 b' def _getlocal(ui, rpath, wd=None):' | |||
|
660 | 670 | |
|
661 | 671 | return path, lui |
|
662 | 672 | |
|
663 | def _checkshellalias(lui, ui, args, precheck=True): | |
|
664 | """Return the function to run the shell alias, if it is required | |
|
665 | ||
|
666 | 'precheck' is whether this function is invoked before adding | |
|
667 | aliases or not. | |
|
668 | """ | |
|
673 | def _checkshellalias(lui, ui, args): | |
|
674 | """Return the function to run the shell alias, if it is required""" | |
|
669 | 675 | options = {} |
|
670 | 676 | |
|
671 | 677 | try: |
@@ -676,16 +682,11 b' def _checkshellalias(lui, ui, args, prec' | |||
|
676 | 682 | if not args: |
|
677 | 683 | return |
|
678 | 684 | |
|
679 | if precheck: | |
|
680 | strict = True | |
|
681 | cmdtable = commands.table.copy() | |
|
682 | addaliases(lui, cmdtable) | |
|
683 | else: | |
|
684 | strict = False | |
|
685 | cmdtable = commands.table | |
|
685 | cmdtable = commands.table | |
|
686 | 686 | |
|
687 | 687 | cmd = args[0] |
|
688 | 688 | try: |
|
689 | strict = ui.configbool("ui", "strict") | |
|
689 | 690 | aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict) |
|
690 | 691 | except (error.AmbiguousCommand, error.UnknownCommand): |
|
691 | 692 | return |
@@ -735,12 +736,6 b' def _dispatch(req):' | |||
|
735 | 736 | rpath = _earlygetopt(["-R", "--repository", "--repo"], args) |
|
736 | 737 | path, lui = _getlocal(ui, rpath) |
|
737 | 738 | |
|
738 | # Now that we're operating in the right directory/repository with | |
|
739 | # the right config settings, check for shell aliases | |
|
740 | shellaliasfn = _checkshellalias(lui, ui, args) | |
|
741 | if shellaliasfn: | |
|
742 | return shellaliasfn() | |
|
743 | ||
|
744 | 739 | # Configure extensions in phases: uisetup, extsetup, cmdtable, and |
|
745 | 740 | # reposetup. Programs like TortoiseHg will call _dispatch several |
|
746 | 741 | # times so we keep track of configured extensions in _loaded. |
@@ -762,13 +757,11 b' def _dispatch(req):' | |||
|
762 | 757 | |
|
763 | 758 | addaliases(lui, commands.table) |
|
764 | 759 | |
|
765 | if not lui.configbool("ui", "strict"): | |
|
766 | # All aliases and commands are completely defined, now. | |
|
767 | # Check abbreviation/ambiguity of shell alias again, because shell | |
|
768 | # alias may cause failure of "_parse" (see issue4355) | |
|
769 | shellaliasfn = _checkshellalias(lui, ui, args, precheck=False) | |
|
770 | if shellaliasfn: | |
|
771 | return shellaliasfn() | |
|
760 | # All aliases and commands are completely defined, now. | |
|
761 | # Check abbreviation/ambiguity of shell alias. | |
|
762 | shellaliasfn = _checkshellalias(lui, ui, args) | |
|
763 | if shellaliasfn: | |
|
764 | return shellaliasfn() | |
|
772 | 765 | |
|
773 | 766 | # check for fallback encoding |
|
774 | 767 | fallback = lui.config('ui', 'fallbackencoding') |
@@ -825,7 +818,7 b' def _dispatch(req):' | |||
|
825 | 818 | |
|
826 | 819 | if cmdoptions.get('insecure', False): |
|
827 | 820 | for ui_ in uis: |
|
828 | ui_.setconfig('web', 'cacerts', '!', '--insecure') | |
|
821 | ui_.insecureconnections = True | |
|
829 | 822 | |
|
830 | 823 | if options['version']: |
|
831 | 824 | return commands.version_(ui) |
@@ -15,12 +15,17 b' from __future__ import absolute_import' | |||
|
15 | 15 | |
|
16 | 16 | # Do not import anything here, please |
|
17 | 17 | |
|
18 | class HintException(Exception): | |
|
18 | class Hint(object): | |
|
19 | """Mix-in to provide a hint of an error | |
|
20 | ||
|
21 | This should come first in the inheritance list to consume a hint and | |
|
22 | pass remaining arguments to the exception class. | |
|
23 | """ | |
|
19 | 24 | def __init__(self, *args, **kw): |
|
20 | Exception.__init__(self, *args) | |
|
21 | self.hint = kw.get('hint') | |
|
25 | self.hint = kw.pop('hint', None) | |
|
26 | super(Hint, self).__init__(*args, **kw) | |
|
22 | 27 | |
|
23 | class RevlogError(HintException): | |
|
28 | class RevlogError(Hint, Exception): | |
|
24 | 29 | pass |
|
25 | 30 | |
|
26 | 31 | class FilteredIndexError(IndexError): |
@@ -50,10 +55,10 b' class ManifestLookupError(LookupError):' | |||
|
50 | 55 | class CommandError(Exception): |
|
51 | 56 | """Exception raised on errors in parsing the command line.""" |
|
52 | 57 | |
|
53 | class InterventionRequired(HintException): | |
|
58 | class InterventionRequired(Hint, Exception): | |
|
54 | 59 | """Exception raised when a command requires human intervention.""" |
|
55 | 60 | |
|
56 | class Abort(HintException): | |
|
61 | class Abort(Hint, Exception): | |
|
57 | 62 | """Raised if a command needs to print an error and exit.""" |
|
58 | 63 | |
|
59 | 64 | class HookLoadError(Abort): |
@@ -87,10 +92,10 b' class ResponseExpected(Abort):' | |||
|
87 | 92 | from .i18n import _ |
|
88 | 93 | Abort.__init__(self, _('response expected')) |
|
89 | 94 | |
|
90 | class OutOfBandError(HintException): | |
|
95 | class OutOfBandError(Hint, Exception): | |
|
91 | 96 | """Exception raised when a remote repo reports failure""" |
|
92 | 97 | |
|
93 | class ParseError(HintException): | |
|
98 | class ParseError(Hint, Exception): | |
|
94 | 99 | """Raised when parsing config files and {rev,file}sets (msg[, pos])""" |
|
95 | 100 | |
|
96 | 101 | class UnknownIdentifier(ParseError): |
@@ -102,7 +107,7 b' class UnknownIdentifier(ParseError):' | |||
|
102 | 107 | self.function = function |
|
103 | 108 | self.symbols = symbols |
|
104 | 109 | |
|
105 | class RepoError(HintException): | |
|
110 | class RepoError(Hint, Exception): | |
|
106 | 111 | pass |
|
107 | 112 | |
|
108 | 113 | class RepoLookupError(RepoError): |
@@ -235,3 +240,6 b' class InvalidBundleSpecification(Excepti' | |||
|
235 | 240 | |
|
236 | 241 | class UnsupportedBundleSpecification(Exception): |
|
237 | 242 | """error raised when a bundle specification is not supported.""" |
|
243 | ||
|
244 | class CorruptedState(Exception): | |
|
245 | """error raised when a command is not able to read its state from file""" |
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | import hashlib | |
|
11 | 12 | |
|
12 | 13 | from .i18n import _ |
|
13 | 14 | from .node import ( |
@@ -857,14 +858,14 b' def _pushbundle2(pushop):' | |||
|
857 | 858 | try: |
|
858 | 859 | reply = pushop.remote.unbundle(stream, ['force'], 'push') |
|
859 | 860 | except error.BundleValueError as exc: |
|
860 | raise error.Abort('missing support for %s' % exc) | |
|
861 | raise error.Abort(_('missing support for %s') % exc) | |
|
861 | 862 | try: |
|
862 | 863 | trgetter = None |
|
863 | 864 | if pushback: |
|
864 | 865 | trgetter = pushop.trmanager.transaction |
|
865 | 866 | op = bundle2.processbundle(pushop.repo, reply, trgetter) |
|
866 | 867 | except error.BundleValueError as exc: |
|
867 | raise error.Abort('missing support for %s' % exc) | |
|
868 | raise error.Abort(_('missing support for %s') % exc) | |
|
868 | 869 | except bundle2.AbortFromPart as exc: |
|
869 | 870 | pushop.ui.status(_('remote: %s\n') % exc) |
|
870 | 871 | raise error.Abort(_('push failed on remote'), hint=exc.hint) |
@@ -1055,7 +1056,8 b' class pulloperation(object):' | |||
|
1055 | 1056 | # revision we try to pull (None is "all") |
|
1056 | 1057 | self.heads = heads |
|
1057 | 1058 | # bookmark pulled explicitly |
|
1058 | self.explicitbookmarks = bookmarks | |
|
1059 | self.explicitbookmarks = [repo._bookmarks.expandname(bookmark) | |
|
1060 | for bookmark in bookmarks] | |
|
1059 | 1061 | # do we force pull? |
|
1060 | 1062 | self.force = force |
|
1061 | 1063 | # whether a streaming clone was requested |
@@ -1323,7 +1325,7 b' def _pullbundle2(pullop):' | |||
|
1323 | 1325 | try: |
|
1324 | 1326 | op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction) |
|
1325 | 1327 | except error.BundleValueError as exc: |
|
1326 | raise error.Abort('missing support for %s' % exc) | |
|
1328 | raise error.Abort(_('missing support for %s') % exc) | |
|
1327 | 1329 | |
|
1328 | 1330 | if pullop.fetch: |
|
1329 | 1331 | results = [cg['return'] for cg in op.records['changegroup']] |
@@ -1646,7 +1648,7 b' def check_heads(repo, their_heads, conte' | |||
|
1646 | 1648 | Used by peer for unbundling. |
|
1647 | 1649 | """ |
|
1648 | 1650 | heads = repo.heads() |
|
1649 | heads_hash = util.sha1(''.join(sorted(heads))).digest() | |
|
1651 | heads_hash = hashlib.sha1(''.join(sorted(heads))).digest() | |
|
1650 | 1652 | if not (their_heads == ['force'] or their_heads == heads or |
|
1651 | 1653 | their_heads == ['hashed', heads_hash]): |
|
1652 | 1654 | # someone else committed/pushed/unbundled while we |
@@ -25,7 +25,7 b' from . import (' | |||
|
25 | 25 | _aftercallbacks = {} |
|
26 | 26 | _order = [] |
|
27 | 27 | _builtin = set(['hbisect', 'bookmarks', 'parentrevspec', 'progress', 'interhg', |
|
28 | 'inotify']) | |
|
28 | 'inotify', 'hgcia']) | |
|
29 | 29 | |
|
30 | 30 | def extensions(ui=None): |
|
31 | 31 | if ui: |
@@ -127,6 +127,21 b' def load(ui, name, path):' | |||
|
127 | 127 | fn(loaded=True) |
|
128 | 128 | return mod |
|
129 | 129 | |
|
130 | def _runuisetup(name, ui): | |
|
131 | uisetup = getattr(_extensions[name], 'uisetup', None) | |
|
132 | if uisetup: | |
|
133 | uisetup(ui) | |
|
134 | ||
|
135 | def _runextsetup(name, ui): | |
|
136 | extsetup = getattr(_extensions[name], 'extsetup', None) | |
|
137 | if extsetup: | |
|
138 | try: | |
|
139 | extsetup(ui) | |
|
140 | except TypeError: | |
|
141 | if extsetup.func_code.co_argcount != 0: | |
|
142 | raise | |
|
143 | extsetup() # old extsetup with no ui argument | |
|
144 | ||
|
130 | 145 | def loadall(ui): |
|
131 | 146 | result = ui.configitems("extensions") |
|
132 | 147 | newindex = len(_order) |
@@ -148,19 +163,10 b' def loadall(ui):' | |||
|
148 | 163 | ui.traceback() |
|
149 | 164 | |
|
150 | 165 | for name in _order[newindex:]: |
|
151 | uisetup = getattr(_extensions[name], 'uisetup', None) | |
|
152 | if uisetup: | |
|
153 | uisetup(ui) | |
|
166 | _runuisetup(name, ui) | |
|
154 | 167 | |
|
155 | 168 | for name in _order[newindex:]: |
|
156 | extsetup = getattr(_extensions[name], 'extsetup', None) | |
|
157 | if extsetup: | |
|
158 | try: | |
|
159 | extsetup(ui) | |
|
160 | except TypeError: | |
|
161 | if extsetup.func_code.co_argcount != 0: | |
|
162 | raise | |
|
163 | extsetup() # old extsetup with no ui argument | |
|
169 | _runextsetup(name, ui) | |
|
164 | 170 | |
|
165 | 171 | # Call aftercallbacks that were never met. |
|
166 | 172 | for shortname in _aftercallbacks: |
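
The extracted `_runextsetup` keeps the compatibility shim for extensions whose `extsetup` predates the `ui` argument: call with `ui`, and only if that raises TypeError for a genuinely zero-argument function, retry without it. The dispatch logic on its own (using `__code__`, the portable spelling of the `func_code` attribute seen above):

    def _runextsetup(extmodule, ui):
        extsetup = getattr(extmodule, 'extsetup', None)
        if not extsetup:
            return
        try:
            extsetup(ui)
        except TypeError:
            # TypeErrors raised inside a (ui)-style extsetup propagate
            if extsetup.__code__.co_argcount != 0:
                raise
            extsetup()  # old extsetup with no ui argument

    class modernext(object):
        @staticmethod
        def extsetup(ui):
            print('setup with %r' % ui)

    class legacyext(object):
        @staticmethod
        def extsetup():
            print('setup, old style')

    _runextsetup(modernext, 'ui')
    _runextsetup(legacyext, 'ui')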
@@ -7,7 +7,6 b'' | |||
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | import cPickle | |
|
11 | 10 | import os |
|
12 | 11 | |
|
13 | 12 | from .i18n import _ |
@@ -20,8 +19,11 b' from . import (' | |||
|
20 | 19 | encoding, |
|
21 | 20 | error, |
|
22 | 21 | templater, |
|
22 | util, | |
|
23 | 23 | ) |
|
24 | 24 | |
|
25 | pickle = util.pickle | |
|
26 | ||
|
25 | 27 | class baseformatter(object): |
|
26 | 28 | def __init__(self, ui, topic, opts): |
|
27 | 29 | self._ui = ui |
@@ -107,7 +109,7 b' class pickleformatter(baseformatter):' | |||
|
107 | 109 | self._data.append(self._item) |
|
108 | 110 | def end(self): |
|
109 | 111 | baseformatter.end(self) |
|
110 | self._ui.write(cPickle.dumps(self._data)) | |
|
112 | self._ui.write(pickle.dumps(self._data)) | |
|
111 | 113 | |
|
112 | 114 | def _jsonifyobj(v): |
|
113 | 115 | if isinstance(v, tuple): |
@@ -19,8 +19,6 b' Data depends on type.' | |||
|
19 | 19 | |
|
20 | 20 | from __future__ import absolute_import |
|
21 | 21 | |
|
22 | import heapq | |
|
23 | ||
|
24 | 22 | from .node import nullrev |
|
25 | 23 | from . import ( |
|
26 | 24 | revset, |
@@ -32,207 +30,11 b" PARENT = 'P'" | |||
|
32 | 30 | GRANDPARENT = 'G' |
|
33 | 31 | MISSINGPARENT = 'M' |
|
34 | 32 | # Style of line to draw. None signals a line that ends and is removed at this |
|
35 | # point. | |
|
33 | # point. A number prefix means only the last N characters of the current block | |
|
34 | # will use that style, the rest will use the PARENT style. Add a - sign | |
|
35 | # (so making N negative) and all but the first N characters use that style. | |
|
36 | 36 | EDGES = {PARENT: '|', GRANDPARENT: ':', MISSINGPARENT: None} |
|
37 | 37 | |
|
38 | def groupbranchiter(revs, parentsfunc, firstbranch=()): | |
|
39 | """Yield revisions from heads to roots one (topo) branch at a time. | |
|
40 | ||
|
41 | This function aims to be used by a graph generator that wishes to minimize | |
|
42 | the number of parallel branches and their interleaving. | |
|
43 | ||
|
44 | Example iteration order (numbers show the "true" order in a changelog): | |
|
45 | ||
|
46 | o 4 | |
|
47 | | | |
|
48 | o 1 | |
|
49 | | | |
|
50 | | o 3 | |
|
51 | | | | |
|
52 | | o 2 | |
|
53 | |/ | |
|
54 | o 0 | |
|
55 | ||
|
56 | Note that the ancestors of merges are understood by the current | |
|
57 | algorithm to be on the same branch. This means no reordering will | |
|
58 | occur behind a merge. | |
|
59 | """ | |
|
60 | ||
|
61 | ### Quick summary of the algorithm | |
|
62 | # | |
|
63 | # This function is based around a "retention" principle. We keep revisions | |
|
64 | # in memory until we are ready to emit a whole branch that immediately | |
|
65 | # "merges" into an existing one. This reduces the number of parallel | |
|
66 | # branches with interleaved revisions. | |
|
67 | # | |
|
68 | # During iteration revs are split into two groups: | |
|
69 | # A) revision already emitted | |
|
70 | # B) revision in "retention". They are stored as different subgroups. | |
|
71 | # | |
|
72 | # for each REV, we do the following logic: | |
|
73 | # | |
|
74 | # 1) if REV is a parent of (A), we will emit it. If there is a | |
|
75 | # retention group ((B) above) that is blocked on REV being | |
|
76 | # available, we emit all the revisions out of that retention | |
|
77 | # group first. | |
|
78 | # | |
|
79 | # 2) else, we'll search for a subgroup in (B) awaiting for REV to be | |
|
80 | # available, if such subgroup exist, we add REV to it and the subgroup is | |
|
81 | # now awaiting for REV.parents() to be available. | |
|
82 | # | |
|
83 | # 3) finally if no such group existed in (B), we create a new subgroup. | |
|
84 | # | |
|
85 | # | |
|
86 | # To bootstrap the algorithm, we emit the tipmost revision (which | |
|
87 | # puts it in group (A) from above). | |
|
88 | ||
|
89 | revs.sort(reverse=True) | |
|
90 | ||
|
91 | # Set of parents of revision that have been emitted. They can be considered | |
|
92 | # unblocked as the graph generator is already aware of them so there is no | |
|
93 | # need to delay the revisions that reference them. | |
|
94 | # | |
|
95 | # If someone wants to prioritize a branch over the others, pre-filling this | |
|
96 | # set will force all other branches to wait until this branch is ready to be | |
|
97 | # emitted. | |
|
98 | unblocked = set(firstbranch) | |
|
99 | ||
|
100 | # list of groups waiting to be displayed, each group is defined by: | |
|
101 | # | |
|
102 | # (revs: lists of revs waiting to be displayed, | |
|
103 | # blocked: set of that cannot be displayed before those in 'revs') | |
|
104 | # | |
|
105 | # The second value ('blocked') correspond to parents of any revision in the | |
|
106 | # group ('revs') that is not itself contained in the group. The main idea | |
|
107 | # of this algorithm is to delay as much as possible the emission of any | |
|
108 | # revision. This means waiting for the moment we are about to display | |
|
109 | # these parents to display the revs in a group. | |
|
110 | # | |
|
111 | # This first implementation is smart until it encounters a merge: it will | |
|
112 | # emit revs as soon as any parent is about to be emitted and can grow an | |
|
113 | # arbitrary number of revs in 'blocked'. In practice this mean we properly | |
|
114 | # retains new branches but gives up on any special ordering for ancestors | |
|
115 | # of merges. The implementation can be improved to handle this better. | |
|
116 | # | |
|
117 | # The first subgroup is special. It corresponds to all the revision that | |
|
118 | # were already emitted. The 'revs' lists is expected to be empty and the | |
|
119 | # 'blocked' set contains the parents revisions of already emitted revision. | |
|
120 | # | |
|
121 | # You could pre-seed the <parents> set of groups[0] to a specific | |
|
122 | # changesets to select what the first emitted branch should be. | |
|
123 | groups = [([], unblocked)] | |
|
124 | pendingheap = [] | |
|
125 | pendingset = set() | |
|
126 | ||
|
127 | heapq.heapify(pendingheap) | |
|
128 | heappop = heapq.heappop | |
|
129 | heappush = heapq.heappush | |
|
130 | for currentrev in revs: | |
|
131 | # Heap works with smallest element, we want highest so we invert | |
|
132 | if currentrev not in pendingset: | |
|
133 | heappush(pendingheap, -currentrev) | |
|
134 | pendingset.add(currentrev) | |
|
135 | # iterates on pending rev until after the current rev have been | |
|
136 | # processed. | |
|
137 | rev = None | |
|
138 | while rev != currentrev: | |
|
139 | rev = -heappop(pendingheap) | |
|
140 | pendingset.remove(rev) | |
|
141 | ||
|
142 | # Seek for a subgroup blocked, waiting for the current revision. | |
|
143 | matching = [i for i, g in enumerate(groups) if rev in g[1]] | |
|
144 | ||
|
145 | if matching: | |
|
146 | # The main idea is to gather together all sets that are blocked | |
|
147 | # on the same revision. | |
|
148 | # | |
|
149 | # Groups are merged when a common blocking ancestor is | |
|
150 | # observed. For example, given two groups: | |
|
151 | # | |
|
152 | # revs [5, 4] waiting for 1 | |
|
153 | # revs [3, 2] waiting for 1 | |
|
154 | # | |
|
155 | # These two groups will be merged when we process | |
|
156 | # 1. In theory, we could have merged the groups when | |
|
157 | # we added 2 to the group it is now in (we could have | |
|
158 | # noticed the groups were both blocked on 1 then), but | |
|
159 | # the way it works now makes the algorithm simpler. | |
|
160 | # | |
|
161 | # We also always keep the oldest subgroup first. We can | |
|
162 | # probably improve the behavior by having the longest set | |
|
163 | # first. That way, graph algorithms could minimise the length | |
|
164 | # of parallel lines their drawing. This is currently not done. | |
|
165 | targetidx = matching.pop(0) | |
|
166 | trevs, tparents = groups[targetidx] | |
|
167 | for i in matching: | |
|
168 | gr = groups[i] | |
|
169 | trevs.extend(gr[0]) | |
|
170 | tparents |= gr[1] | |
|
171 | # delete all merged subgroups (except the one we kept) | |
|
172 | # (starting from the last subgroup for performance and | |
|
173 | # sanity reasons) | |
|
174 | for i in reversed(matching): | |
|
175 | del groups[i] | |
|
176 | else: | |
|
177 | # This is a new head. We create a new subgroup for it. | |
|
178 | targetidx = len(groups) | |
|
179 | groups.append(([], set([rev]))) | |
|
180 | ||
|
181 | gr = groups[targetidx] | |
|
182 | ||
|
183 | # We now add the current nodes to this subgroups. This is done | |
|
184 | # after the subgroup merging because all elements from a subgroup | |
|
185 | # that relied on this rev must precede it. | |
|
186 | # | |
|
187 | # we also update the <parents> set to include the parents of the | |
|
188 | # new nodes. | |
|
189 | if rev == currentrev: # only display stuff in rev | |
|
190 | gr[0].append(rev) | |
|
191 | gr[1].remove(rev) | |
|
192 | parents = [p for p in parentsfunc(rev) if p > nullrev] | |
|
193 | gr[1].update(parents) | |
|
194 | for p in parents: | |
|
195 | if p not in pendingset: | |
|
196 | pendingset.add(p) | |
|
197 | heappush(pendingheap, -p) | |
|
198 | ||
|
199 | # Look for a subgroup to display | |
|
200 | # | |
|
201 | # When unblocked is empty (if clause), we were not waiting for any | |
|
202 | # revisions during the first iteration (if no priority was given) or | |
|
203 | # if we emitted a whole disconnected set of the graph (reached a | |
|
204 | # root). In that case we arbitrarily take the oldest known | |
|
205 | # subgroup. The heuristic could probably be better. | |
|
206 | # | |
|
207 | # Otherwise (elif clause) if the subgroup is blocked on | |
|
208 | # a revision we just emitted, we can safely emit it as | |
|
209 | # well. | |
|
210 | if not unblocked: | |
|
211 | if len(groups) > 1: # display other subset | |
|
212 | targetidx = 1 | |
|
213 | gr = groups[1] | |
|
214 | elif not gr[1] & unblocked: | |
|
215 | gr = None | |
|
216 | ||
|
217 | if gr is not None: | |
|
218 | # update the set of awaited revisions with the one from the | |
|
219 | # subgroup | |
|
220 | unblocked |= gr[1] | |
|
221 | # output all revisions in the subgroup | |
|
222 | for r in gr[0]: | |
|
223 | yield r | |
|
224 | # delete the subgroup that we just output, | |
|
225 | # unless it is groups[0], in which case we just empty it. | |
|
226 | if targetidx: | |
|
227 | del groups[targetidx] | |
|
228 | else: | |
|
229 | gr[0][:] = [] | |
|
230 | # Check if we have some subgroups waiting for revisions we are not going to | |
|
231 | # iterate over, and flush them. | |
|
232 | for g in groups: | |
|
233 | for r in g[0]: | |
|
234 | yield r | |
|
235 | ||
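
To make the grouping behaviour above concrete, here is a minimal driver (a sketch, not part of this change) that runs the iterator over a toy DAG with the default empty ``firstbranch``; the ``parents`` mapping is an assumption and the expected output was traced by hand::

    # Toy DAG: two linear branches (1-3 and 2-4) meeting at root 0;
    # -1 stands for nullrev.
    parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, -1), 4: (2, -1)}

    def parentsfunc(rev):
        return parents[rev]

    # Iterate newest-to-oldest, as a graph log would.
    revs = [4, 3, 2, 1, 0]
    print(list(groupbranchiter(revs, parentsfunc)))
    # Expected: [4, 2, 3, 1, 0] -- each branch is emitted as a block
    # instead of strictly interleaving revisions by number.
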
|
236 | 38 | def dagwalker(repo, revs): |
|
237 | 39 | """cset DAG generator yielding (id, CHANGESET, ctx, [parentinfo]) tuples |
|
238 | 40 | |
@@ -250,16 +52,6 b' def dagwalker(repo, revs):' | |||
|
250 | 52 | |
|
251 | 53 | gpcache = {} |
|
252 | 54 | |
|
253 | if repo.ui.configbool('experimental', 'graph-group-branches', False): | |
|
254 | firstbranch = () | |
|
255 | firstbranchrevset = repo.ui.config( | |
|
256 | 'experimental', 'graph-group-branches.firstbranch', '') | |
|
257 | if firstbranchrevset: | |
|
258 | firstbranch = repo.revs(firstbranchrevset) | |
|
259 | parentrevs = repo.changelog.parentrevs | |
|
260 | revs = groupbranchiter(revs, parentrevs, firstbranch) | |
|
261 | revs = revset.baseset(revs) | |
|
262 | ||
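
For reference, the code path above is driven by experimental configuration of this shape (``firstbranch`` takes a revset; the values shown are illustrative)::

    [experimental]
    graph-group-branches = true
    graph-group-branches.firstbranch = branch(default)
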
|
263 | 55 | for rev in revs: |
|
264 | 56 | ctx = repo[rev] |
|
265 | 57 | # partition into parents in the rev set and missing parents, then |
@@ -653,6 +445,22 b' def ascii(ui, state, type, char, text, c' | |||
|
653 | 445 | while len(text) < len(lines): |
|
654 | 446 | text.append("") |
|
655 | 447 | |
|
448 | if any(len(char) > 1 for char in edgemap.values()): | |
|
449 | # Limit drawing an edge to the first or last N lines of the current | 
|
450 | # section; the rest of the edge is drawn like a parent line. | 
|
451 | parent = state['styles'][PARENT][-1] | |
|
452 | def _drawgp(char, i): | |
|
453 | # should a grandparent character be drawn for this line? | |
|
454 | if len(char) < 2: | |
|
455 | return True | |
|
456 | num = int(char[:-1]) | |
|
457 | # either skip first num lines or take last num lines, based on sign | |
|
458 | return -num <= i if num < 0 else (len(lines) - i) <= num | |
|
459 | for i, line in enumerate(lines): | |
|
460 | line[:] = [c[-1] if _drawgp(c, i) else parent for c in line] | |
|
461 | edgemap.update( | |
|
462 | (e, (c if len(c) < 2 else parent)) for e, c in edgemap.items()) | |
|
463 | ||
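
To unpack the sign convention in ``_drawgp`` above: a multi-character edge value is a signed count followed by the edge glyph, where a positive count keeps the glyph on the last N lines and a negative count skips the first N. A standalone check (toy values and a ``.`` placeholder glyph, not part of the patch)::

    def drawgp(char, i, nlines):
        # mirrors _drawgp above for a section of nlines lines
        if len(char) < 2:
            return True
        num = int(char[:-1])
        return -num <= i if num < 0 else (nlines - i) <= num

    print([drawgp('2.', i, 5) for i in range(5)])   # [False, False, False, True, True]
    print([drawgp('-2.', i, 5) for i in range(5)])  # [False, False, True, True, True]
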
|
656 | 464 | # print lines |
|
657 | 465 | indentation_level = max(ncols, ncols + coldiff) |
|
658 | 466 | for (line, logstr) in zip(lines, text): |
@@ -811,6 +811,15 b' variables it is passed are listed with n' | |||
|
811 | 811 | dictionary of options (with unspecified options set to their defaults). |
|
812 | 812 | ``$HG_PATS`` is a list of arguments. Hook failure is ignored. |
|
813 | 813 | |
|
814 | ``fail-<command>`` | |
|
815 | Run after a failed invocation of an associated command. The contents | |
|
816 | of the command line are passed as ``$HG_ARGS``. Parsed command line | |
|
817 | arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain | |
|
818 | string representations of the python data internally passed to | |
|
819 | <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified | |
|
820 | options set to their defaults). ``$HG_PATS`` is a list of arguments. | |
|
821 | Hook failure is ignored. | |
|
822 | ||
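
For example, a shell hook that records failed pulls could look like this (hypothetical log path)::

    [hooks]
    fail-pull = echo "pull failed: $HG_ARGS" >> /tmp/hg-failures.log
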
|
814 | 823 | ``pre-<command>`` |
|
815 | 824 | Run before executing the associated command. The contents of the |
|
816 | 825 | command line are passed as ``$HG_ARGS``. Parsed command line arguments |
@@ -967,6 +976,8 b' is treated as a failure.' | |||
|
967 | 976 | ``hostfingerprints`` |
|
968 | 977 | -------------------- |
|
969 | 978 | |
|
979 | (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` option instead.) | 
|
980 | ||
|
970 | 981 | Fingerprints of the certificates of known HTTPS servers. |
|
971 | 982 | |
|
972 | 983 | An HTTPS connection to a server with a fingerprint configured here will |
@@ -986,6 +997,114 b' For example::' | |||
|
986 | 997 | hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33 |
|
987 | 998 | hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33 |
|
988 | 999 | |
|
1000 | ``hostsecurity`` | |
|
1001 | ---------------- | |
|
1002 | ||
|
1003 | Used to specify global and per-host security settings for connecting to | |
|
1004 | other machines. | |
|
1005 | ||
|
1006 | The following options control default behavior for all hosts. | |
|
1007 | ||
|
1008 | ``ciphers`` | |
|
1009 | Defines the cryptographic ciphers to use for connections. | |
|
1010 | ||
|
1011 | Value must be a valid OpenSSL Cipher List Format as documented at | |
|
1012 | https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT. | |
|
1013 | ||
|
1014 | This setting is for advanced users only. Setting to incorrect values | |
|
1015 | can significantly lower connection security or decrease performance. | |
|
1016 | You have been warned. | |
|
1017 | ||
|
1018 | This option requires Python 2.7. | |
|
1019 | ||
|
1020 | ``minimumprotocol`` | |
|
1021 | Defines the minimum channel encryption protocol to use. | |
|
1022 | ||
|
1023 | By default, the highest version of TLS supported by both client and server | |
|
1024 | is used. | |
|
1025 | ||
|
1026 | Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``. | |
|
1027 | ||
|
1028 | When running on an old Python version, only ``tls1.0`` is allowed since | |
|
1029 | old versions of Python only support up to TLS 1.0. | |
|
1030 | ||
|
1031 | When running a Python that supports modern TLS versions, the default is | |
|
1032 | ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this | |
|
1033 | weakens security and should only be used as a feature of last resort if | |
|
1034 | a server does not support TLS 1.1+. | |
|
1035 | ||
|
1036 | Options in the ``[hostsecurity]`` section can have the form | |
|
1037 | ``hostname``:``setting``. This allows multiple settings to be defined on a | |
|
1038 | per-host basis. | |
|
1039 | ||
|
1040 | The following per-host settings can be defined. | |
|
1041 | ||
|
1042 | ``ciphers`` | |
|
1043 | This behaves like ``ciphers`` as described above except it only applies | |
|
1044 | to the host on which it is defined. | |
|
1045 | ||
|
1046 | ``fingerprints`` | |
|
1047 | A list of hashes of the DER encoded peer/remote certificate. Values have | |
|
1048 | the form ``algorithm``:``fingerprint``, e.g. | 
|
1049 | ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``. | |
|
1050 | ||
|
1051 | The following algorithms/prefixes are supported: ``sha1``, ``sha256``, | |
|
1052 | ``sha512``. | |
|
1053 | ||
|
1054 | Use of ``sha256`` or ``sha512`` is preferred. | |
|
1055 | ||
|
1056 | If a fingerprint is specified, the CA chain is not validated for this | |
|
1057 | host and Mercurial will require the remote certificate to match one | |
|
1058 | of the fingerprints specified. This means if the server updates its | |
|
1059 | certificate, Mercurial will abort until a new fingerprint is defined. | |
|
1060 | This can provide stronger security than traditional CA-based validation | |
|
1061 | at the expense of convenience. | |
|
1062 | ||
|
1063 | This option takes precedence over ``verifycertsfile``. | |
|
1064 | ||
|
1065 | ``minimumprotocol`` | |
|
1066 | This behaves like ``minimumprotocol`` as described above except it | |
|
1067 | only applies to the host on which it is defined. | |
|
1068 | ||
|
1069 | ``verifycertsfile`` | |
|
1070 | Path to a file containing a list of PEM encoded certificates used to | 
|
1071 | verify the server certificate. Environment variables and ``~user`` | |
|
1072 | constructs are expanded in the filename. | |
|
1073 | ||
|
1074 | The server certificate or the certificate's certificate authority (CA) | |
|
1075 | must match a certificate from this file or certificate verification | |
|
1076 | will fail and connections to the server will be refused. | |
|
1077 | ||
|
1078 | If defined, only certificates provided by this file will be used: | |
|
1079 | ``web.cacerts`` and any system/default certificates will not be | |
|
1080 | used. | |
|
1081 | ||
|
1082 | This option has no effect if the per-host ``fingerprints`` option | |
|
1083 | is set. | |
|
1084 | ||
|
1085 | The format of the file is as follows:: | 
|
1086 | ||
|
1087 | -----BEGIN CERTIFICATE----- | |
|
1088 | ... (certificate in base64 PEM encoding) ... | |
|
1089 | -----END CERTIFICATE----- | |
|
1090 | -----BEGIN CERTIFICATE----- | |
|
1091 | ... (certificate in base64 PEM encoding) ... | |
|
1092 | -----END CERTIFICATE----- | |
|
1093 | ||
|
1094 | For example:: | |
|
1095 | ||
|
1096 | [hostsecurity] | |
|
1097 | hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2 | |
|
1098 | hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33 | |
|
1099 | foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem | |
|
1100 | ||
|
1101 | To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1 | |
|
1102 | when connecting to ``hg.example.com``:: | |
|
1103 | ||
|
1104 | [hostsecurity] | |
|
1105 | minimumprotocol = tls1.2 | |
|
1106 | hg.example.com:minimumprotocol = tls1.1 | |
|
1107 | ||
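
As an aside, a value for the ``fingerprints`` option can be computed with a few lines of standard-library Python (a sketch; the host name is an assumption, and this trusts whatever certificate the server presents at lookup time)::

    import hashlib
    import ssl

    def fingerprint(host, port=443):
        # Fetch the peer certificate, convert PEM to DER, and hash it.
        pem = ssl.get_server_certificate((host, port))
        der = ssl.PEM_cert_to_DER_cert(pem)
        return 'sha256:%s' % hashlib.sha256(der).hexdigest()

    print(fingerprint('hg.example.com'))
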
|
989 | 1108 | ``http_proxy`` |
|
990 | 1109 | -------------- |
|
991 | 1110 | |
@@ -1020,8 +1139,8 b' This section specifies behavior during m' | |||
|
1020 | 1139 | file in the changeset being merged or updated to, and has different |
|
1021 | 1140 | contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``, |
|
1022 | 1141 | abort on such files. With ``warn``, warn on such files and back them up as |
|
1023 | .orig. With ``ignore``, don't print a warning and back them up as | |
|
1024 | .orig. (default: ``abort``) | |
|
1142 | ``.orig``. With ``ignore``, don't print a warning and back them up as | |
|
1143 | ``.orig``. (default: ``abort``) | |
|
1025 | 1144 | |
|
1026 | 1145 | ``checkunknown`` |
|
1027 | 1146 | Controls behavior when an unknown file that isn't ignored has the same name |
@@ -1210,6 +1329,18 b' The following sub-options can be defined' | |||
|
1210 | 1329 | The URL to use for push operations. If not defined, the location |
|
1211 | 1330 | defined by the path's main entry is used. |
|
1212 | 1331 | |
|
1332 | ``pushrev`` | |
|
1333 | A revset defining which revisions to push by default. | |
|
1334 | ||
|
1335 | When :hg:`push` is executed without a ``-r`` argument, the revset | |
|
1336 | defined by this sub-option is evaluated to determine what to push. | |
|
1337 | ||
|
1338 | For example, a value of ``.`` will push the working directory's | |
|
1339 | revision by default. | |
|
1340 | ||
|
1341 | Revsets specifying bookmarks will not result in the bookmark being | |
|
1342 | pushed. | |
|
1343 | ||
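
A typical use (hypothetical URL) restricts the default push to the working directory's revision::

    [paths]
    default = https://hg.example.com/repo
    default:pushrev = .
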
|
1213 | 1344 | The following special named paths exist: |
|
1214 | 1345 | |
|
1215 | 1346 | ``default`` |
@@ -1442,16 +1573,6 b' Configuration for extensions that need t' | |||
|
1442 | 1573 | Optional. Method to enable TLS when connecting to mail server: starttls, |
|
1443 | 1574 | smtps or none. (default: none) |
|
1444 | 1575 | |
|
1445 | ``verifycert`` | |
|
1446 | Optional. Verification for the certificate of mail server, when | |
|
1447 | ``tls`` is starttls or smtps. "strict", "loose" or False. For | |
|
1448 | "strict" or "loose", the certificate is verified as same as the | |
|
1449 | verification for HTTPS connections (see ``[hostfingerprints]`` and | |
|
1450 | ``[web] cacerts`` also). For "strict", sending email is also | |
|
1451 | aborted, if there is no configuration for mail server in | |
|
1452 | ``[hostfingerprints]`` and ``[web] cacerts``. --insecure for | |
|
1453 | :hg:`email` overwrites this as "loose". (default: strict) | |
|
1454 | ||
|
1455 | 1576 | ``username`` |
|
1456 | 1577 | Optional. User name for authenticating with the SMTP server. |
|
1457 | 1578 | (default: None) |
@@ -1738,6 +1859,13 b' User interface controls.' | |||
|
1738 | 1859 | large organisation with its own Mercurial deployment process and crash |
|
1739 | 1860 | reports should be addressed to your internal support. |
|
1740 | 1861 | |
|
1862 | ``textwidth`` | |
|
1863 | Maximum width of help text. A longer line generated by ``hg help`` or | |
|
1864 | ``hg subcommand --help`` will be broken after white space to get this | |
|
1865 | width or the terminal width, whichever comes first. | |
|
1866 | A non-positive value will disable this and the terminal width will be | |
|
1867 | used. (default: 78) | |
|
1868 | ||
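
For example, to wrap help output at 72 columns::

    [ui]
    textwidth = 72
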
|
1741 | 1869 | ``timeout`` |
|
1742 | 1870 | The timeout used when a lock is held (in seconds), a negative value |
|
1743 | 1871 | means no timeout. (default: 600) |
@@ -1945,6 +2073,14 b' The full set of options is:' | |||
|
1945 | 2073 | ``ipv6`` |
|
1946 | 2074 | Whether to use IPv6. (default: False) |
|
1947 | 2075 | |
|
2076 | ``labels`` | |
|
2077 | List of string *labels* associated with the repository. | |
|
2078 | ||
|
2079 | Labels are exposed as a template keyword and can be used to customize | |
|
2080 | output. For example, the ``index`` template can group or filter repositories | 
|
2081 | by labels and the ``summary`` template can display additional content | |
|
2082 | if a specific label is present. | |
|
2083 | ||
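
For example (label names are arbitrary strings)::

    [web]
    labels = production, public
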
|
1948 | 2084 | ``logoimg`` |
|
1949 | 2085 | File name of the logo image that some templates display on each page. |
|
1950 | 2086 | The file name is relative to ``staticurl``. That is, the full path to |
@@ -81,6 +81,10 b' Some sample command line templates:' | |||
|
81 | 81 | |
|
82 | 82 | $ hg log -r 0 --template "files: {join(files, ', ')}\n" |
|
83 | 83 | |
|
84 | - Separate non-empty arguments by a " ":: | |
|
85 | ||
|
86 | $ hg log -r 0 --template "{separate(' ', node, bookmarks, tags)}\n" | 
|
87 | ||
|
84 | 88 | - Modify each line of a commit description:: |
|
85 | 89 | |
|
86 | 90 | $ hg log --template "{splitlines(desc) % '**** {line}\n'}" |
@@ -9,6 +9,7 b'' | |||
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import errno |
|
12 | import hashlib | |
|
12 | 13 | import os |
|
13 | 14 | import shutil |
|
14 | 15 | |
@@ -43,6 +44,9 b' from . import (' | |||
|
43 | 44 | |
|
44 | 45 | release = lock.release |
|
45 | 46 | |
|
47 | # shared features | |
|
48 | sharedbookmarks = 'bookmarks' | |
|
49 | ||
|
46 | 50 | def _local(path): |
|
47 | 51 | path = util.expandpath(util.urllocalpath(path)) |
|
48 | 52 | return (os.path.isfile(path) and bundlerepo or localrepo) |
@@ -257,7 +261,7 b' def postshare(sourcerepo, destrepo, book' | |||
|
257 | 261 | |
|
258 | 262 | if bookmarks: |
|
259 | 263 | fp = destrepo.vfs('shared', 'w') |
|
260 | fp.write('bookmarks\n') | 
|
264 | fp.write(sharedbookmarks + '\n') | |
|
261 | 265 | fp.close() |
|
262 | 266 | |
|
263 | 267 | def _postshareupdate(repo, update, checkout=None): |
@@ -480,9 +484,11 b' def clone(ui, peeropts, source, dest=Non' | |||
|
480 | 484 | ui.status(_('(not using pooled storage: ' |
|
481 | 485 | 'unable to resolve identity of remote)\n')) |
|
482 | 486 | elif sharenamemode == 'remote': |
|
483 | sharepath = os.path.join(sharepool, util.sha1(source).hexdigest()) | 
|
487 | sharepath = os.path.join( | |
|
488 | sharepool, hashlib.sha1(source).hexdigest()) | |
|
484 | 489 | else: |
|
485 | raise error.Abort('unknown share naming mode: %s' % sharenamemode) | 
|
490 | raise error.Abort(_('unknown share naming mode: %s') % | |
|
491 | sharenamemode) | |
|
486 | 492 | |
|
487 | 493 | if sharepath: |
|
488 | 494 | return clonewithshare(ui, peeropts, sharepath, source, srcpeer, |
@@ -921,9 +927,7 b' def remoteui(src, opts):' | |||
|
921 | 927 | for key, val in src.configitems(sect): |
|
922 | 928 | dst.setconfig(sect, key, val, 'copied') |
|
923 | 929 | v = src.config('web', 'cacerts') |
|
924 | if v == '!': | 
|
925 | dst.setconfig('web', 'cacerts', v, 'copied') | |
|
926 | elif v: | |
|
930 | if v: | |
|
927 | 931 | dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied') |
|
928 | 932 | |
|
929 | 933 | return dst |
@@ -8,11 +8,14 b'' | |||
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | import BaseHTTPServer | |
|
12 | 11 | import errno |
|
13 | 12 | import mimetypes |
|
14 | 13 | import os |
|
15 | 14 | |
|
15 | from .. import util | |
|
16 | ||
|
17 | httpserver = util.httpserver | |
|
18 | ||
|
16 | 19 | HTTP_OK = 200 |
|
17 | 20 | HTTP_NOT_MODIFIED = 304 |
|
18 | 21 | HTTP_BAD_REQUEST = 400 |
@@ -107,7 +110,7 b' class continuereader(object):' | |||
|
107 | 110 | raise AttributeError |
|
108 | 111 | |
|
109 | 112 | def _statusmessage(code): |
|
110 | responses = BaseHTTPServer.BaseHTTPRequestHandler.responses | 
|
113 | responses = httpserver.basehttprequesthandler.responses | |
|
111 | 114 | return responses.get(code, ('Error', 'Unknown error'))[0] |
|
112 | 115 | |
|
113 | 116 | def statusmessage(code, message=None): |
@@ -187,7 +190,7 b' def get_contact(config):' | |||
|
187 | 190 | os.environ.get("EMAIL") or "") |
|
188 | 191 | |
|
189 | 192 | def caching(web, req): |
|
190 | tag = str(web.mtime) | 
|
193 | tag = 'W/"%s"' % web.mtime | |
|
191 | 194 | if req.env.get('HTTP_IF_NONE_MATCH') == tag: |
|
192 | 195 | raise ErrorResponse(HTTP_NOT_MODIFIED) |
|
193 | 196 | req.headers.append(('ETag', tag)) |
@@ -366,7 +366,9 b' class hgwebdir(object):' | |||
|
366 | 366 | 'lastchange': d, |
|
367 | 367 | 'lastchange_sort': d[1]-d[0], |
|
368 | 368 | 'archives': [], |
|
369 | 'isdirectory': True} | 
|
369 | 'isdirectory': True, | |
|
370 | 'labels': [], | |
|
371 | } | |
|
370 | 372 | |
|
371 | 373 | seendirs.add(name) |
|
372 | 374 | yield row |
@@ -416,6 +418,7 b' class hgwebdir(object):' | |||
|
416 | 418 | 'lastchange_sort': d[1]-d[0], |
|
417 | 419 | 'archives': archivelist(u, "tip", url), |
|
418 | 420 | 'isdirectory': None, |
|
421 | 'labels': u.configlist('web', 'labels', untrusted=True), | |
|
419 | 422 | } |
|
420 | 423 | |
|
421 | 424 | yield row |
@@ -8,8 +8,6 b'' | |||
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | import BaseHTTPServer | |
|
12 | import SocketServer | |
|
13 | 11 | import errno |
|
14 | 12 | import os |
|
15 | 13 | import socket |
@@ -23,6 +21,8 b' from .. import (' | |||
|
23 | 21 | util, |
|
24 | 22 | ) |
|
25 | 23 | |
|
24 | httpservermod = util.httpserver | |
|
25 | socketserver = util.socketserver | |
|
26 | 26 | urlerr = util.urlerr |
|
27 | 27 | urlreq = util.urlreq |
|
28 | 28 | |
@@ -53,18 +53,18 b' class _error_logger(object):' | |||
|
53 | 53 | for msg in seq: |
|
54 | 54 | self.handler.log_error("HG error: %s", msg) |
|
55 | 55 | |
|
56 | class _httprequesthandler(BaseHTTPServer.BaseHTTPRequestHandler): | 
|
56 | class _httprequesthandler(httpservermod.basehttprequesthandler): | |
|
57 | 57 | |
|
58 | 58 | url_scheme = 'http' |
|
59 | 59 | |
|
60 | 60 | @staticmethod |
|
61 | def preparehttpserver(httpserver, ssl_cert): | 
|
61 | def preparehttpserver(httpserver, ui): | |
|
62 | 62 | """Prepare .socket of new HTTPServer instance""" |
|
63 | 63 | pass |
|
64 | 64 | |
|
65 | 65 | def __init__(self, *args, **kargs): |
|
66 | 66 | self.protocol_version = 'HTTP/1.1' |
|
67 | BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs) | 
|
67 | httpservermod.basehttprequesthandler.__init__(self, *args, **kargs) | |
|
68 | 68 | |
|
69 | 69 | def _log_any(self, fp, format, *args): |
|
70 | 70 | fp.write("%s - - [%s] %s\n" % (self.client_address[0], |
@@ -147,9 +147,9 b' class _httprequesthandler(BaseHTTPServer' | |||
|
147 | 147 | env['wsgi.input'] = self.rfile |
|
148 | 148 | env['wsgi.errors'] = _error_logger(self) |
|
149 | 149 | env['wsgi.multithread'] = isinstance(self.server, |
|
150 | SocketServer.ThreadingMixIn) | 
|
150 | socketserver.ThreadingMixIn) | |
|
151 | 151 | env['wsgi.multiprocess'] = isinstance(self.server, |
|
152 | SocketServer.ForkingMixIn) | 
|
152 | socketserver.ForkingMixIn) | |
|
153 | 153 | env['wsgi.run_once'] = 0 |
|
154 | 154 | |
|
155 | 155 | self.saved_status = None |
@@ -222,15 +222,25 b' class _httprequesthandlerssl(_httpreques' | |||
|
222 | 222 | url_scheme = 'https' |
|
223 | 223 | |
|
224 | 224 | @staticmethod |
|
225 | def preparehttpserver(httpserver, ssl_cert): | 
|
225 | def preparehttpserver(httpserver, ui): | |
|
226 | 226 | try: |
|
227 | import ssl | |
|
228 | ssl.wrap_socket | 
|
227 | from .. import sslutil | |
|
228 | sslutil.modernssl | |
|
229 | 229 | except ImportError: |
|
230 | 230 | raise error.Abort(_("SSL support is unavailable")) |
|
231 | httpserver.socket = ssl.wrap_socket( | |
|
232 | httpserver.socket, server_side=True, | |
|
233 | certfile=ssl_cert, ssl_version=ssl.PROTOCOL_TLSv1) | |
|
231 | ||
|
232 | certfile = ui.config('web', 'certificate') | |
|
233 | ||
|
234 | # These config options are currently only meant for testing. Use | |
|
235 | # at your own risk. | |
|
236 | cafile = ui.config('devel', 'servercafile') | |
|
237 | reqcert = ui.configbool('devel', 'serverrequirecert') | |
|
238 | ||
|
239 | httpserver.socket = sslutil.wrapserversocket(httpserver.socket, | |
|
240 | ui, | |
|
241 | certfile=certfile, | |
|
242 | cafile=cafile, | |
|
243 | requireclientcert=reqcert) | |
|
234 | 244 | |
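
For context, the certificate consumed above comes from ordinary ``[web]`` configuration such as the following (the path is hypothetical; the ``devel`` options are, as the comment says, test-only)::

    [web]
    certificate = /etc/hg/server-cert.pem
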
|
235 | 245 | def setup(self): |
|
236 | 246 | self.connection = self.request |
@@ -240,10 +250,10 b' class _httprequesthandlerssl(_httpreques' | |||
|
240 | 250 | try: |
|
241 | 251 | import threading |
|
242 | 252 | threading.activeCount() # silence pyflakes and bypass demandimport |
|
243 | _mixin = SocketServer.ThreadingMixIn | 
|
253 | _mixin = socketserver.ThreadingMixIn | |
|
244 | 254 | except ImportError: |
|
245 | 255 | if util.safehasattr(os, "fork"): |
|
246 | _mixin = SocketServer.ForkingMixIn | 
|
256 | _mixin = socketserver.ForkingMixIn | |
|
247 | 257 | else: |
|
248 | 258 | class _mixin(object): |
|
249 | 259 | pass |
@@ -253,18 +263,18 b' def openlog(opt, default):' | |||
|
253 | 263 | return open(opt, 'a') |
|
254 | 264 | return default |
|
255 | 265 | |
|
256 | class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer): | 
|
266 | class MercurialHTTPServer(object, _mixin, httpservermod.httpserver): | |
|
257 | 267 | |
|
258 | 268 | # SO_REUSEADDR has broken semantics on windows |
|
259 | 269 | if os.name == 'nt': |
|
260 | 270 | allow_reuse_address = 0 |
|
261 | 271 | |
|
262 | 272 | def __init__(self, ui, app, addr, handler, **kwargs): |
|
263 | BaseHTTPServer.HTTPServer.__init__(self, addr, handler, **kwargs) | 
|
273 | httpservermod.httpserver.__init__(self, addr, handler, **kwargs) | |
|
264 | 274 | self.daemon_threads = True |
|
265 | 275 | self.application = app |
|
266 | 276 | |
|
267 | handler.preparehttpserver(self, ui.config('web', 'certificate')) | 
|
277 | handler.preparehttpserver(self, ui) | |
|
268 | 278 | |
|
269 | 279 | prefix = ui.config('web', 'prefix', '') |
|
270 | 280 | if prefix: |
@@ -139,7 +139,7 b' def _filerevision(web, req, tmpl, fctx):' | |||
|
139 | 139 | yield {"line": t, |
|
140 | 140 | "lineid": "l%d" % (lineno + 1), |
|
141 | 141 | "linenumber": "% 6d" % (lineno + 1), |
|
142 | "parity": parity.next()} | 
|
142 | "parity": next(parity)} | |
|
143 | 143 | |
|
144 | 144 | return tmpl("filerevision", |
|
145 | 145 | file=f, |
@@ -278,7 +278,7 b' def _search(web, req, tmpl):' | |||
|
278 | 278 | files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) |
|
279 | 279 | |
|
280 | 280 | yield tmpl('searchentry', |
|
281 | parity=parity.next(), | 
|
281 | parity=next(parity), | |
|
282 | 282 | changelogtag=showtags, |
|
283 | 283 | files=files, |
|
284 | 284 | **webutil.commonentry(web.repo, ctx)) |
@@ -375,7 +375,7 b' def changelog(web, req, tmpl, shortlog=F' | |||
|
375 | 375 | break |
|
376 | 376 | |
|
377 | 377 | entry = webutil.changelistentry(web, web.repo[rev], tmpl) |
|
378 | entry['parity'] = parity.next() | 
|
378 | entry['parity'] = next(parity) | |
|
379 | 379 | yield entry |
|
380 | 380 | |
|
381 | 381 | if shortlog: |
@@ -527,7 +527,7 b' def manifest(web, req, tmpl):' | |||
|
527 | 527 | |
|
528 | 528 | fctx = ctx.filectx(full) |
|
529 | 529 | yield {"file": full, |
|
530 | "parity": parity.next(), | 
|
530 | "parity": next(parity), | |
|
531 | 531 | "basename": f, |
|
532 | 532 | "date": fctx.date(), |
|
533 | 533 | "size": fctx.size(), |
@@ -545,7 +545,7 b' def manifest(web, req, tmpl):' | |||
|
545 | 545 | h = v |
|
546 | 546 | |
|
547 | 547 | path = "%s%s" % (abspath, d) |
|
548 | yield {"parity": parity.next(), | 
|
548 | yield {"parity": next(parity), | |
|
549 | 549 | "path": path, |
|
550 | 550 | "emptydirs": "/".join(emptydirs), |
|
551 | 551 | "basename": d} |
@@ -554,7 +554,7 b' def manifest(web, req, tmpl):' | |||
|
554 | 554 | symrev=symrev, |
|
555 | 555 | path=abspath, |
|
556 | 556 | up=webutil.up(abspath), |
|
557 | upparity=parity.next(), | 
|
557 | upparity=next(parity), | |
|
558 | 558 | fentries=filelist, |
|
559 | 559 | dentries=dirlist, |
|
560 | 560 | archives=web.archivelist(hex(node)), |
@@ -582,7 +582,7 b' def tags(web, req, tmpl):' | |||
|
582 | 582 | if latestonly: |
|
583 | 583 | t = t[:1] |
|
584 | 584 | for k, n in t: |
|
585 | yield {"parity": parity.next(), | 
|
585 | yield {"parity": next(parity), | |
|
586 | 586 | "tag": k, |
|
587 | 587 | "date": web.repo[n].date(), |
|
588 | 588 | "node": hex(n)} |
@@ -615,7 +615,7 b' def bookmarks(web, req, tmpl):' | |||
|
615 | 615 | if latestonly: |
|
616 | 616 | t = i[:1] |
|
617 | 617 | for k, n in t: |
|
618 | yield {"parity": parity.next(), | 
|
618 | yield {"parity": next(parity), | |
|
619 | 619 | "bookmark": k, |
|
620 | 620 | "date": web.repo[n].date(), |
|
621 | 621 | "node": hex(n)} |
@@ -677,7 +677,7 b' def summary(web, req, tmpl):' | |||
|
677 | 677 | break |
|
678 | 678 | |
|
679 | 679 | yield tmpl("tagentry", |
|
680 | parity=parity.next(), | 
|
680 | parity=next(parity), | |
|
681 | 681 | tag=k, |
|
682 | 682 | node=hex(n), |
|
683 | 683 | date=web.repo[n].date()) |
@@ -688,7 +688,7 b' def summary(web, req, tmpl):' | |||
|
688 | 688 | sortkey = lambda b: (web.repo[b[1]].rev(), b[0]) |
|
689 | 689 | marks = sorted(marks, key=sortkey, reverse=True) |
|
690 | 690 | for k, n in marks[:10]: # limit to 10 bookmarks |
|
691 | yield {'parity': parity.next(), | 
|
691 | yield {'parity': next(parity), | |
|
692 | 692 | 'bookmark': k, |
|
693 | 693 | 'date': web.repo[n].date(), |
|
694 | 694 | 'node': hex(n)} |
@@ -704,11 +704,11 b' def summary(web, req, tmpl):' | |||
|
704 | 704 | |
|
705 | 705 | l.append(tmpl( |
|
706 | 706 | 'shortlogentry', |
|
707 | parity=parity.next(), | 
|
707 | parity=next(parity), | |
|
708 | 708 | **webutil.commonentry(web.repo, ctx))) |
|
709 | 709 | |
|
710 | l.reverse() | 
|
711 | yield l | 
|
710 | for entry in reversed(l): | |
|
711 | yield entry | |
|
712 | 712 | |
|
713 | 713 | tip = web.repo['tip'] |
|
714 | 714 | count = len(web.repo) |
@@ -725,7 +725,8 b' def summary(web, req, tmpl):' | |||
|
725 | 725 | shortlog=changelist, |
|
726 | 726 | node=tip.hex(), |
|
727 | 727 | symrev='tip', |
|
728 | archives=web.archivelist("tip")) | 
|
728 | archives=web.archivelist("tip"), | |
|
729 | labels=web.configlist('web', 'labels')) | |
|
729 | 730 | |
|
730 | 731 | @webcommand('filediff') |
|
731 | 732 | def filediff(web, req, tmpl): |
@@ -863,29 +864,41 b' def annotate(web, req, tmpl):' | |||
|
863 | 864 | diffopts = patch.difffeatureopts(web.repo.ui, untrusted=True, |
|
864 | 865 | section='annotate', whitespace=True) |
|
865 | 866 | |
|
867 | def parents(f): | |
|
868 | for p in f.parents(): | |
|
869 | yield { | |
|
870 | "node": p.hex(), | |
|
871 | "rev": p.rev(), | |
|
872 | } | |
|
873 | ||
|
866 | 874 | def annotate(**map): |
|
867 | last = None | |
|
868 | 875 | if util.binary(fctx.data()): |
|
869 | 876 | mt = (mimetypes.guess_type(fctx.path())[0] |
|
870 | 877 | or 'application/octet-stream') |
|
871 | lines = enumerate([((fctx.filectx(fctx.filerev()), 1), | 
|
872 | '(binary:%s)' % mt)]) | |
|
878 | lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)] | |
|
873 | 879 | else: |
|
874 | lines = enumerate(fctx.annotate(follow=True, linenumber=True, | 
|
875 | diffopts=diffopts)) | 
|
876 | for lineno, ((f, targetline), l) in lines: | |
|
877 | fnode = f.filenode() | |
|
878 | ||
|
879 | if last != fnode: | 
|
880 | last = fnode | |
|
881 | ||
|
882 | yield {"parity": parity.next(), | 
|
880 | lines = fctx.annotate(follow=True, linenumber=True, | |
|
881 | diffopts=diffopts) | |
|
882 | previousrev = None | |
|
883 | blockparitygen = paritygen(1) | |
|
884 | for lineno, ((f, targetline), l) in enumerate(lines): | |
|
885 | rev = f.rev() | |
|
886 | if rev != previousrev: | |
|
887 | blockhead = True | |
|
888 | blockparity = next(blockparitygen) | |
|
889 | else: | |
|
890 | blockhead = None | |
|
891 | previousrev = rev | |
|
892 | yield {"parity": next(parity), | |
|
883 | 893 | "node": f.hex(), |
|
884 | "rev": f.rev(), | 
|
894 | "rev": rev, | |
|
885 | 895 | "author": f.user(), |
|
896 | "parents": parents(f), | |
|
886 | 897 | "desc": f.description(), |
|
887 | 898 | "extra": f.extra(), |
|
888 | 899 | "file": f.path(), |
|
900 | "blockhead": blockhead, | |
|
901 | "blockparity": blockparity, | |
|
889 | 902 | "targetline": targetline, |
|
890 | 903 | "line": l, |
|
891 | 904 | "lineno": lineno + 1, |
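
The bookkeeping above marks the first line of each run of consecutive annotate lines that share a revision, and gives the whole run a single parity. A reduced sketch of the same pattern (with a simplified stand-in for ``paritygen``, which is an assumption)::

    def simpleparity():
        # stand-in for paritygen(1): alternate 0 and 1 per block
        while True:
            yield 0
            yield 1

    revs = [3, 3, 5, 5, 5, 2]
    previousrev = None
    blockparitygen = simpleparity()
    for rev in revs:
        blockhead = rev != previousrev
        if blockhead:
            blockparity = next(blockparitygen)
        previousrev = rev
        print(rev, blockhead, blockparity)
    # 3 True 0 / 3 False 0 / 5 True 1 / 5 False 1 / 5 False 1 / 2 True 0
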
@@ -963,7 +976,7 b' def filelog(web, req, tmpl):' | |||
|
963 | 976 | iterfctx = fctx.filectx(i) |
|
964 | 977 | |
|
965 | 978 | l.append(dict( |
|
966 | parity=parity.next(), | 
|
979 | parity=next(parity), | |
|
967 | 980 | filerev=i, |
|
968 | 981 | file=f, |
|
969 | 982 | rename=webutil.renamelink(iterfctx), |
@@ -75,7 +75,7 b' class revnav(object):' | |||
|
75 | 75 | def _first(self): |
|
76 | 76 | """return the minimum non-filtered changeset or None""" |
|
77 | 77 | try: |
|
78 | return iter(self._revlog).next() | 
|
78 | return next(iter(self._revlog)) | |
|
79 | 79 | except StopIteration: |
|
80 | 80 | return None |
|
81 | 81 | |
@@ -247,7 +247,7 b' def branchentries(repo, stripecount, lim' | |||
|
247 | 247 | else: |
|
248 | 248 | status = 'open' |
|
249 | 249 | yield { |
|
250 | 'parity': parity.next(), | 
|
250 | 'parity': next(parity), | |
|
251 | 251 | 'branch': ctx.branch(), |
|
252 | 252 | 'status': status, |
|
253 | 253 | 'node': ctx.hex(), |
@@ -369,7 +369,7 b' def changesetentry(web, req, tmpl, ctx):' | |||
|
369 | 369 | template = f in ctx and 'filenodelink' or 'filenolink' |
|
370 | 370 | files.append(tmpl(template, |
|
371 | 371 | node=ctx.hex(), file=f, blockno=blockno + 1, |
|
372 | parity=parity.next())) | 
|
372 | parity=next(parity))) | |
|
373 | 373 | |
|
374 | 374 | basectx = basechangectx(web.repo, req) |
|
375 | 375 | if basectx is None: |
@@ -450,15 +450,15 b' def diffs(repo, tmpl, ctx, basectx, file' | |||
|
450 | 450 | block = [] |
|
451 | 451 | for chunk in patch.diff(repo, node1, node2, m, opts=diffopts): |
|
452 | 452 | if chunk.startswith('diff') and block: |
|
453 | blockno = blockcount.next() | 
|
454 | yield tmpl('diffblock', parity=parity.next(), blockno=blockno, | 
|
453 | blockno = next(blockcount) | |
|
454 | yield tmpl('diffblock', parity=next(parity), blockno=blockno, | |
|
455 | 455 | lines=prettyprintlines(''.join(block), blockno)) |
|
456 | 456 | block = [] |
|
457 | 457 | if chunk.startswith('diff') and style != 'raw': |
|
458 | 458 | chunk = ''.join(chunk.splitlines(True)[1:]) |
|
459 | 459 | block.append(chunk) |
|
460 | blockno = blockcount.next() | 
|
461 | yield tmpl('diffblock', parity=parity.next(), blockno=blockno, | 
|
460 | blockno = next(blockcount) | |
|
461 | yield tmpl('diffblock', parity=next(parity), blockno=blockno, | |
|
462 | 462 | lines=prettyprintlines(''.join(block), blockno)) |
|
463 | 463 | |
|
464 | 464 | def compare(tmpl, context, leftlines, rightlines): |
@@ -521,14 +521,14 b' def diffstatgen(ctx, basectx):' | |||
|
521 | 521 | def diffsummary(statgen): |
|
522 | 522 | '''Return a short summary of the diff.''' |
|
523 | 523 | |
|
524 | stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next() | 
|
524 | stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen) | |
|
525 | 525 | return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % ( |
|
526 | 526 | len(stats), addtotal, removetotal) |
|
527 | 527 | |
|
528 | 528 | def diffstat(tmpl, ctx, statgen, parity): |
|
529 | 529 | '''Return a diffstat template for each file in the diff.''' |
|
530 | 530 | |
|
531 | stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next() | 
|
531 | stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen) | |
|
532 | 532 | files = ctx.files() |
|
533 | 533 | |
|
534 | 534 | def pct(i): |
@@ -543,7 +543,7 b' def diffstat(tmpl, ctx, statgen, parity)' | |||
|
543 | 543 | fileno += 1 |
|
544 | 544 | yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno, |
|
545 | 545 | total=total, addpct=pct(adds), removepct=pct(removes), |
|
546 | parity=parity.next()) | 
|
546 | parity=next(parity)) | |
|
547 | 547 | |
|
548 | 548 | class sessionvars(object): |
|
549 | 549 | def __init__(self, vars, start='?'): |
@@ -40,26 +40,38 b' from __future__ import absolute_import' | |||
|
40 | 40 | |
|
41 | 41 | # Many functions in this file have too many arguments. |
|
42 | 42 | # pylint: disable=R0913 |
|
43 | ||
|
44 | import cStringIO | |
|
43 | import email | |
|
44 | import email.message | |
|
45 | 45 | import errno |
|
46 | import httplib | |
|
46 | import inspect | |
|
47 | 47 | import logging |
|
48 | import rfc822 | |
|
49 | 48 | import select |
|
50 | 49 | import socket |
|
50 | import ssl | |
|
51 | import sys | |
|
52 | ||
|
53 | try: | |
|
54 | import cStringIO as io | |
|
55 | io.StringIO | |
|
56 | except ImportError: | |
|
57 | import io | |
|
58 | ||
|
59 | try: | |
|
60 | import httplib | |
|
61 | httplib.HTTPException | |
|
62 | except ImportError: | |
|
63 | import http.client as httplib | |
|
51 | 64 | |
|
52 | 65 | from . import ( |
|
53 | 66 | _readers, |
|
54 | socketutil, | |
|
55 | ) | |
|
67 | ) | |
|
56 | 68 | |
|
57 | 69 | logger = logging.getLogger(__name__) |
|
58 | 70 | |
|
59 | 71 | __all__ = ['HTTPConnection', 'HTTPResponse'] |
|
60 | 72 | |
|
61 | HTTP_VER_1_0 = 'HTTP/1.0' | |
|
62 | HTTP_VER_1_1 = 'HTTP/1.1' | |
|
73 | HTTP_VER_1_0 = b'HTTP/1.0' | |
|
74 | HTTP_VER_1_1 = b'HTTP/1.1' | |
|
63 | 75 | |
|
64 | 76 | OUTGOING_BUFFER_SIZE = 1 << 15 |
|
65 | 77 | INCOMING_BUFFER_SIZE = 1 << 20 |
@@ -73,7 +85,7 b" XFER_ENCODING_CHUNKED = 'chunked'" | |||
|
73 | 85 | |
|
74 | 86 | CONNECTION_CLOSE = 'close' |
|
75 | 87 | |
|
76 | EOL = '\r\n' | |
|
88 | EOL = b'\r\n' | |
|
77 | 89 | _END_HEADERS = EOL * 2 |
|
78 | 90 | |
|
79 | 91 | # Based on some searching around, 1 second seems like a reasonable |
@@ -81,6 +93,57 b" EOL = '\\r\\n'" | |||
|
81 | 93 | TIMEOUT_ASSUME_CONTINUE = 1 |
|
82 | 94 | TIMEOUT_DEFAULT = None |
|
83 | 95 | |
|
96 | if sys.version_info > (3, 0): | |
|
97 | _unicode = str | |
|
98 | else: | |
|
99 | _unicode = unicode | |
|
100 | ||
|
101 | def _ensurebytes(data): | |
|
102 | if not isinstance(data, (_unicode, bytes)): | |
|
103 | data = str(data) | |
|
104 | if not isinstance(data, bytes): | |
|
105 | try: | |
|
106 | return data.encode('latin-1') | |
|
107 | except UnicodeEncodeError as err: | |
|
108 | raise UnicodeEncodeError( | |
|
109 | err.encoding, | |
|
110 | err.object, | |
|
111 | err.start, | |
|
112 | err.end, | |
|
113 | '%r is not valid Latin-1. Use .encode("utf-8") ' | 
|
114 | 'if sending as utf-8 is desired.' % ( | |
|
115 | data[err.start:err.end],)) | |
|
116 | return data | |
|
117 | ||
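
A few concrete cases for ``_ensurebytes`` above (informal, traced by hand)::

    _ensurebytes(b'x')        # b'x': bytes pass through untouched
    _ensurebytes(u'caf\xe9')  # b'caf\xe9': Latin-1-encodable text is encoded
    _ensurebytes(12)          # b'12': non-strings are stringified first
    _ensurebytes(u'\u2603')   # raises UnicodeEncodeError with the hint above
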
|
118 | class _CompatMessage(email.message.Message): | |
|
119 | """Workaround for rfc822.Message and email.message.Message API diffs.""" | |
|
120 | ||
|
121 | @classmethod | |
|
122 | def from_string(cls, s): | |
|
123 | if sys.version_info > (3, 0): | |
|
124 | # Python 3 can't decode headers from bytes, so we have to | |
|
125 | # trust RFC 2616 and decode the headers as iso-8859-1 | |
|
126 | # bytes. | |
|
127 | s = s.decode('iso-8859-1') | |
|
128 | headers = email.message_from_string(s, _class=_CompatMessage) | |
|
129 | # Fix multi-line headers to match httplib's behavior from | |
|
130 | # Python 2.x, since email.message.Message handles them in | |
|
131 | # slightly different ways. | |
|
132 | if sys.version_info < (3, 0): | |
|
133 | new = [] | |
|
134 | for h, v in headers._headers: | |
|
135 | if '\r\n' in v: | |
|
136 | v = '\n'.join([' ' + x.lstrip() for x in v.split('\r\n')])[1:] | |
|
137 | new.append((h, v)) | |
|
138 | headers._headers = new | |
|
139 | return headers | |
|
140 | ||
|
141 | def getheaders(self, key): | |
|
142 | return self.get_all(key) | |
|
143 | ||
|
144 | def getheader(self, key, default=None): | |
|
145 | return self.get(key, failobj=default) | |
|
146 | ||
|
84 | 147 | |
|
85 | 148 | class HTTPResponse(object): |
|
86 | 149 | """Response from an HTTP server. |
@@ -91,11 +154,11 b' class HTTPResponse(object):' | |||
|
91 | 154 | def __init__(self, sock, timeout, method): |
|
92 | 155 | self.sock = sock |
|
93 | 156 | self.method = method |
|
94 | self.raw_response = '' | |
|
157 | self.raw_response = b'' | |
|
95 | 158 | self._headers_len = 0 |
|
96 | 159 | self.headers = None |
|
97 | 160 | self.will_close = False |
|
98 | self.status_line = '' | |
|
161 | self.status_line = b'' | |
|
99 | 162 | self.status = None |
|
100 | 163 | self.continued = False |
|
101 | 164 | self.http_version = None |
@@ -131,6 +194,10 b' class HTTPResponse(object):' | |||
|
131 | 194 | return self.headers.getheader(header, default=default) |
|
132 | 195 | |
|
133 | 196 | def getheaders(self): |
|
197 | if sys.version_info < (3, 0): | |
|
198 | return [(k.lower(), v) for k, v in self.headers.items()] | |
|
199 | # Starting in Python 3, headers aren't lowercased before being | |
|
200 | # returned here. | |
|
134 | 201 | return self.headers.items() |
|
135 | 202 | |
|
136 | 203 | def readline(self): |
@@ -141,14 +208,14 b' class HTTPResponse(object):' | |||
|
141 | 208 | """ |
|
142 | 209 | blocks = [] |
|
143 | 210 | while True: |
|
144 | self._reader.readto('\n', blocks) | |
|
211 | self._reader.readto(b'\n', blocks) | |
|
145 | 212 | |
|
146 | if blocks and blocks[-1][-1] == '\n' or self.complete(): | |
|
213 | if blocks and blocks[-1][-1:] == b'\n' or self.complete(): | |
|
147 | 214 | break |
|
148 | 215 | |
|
149 | 216 | self._select() |
|
150 | 217 | |
|
151 | return ''.join(blocks) | |
|
218 | return b''.join(blocks) | |
|
152 | 219 | |
|
153 | 220 | def read(self, length=None): |
|
154 | 221 | """Read data from the response body.""" |
@@ -175,8 +242,8 b' class HTTPResponse(object):' | |||
|
175 | 242 | raise HTTPTimeoutException('timeout reading data') |
|
176 | 243 | try: |
|
177 | 244 | data = self.sock.recv(INCOMING_BUFFER_SIZE) |
|
178 | except socket.sslerror as e: | 
|
179 | if e.args[0] != socket.SSL_ERROR_WANT_READ: | 
|
245 | except ssl.SSLError as e: | |
|
246 | if e.args[0] != ssl.SSL_ERROR_WANT_READ: | |
|
180 | 247 | raise |
|
181 | 248 | logger.debug('SSL_ERROR_WANT_READ in _select, should retry later') |
|
182 | 249 | return True |
@@ -203,7 +270,7 b' class HTTPResponse(object):' | |||
|
203 | 270 | self.raw_response += data |
|
204 | 271 | # This is a bogus server with bad line endings |
|
205 | 272 | if self._eol not in self.raw_response: |
|
206 | for bad_eol in ('\n', '\r'): | |
|
273 | for bad_eol in (b'\n', b'\r'): | |
|
207 | 274 | if (bad_eol in self.raw_response |
|
208 | 275 | # verify that bad_eol is not the end of the incoming data |
|
209 | 276 | # as this could be a response line that just got |
@@ -220,8 +287,8 b' class HTTPResponse(object):' | |||
|
220 | 287 | |
|
221 | 288 | # handle 100-continue response |
|
222 | 289 | hdrs, body = self.raw_response.split(self._end_headers, 1) |
|
223 | unused_http_ver, status = hdrs.split(' ', 1) | |
|
224 | if status.startswith('100'): | |
|
290 | unused_http_ver, status = hdrs.split(b' ', 1) | |
|
291 | if status.startswith(b'100'): | |
|
225 | 292 | self.raw_response = body |
|
226 | 293 | self.continued = True |
|
227 | 294 | logger.debug('continue seen, setting body to %r', body) |
@@ -235,14 +302,14 b' class HTTPResponse(object):' | |||
|
235 | 302 | self.status_line, hdrs = hdrs.split(self._eol, 1) |
|
236 | 303 | else: |
|
237 | 304 | self.status_line = hdrs |
|
238 | hdrs = '' | |
|
305 | hdrs = b'' | |
|
239 | 306 | # TODO HTTP < 1.0 support |
|
240 | 307 | (self.http_version, self.status, |
|
241 | self.reason) = self.status_line.split(' ', 2) | |
|
308 | self.reason) = self.status_line.split(b' ', 2) | |
|
242 | 309 | self.status = int(self.status) |
|
243 | 310 | if self._eol != EOL: |
|
244 | hdrs = hdrs.replace(self._eol, '\r\n') | |
|
245 | headers = rfc822.Message(cStringIO.StringIO(hdrs)) | 
|
311 | hdrs = hdrs.replace(self._eol, b'\r\n') | |
|
312 | headers = _CompatMessage.from_string(hdrs) | |
|
246 | 313 | content_len = None |
|
247 | 314 | if HDR_CONTENT_LENGTH in headers: |
|
248 | 315 | content_len = int(headers[HDR_CONTENT_LENGTH]) |
@@ -259,8 +326,8 b' class HTTPResponse(object):' | |||
|
259 | 326 | # HEAD responses are forbidden from returning a body, and |
|
260 | 327 | # it's implausible for a CONNECT response to use |
|
261 | 328 | # close-is-end logic for an OK response. |
|
262 | if (self.method == 'HEAD' or | |
|
263 | (self.method == 'CONNECT' and content_len is None)): | |
|
329 | if (self.method == b'HEAD' or | |
|
330 | (self.method == b'CONNECT' and content_len is None)): | |
|
264 | 331 | content_len = 0 |
|
265 | 332 | if content_len is not None: |
|
266 | 333 | logger.debug('using a content-length reader with length %d', |
@@ -294,8 +361,48 b' def _foldheaders(headers):' | |||
|
294 | 361 | >>> _foldheaders({'Accept-Encoding': 'wat'}) |
|
295 | 362 | {'accept-encoding': ('Accept-Encoding', 'wat')} |
|
296 | 363 | """ |
|
297 | return dict((k.lower(), (k, v)) for k, v in headers.iteritems()) | 
|
364 | return dict((k.lower(), (k, v)) for k, v in headers.items()) | |
|
365 | ||
|
366 | try: | |
|
367 | inspect.signature | |
|
368 | def _handlesarg(func, arg): | |
|
369 | """ Try to determine if func accepts arg | |
|
370 | ||
|
371 | If it takes arg, return True | |
|
372 | If it happens to take **args, then it could do anything: | |
|
373 | * It could throw a different TypeError, just for fun | |
|
374 | * It could throw an ArgumentError or anything else | |
|
375 | * It could choose not to throw an Exception at all | |
|
376 | ... return 'unknown' | |
|
298 | 377 |
|
|
378 | Otherwise, return False | |
|
379 | """ | |
|
380 | params = inspect.signature(func).parameters | |
|
381 | if arg in params: | |
|
382 | return True | |
|
383 | for p in params: | |
|
384 | if params[p].kind == inspect._ParameterKind.VAR_KEYWORD: | |
|
385 | return 'unknown' | |
|
386 | return False | |
|
387 | except AttributeError: | |
|
388 | def _handlesarg(func, arg): | |
|
389 | """ Try to determine if func accepts arg | |
|
390 | ||
|
391 | If it takes arg, return True | |
|
392 | If it happens to take **args, then it could do anything: | |
|
393 | * It could throw a different TypeError, just for fun | |
|
394 | * It could throw an ArgumentError or anything else | |
|
395 | * It could choose not to throw an Exception at all | |
|
396 | ... return 'unknown' | |
|
397 | ||
|
398 | Otherwise, return False | |
|
399 | """ | |
|
400 | spec = inspect.getargspec(func) | |
|
401 | if arg in spec.args: | |
|
402 | return True | |
|
403 | if spec.keywords: | |
|
404 | return 'unknown' | |
|
405 | return False | |
|
299 | 406 | |
|
300 | 407 | class HTTPConnection(object): |
|
301 | 408 | """Connection to a single http server. |
@@ -340,15 +447,38 b' class HTTPConnection(object):' | |||
|
340 | 447 | Any extra keyword arguments to this function will be provided |
|
341 | 448 | to the ssl_wrap_socket method. If no ssl |
|
342 | 449 | """ |
|
343 | if port is None and host.count(':') == 1 or ']:' in host: | |
|
344 | host, port = host.rsplit(':', 1) | |
|
450 | host = _ensurebytes(host) | |
|
451 | if port is None and host.count(b':') == 1 or b']:' in host: | |
|
452 | host, port = host.rsplit(b':', 1) | |
|
345 | 453 | port = int(port) |
|
346 | if '[' in host: | |
|
454 | if b'[' in host: | |
|
347 | 455 | host = host[1:-1] |
|
348 | 456 | if ssl_wrap_socket is not None: |
|
349 |
|
|
|
457 | _wrap_socket = ssl_wrap_socket | |
|
350 | 458 | else: |
|
351 |
|
|
|
459 | _wrap_socket = ssl.wrap_socket | |
|
460 | call_wrap_socket = None | |
|
461 | handlesubar = _handlesarg(_wrap_socket, 'server_hostname') | |
|
462 | if handlesubar is True: | |
|
463 | # supports server_hostname | |
|
464 | call_wrap_socket = _wrap_socket | |
|
465 | handlesnobar = _handlesarg(_wrap_socket, 'serverhostname') | |
|
466 | if handlesnobar is True and handlesubar is not True: | |
|
467 | # supports serverhostname | |
|
468 | def call_wrap_socket(sock, server_hostname=None, **ssl_opts): | |
|
469 | return _wrap_socket(sock, serverhostname=server_hostname, | |
|
470 | **ssl_opts) | |
|
471 | if handlesubar is False and handlesnobar is False: | |
|
472 | # does not support either | |
|
473 | def call_wrap_socket(sock, server_hostname=None, **ssl_opts): | |
|
474 | return _wrap_socket(sock, **ssl_opts) | |
|
475 | if call_wrap_socket is None: | |
|
476 | # we assume it takes **args | |
|
477 | def call_wrap_socket(sock, **ssl_opts): | |
|
478 | if 'server_hostname' in ssl_opts: | |
|
479 | ssl_opts['serverhostname'] = ssl_opts['server_hostname'] | |
|
480 | return _wrap_socket(sock, **ssl_opts) | |
|
481 | self._ssl_wrap_socket = call_wrap_socket | |
|
352 | 482 | if use_ssl is None and port is None: |
|
353 | 483 | use_ssl = False |
|
354 | 484 | port = 80 |
@@ -357,8 +487,6 b' class HTTPConnection(object):' | |||
|
357 | 487 | elif port is None: |
|
358 | 488 | port = (use_ssl and 443 or 80) |
|
359 | 489 | self.port = port |
|
360 | if use_ssl and not socketutil.have_ssl: | |
|
361 | raise Exception('ssl requested but unavailable on this Python') | |
|
362 | 490 | self.ssl = use_ssl |
|
363 | 491 | self.ssl_opts = ssl_opts |
|
364 | 492 | self._ssl_validator = ssl_validator |
@@ -388,15 +516,15 b' class HTTPConnection(object):' | |||
|
388 | 516 | if self._proxy_host is not None: |
|
389 | 517 | logger.info('Connecting to http proxy %s:%s', |
|
390 | 518 | self._proxy_host, self._proxy_port) |
|
391 | sock = socketutil.create_connection((self._proxy_host, | 
|
392 | self._proxy_port)) | 
|
519 | sock = socket.create_connection((self._proxy_host, | |
|
520 | self._proxy_port)) | |
|
393 | 521 | if self.ssl: |
|
394 | data = self._buildheaders('CONNECT', '%s:%d' % (self.host, | |
|
395 | self.port), | |
|
522 | data = self._buildheaders(b'CONNECT', b'%s:%d' % (self.host, | |
|
523 | self.port), | |
|
396 | 524 | proxy_headers, HTTP_VER_1_0) |
|
397 | 525 | sock.send(data) |
|
398 | 526 | sock.setblocking(0) |
|
399 | r = self.response_class(sock, self.timeout, 'CONNECT') | |
|
527 | r = self.response_class(sock, self.timeout, b'CONNECT') | |
|
400 | 528 | timeout_exc = HTTPTimeoutException( |
|
401 | 529 | 'Timed out waiting for CONNECT response from proxy') |
|
402 | 530 | while not r.complete(): |
@@ -421,7 +549,7 b' class HTTPConnection(object):' | |||
|
421 | 549 | logger.info('CONNECT (for SSL) to %s:%s via proxy succeeded.', |
|
422 | 550 | self.host, self.port) |
|
423 | 551 | else: |
|
424 | sock = socketutil.create_connection((self.host, self.port)) | 
|
552 | sock = socket.create_connection((self.host, self.port)) | |
|
425 | 553 | if self.ssl: |
|
426 | 554 | # This is the default, but in the case of proxied SSL |
|
427 | 555 | # requests the proxy logic above will have cleared |
@@ -429,7 +557,8 b' class HTTPConnection(object):' | |||
|
429 | 557 | sock.setblocking(1) |
|
430 | 558 | logger.debug('wrapping socket for ssl with options %r', |
|
431 | 559 | self.ssl_opts) |
|
432 | sock = self._ssl_wrap_socket(sock, **self.ssl_opts) | 
|
560 | sock = self._ssl_wrap_socket(sock, server_hostname=self.host, | |
|
561 | **self.ssl_opts) | |
|
433 | 562 | if self._ssl_validator: |
|
434 | 563 | self._ssl_validator(sock) |
|
435 | 564 | sock.setblocking(0) |
@@ -441,25 +570,26 b' class HTTPConnection(object):' | |||
|
441 | 570 | hdrhost = self.host |
|
442 | 571 | else: |
|
443 | 572 | # include nonstandard port in header |
|
444 | if ':' in self.host: # must be IPv6 | |
|
445 | hdrhost = '[%s]:%d' % (self.host, self.port) | |
|
573 | if b':' in self.host: # must be IPv6 | |
|
574 | hdrhost = b'[%s]:%d' % (self.host, self.port) | |
|
446 | 575 | else: |
|
447 | hdrhost = '%s:%d' % (self.host, self.port) | |
|
576 | hdrhost = b'%s:%d' % (self.host, self.port) | |
|
448 | 577 | if self._proxy_host and not self.ssl: |
|
449 | 578 | # When talking to a regular http proxy we must send the |
|
450 | 579 | # full URI, but in all other cases we must not (although |
|
451 | 580 | # technically RFC 2616 says servers must accept our |
|
452 | 581 | # request if we screw up, experimentally few do that |
|
453 | 582 | # correctly.) |
|
454 | assert path[0] == '/', 'path must start with a /' | |
|
455 | path = 'http://%s%s' % (hdrhost, path) | |
|
456 | outgoing = ['%s %s %s%s' % (method, path, http_ver, EOL)] | |
|
457 | headers['host'] = ('Host', hdrhost) | |
|
583 | assert path[0:1] == b'/', 'path must start with a /' | |
|
584 | path = b'http://%s%s' % (hdrhost, path) | |
|
585 | outgoing = [b'%s %s %s%s' % (method, path, http_ver, EOL)] | |
|
586 | headers[b'host'] = (b'Host', hdrhost) | |
|
458 | 587 | headers[HDR_ACCEPT_ENCODING] = (HDR_ACCEPT_ENCODING, 'identity') |
|
459 | for hdr, val in headers.itervalues(): | |
|
460 | outgoing.append('%s: %s%s' % (hdr, val, EOL)) | |
|
588 | for hdr, val in sorted((_ensurebytes(h), _ensurebytes(v)) | |
|
589 | for h, v in headers.values()): | |
|
590 | outgoing.append(b'%s: %s%s' % (hdr, val, EOL)) | |
|
461 | 591 | outgoing.append(EOL) |
|
462 | return ''.join(outgoing) | |
|
592 | return b''.join(outgoing) | |
|
463 | 593 | |
|
464 | 594 | def close(self): |
|
465 | 595 | """Close the connection to the server. |
@@ -512,6 +642,8 b' class HTTPConnection(object):' | |||
|
512 | 642 | available. Use the `getresponse()` method to retrieve the |
|
513 | 643 | response. |
|
514 | 644 | """ |
|
645 | method = _ensurebytes(method) | |
|
646 | path = _ensurebytes(path) | |
|
515 | 647 | if self.busy(): |
|
516 | 648 | raise httplib.CannotSendRequest( |
|
517 | 649 | 'Can not send another request before ' |
@@ -520,11 +652,26 b' class HTTPConnection(object):' | |||
|
520 | 652 | |
|
521 | 653 | logger.info('sending %s request for %s to %s on port %s', |
|
522 | 654 | method, path, self.host, self.port) |
|
655 | ||
|
523 | 656 | hdrs = _foldheaders(headers) |
|
524 | if hdrs.get('expect', ('', ''))[1].lower() == '100-continue': | |
|
657 | # Figure out headers that have to be computed from the request | |
|
658 | # body. | |
|
659 | chunked = False | |
|
660 | if body and HDR_CONTENT_LENGTH not in hdrs: | |
|
661 | if getattr(body, '__len__', False): | |
|
662 | hdrs[HDR_CONTENT_LENGTH] = (HDR_CONTENT_LENGTH, | |
|
663 | b'%d' % len(body)) | |
|
664 | elif getattr(body, 'read', False): | |
|
665 | hdrs[HDR_XFER_ENCODING] = (HDR_XFER_ENCODING, | |
|
666 | XFER_ENCODING_CHUNKED) | |
|
667 | chunked = True | |
|
668 | else: | |
|
669 | raise BadRequestData('body has no __len__() nor read()') | |
|
670 | # Figure out expect-continue header | |
|
671 | if hdrs.get('expect', ('', ''))[1].lower() == b'100-continue': | |
|
525 | 672 | expect_continue = True |
|
526 | 673 | elif expect_continue: |
|
527 | hdrs['expect'] = ('Expect', '100-Continue') | |
|
674 | hdrs['expect'] = (b'Expect', b'100-Continue') | |
|
528 | 675 | # httplib compatibility: if the user specified a |
|
529 | 676 | # proxy-authorization header, that's actually intended for a |
|
530 | 677 | # proxy CONNECT action, not the real request, but only if |
@@ -534,25 +681,15 b' class HTTPConnection(object):' | |||
|
534 | 681 | pa = hdrs.pop('proxy-authorization', None) |
|
535 | 682 | if pa is not None: |
|
536 | 683 | pheaders['proxy-authorization'] = pa |
|
537 | ||
|
538 | chunked = False | |
|
539 | if body and HDR_CONTENT_LENGTH not in hdrs: | |
|
540 | if getattr(body, '__len__', False): | |
|
541 | hdrs[HDR_CONTENT_LENGTH] = (HDR_CONTENT_LENGTH, len(body)) | |
|
542 | elif getattr(body, 'read', False): | |
|
543 | hdrs[HDR_XFER_ENCODING] = (HDR_XFER_ENCODING, | |
|
544 | XFER_ENCODING_CHUNKED) | |
|
545 | chunked = True | |
|
546 | else: | |
|
547 | raise BadRequestData('body has no __len__() nor read()') | |
|
684 | # Build header data | |
|
685 | outgoing_headers = self._buildheaders( | |
|
686 | method, path, hdrs, self.http_version) | |
|
548 | 687 | |
|
549 | 688 | # If we're reusing the underlying socket, there are some |
|
550 | 689 | # conditions where we'll want to retry, so make a note of the |
|
551 | 690 | # state of self.sock |
|
552 | 691 | fresh_socket = self.sock is None |
|
553 | 692 | self._connect(pheaders) |
|
554 | outgoing_headers = self._buildheaders( | |
|
555 | method, path, hdrs, self.http_version) | |
|
556 | 693 | response = None |
|
557 | 694 | first = True |
|
558 | 695 | |
@@ -592,8 +729,8 b' class HTTPConnection(object):' | |||
|
592 | 729 | try: |
|
593 | 730 | try: |
|
594 | 731 | data = r[0].recv(INCOMING_BUFFER_SIZE) |
|
595 |
except s |
|
|
596 |
if e.args[0] != s |
|
|
732 | except ssl.SSLError as e: | |
|
733 | if e.args[0] != ssl.SSL_ERROR_WANT_READ: | |
|
597 | 734 | raise |
|
598 | 735 | logger.debug('SSL_ERROR_WANT_READ while sending ' |
|
599 | 736 | 'data, retrying...') |
@@ -662,16 +799,20 b' class HTTPConnection(object):' | |||
|
662 | 799 | continue |
|
663 | 800 | if len(data) < OUTGOING_BUFFER_SIZE: |
|
664 | 801 | if chunked: |
|
665 | body = '0' + EOL + EOL | |
|
802 | body = b'0' + EOL + EOL | |
|
666 | 803 | else: |
|
667 | 804 | body = None |
|
668 | 805 | if chunked: |
|
669 | out = hex(len(data))[2:] + EOL + data + EOL | |
|
806 | # This encode is okay because we know | |
|
807 | # hex() is building us only 0-9 and a-f | |
|
808 | # digits. | |
|
809 | asciilen = hex(len(data))[2:].encode('ascii') | |
|
810 | out = asciilen + EOL + data + EOL | |
|
670 | 811 | else: |
|
671 | 812 | out = data |
|
672 | 813 | amt = w[0].send(out) |
|
673 | 814 | except socket.error as e: |
|
674 | if e[0] == socket.SSL_ERROR_WANT_WRITE and self.ssl: | 
|
815 | if e[0] == ssl.SSL_ERROR_WANT_WRITE and self.ssl: | |
|
675 | 816 | # This means that SSL hasn't flushed its buffer into |
|
676 | 817 | # the socket yet. |
|
677 | 818 | # TODO: find a way to block on ssl flushing its buffer |
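
The framing built above follows HTTP/1.1 chunked transfer encoding: a hexadecimal length line, the payload, a CRLF, and a final ``0`` chunk to end the body. A standalone sketch of the same framing::

    EOL = b'\r\n'

    def encodechunk(data):
        # hex() emits only ASCII 0-9a-f digits, so this encode cannot fail
        return hex(len(data))[2:].encode('ascii') + EOL + data + EOL

    body = encodechunk(b'hello') + b'0' + EOL + EOL
    # b'5\r\nhello\r\n0\r\n\r\n'
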
@@ -690,6 +831,7 b' class HTTPConnection(object):' | |||
|
690 | 831 | body = out[amt:] |
|
691 | 832 | else: |
|
692 | 833 | outgoing_headers = out[amt:] |
|
834 | # End of request-sending loop. | |
|
693 | 835 | |
|
694 | 836 | # close if the server response said to or responded before eating |
|
695 | 837 | # the whole request |
@@ -33,7 +33,12 b' have any clients outside of httpplus.' | |||
|
33 | 33 | """ |
|
34 | 34 | from __future__ import absolute_import |
|
35 | 35 | |
|
36 | import httplib | |
|
36 | try: | |
|
37 | import httplib | |
|
38 | httplib.HTTPException | |
|
39 | except ImportError: | |
|
40 | import http.client as httplib | |
|
41 | ||
|
37 | 42 | import logging |
|
38 | 43 | |
|
39 | 44 | logger = logging.getLogger(__name__) |
@@ -93,7 +98,7 b' class AbstractReader(object):' | |||
|
93 | 98 | need -= len(b) |
|
94 | 99 | if need == 0: |
|
95 | 100 | break |
|
96 | result = ''.join(blocks) | |
|
101 | result = b''.join(blocks) | |
|
97 | 102 | assert len(result) == amt or (self._finished and len(result) < amt) |
|
98 | 103 | |
|
99 | 104 | return result |
@@ -280,10 +280,9 b' class http2handler(urlreq.httphandler, u' | |||
|
280 | 280 | kwargs['keyfile'] = keyfile |
|
281 | 281 | kwargs['certfile'] = certfile |
|
282 | 282 | |
|
283 | kwargs.update(sslutil.sslkwargs(self.ui, host)) | |
|
284 | ||
|
285 | 283 | con = HTTPConnection(host, port, use_ssl=True, |
|
286 | 284 | ssl_wrap_socket=sslutil.wrapsocket, |
|
287 | ssl_validator=sslutil.validator(host), | 
|
285 | ssl_validator=sslutil.validatesocket, | |
|
286 | ui=self.ui, | |
|
288 | 287 | **kwargs) |
|
289 | 288 | return con |
@@ -9,7 +9,6 b'' | |||
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import errno |
|
12 | import httplib | |
|
13 | 12 | import os |
|
14 | 13 | import socket |
|
15 | 14 | import tempfile |
@@ -27,6 +26,7 b' from . import (' | |||
|
27 | 26 | wireproto, |
|
28 | 27 | ) |
|
29 | 28 | |
|
29 | httplib = util.httplib | |
|
30 | 30 | urlerr = util.urlerr |
|
31 | 31 | urlreq = util.urlreq |
|
32 | 32 | |
@@ -302,7 +302,7 b' def instance(ui, path, create):' | |||
|
302 | 302 | except error.RepoError as httpexception: |
|
303 | 303 | try: |
|
304 | 304 | r = statichttprepo.instance(ui, "static-" + path, create) |
|
305 | ui.note('(falling back to static-http)\n') | |
|
305 | ui.note(_('(falling back to static-http)\n')) | |
|
306 | 306 | return r |
|
307 | 307 | except error.RepoError: |
|
308 | 308 | raise httpexception # use the original http RepoError instead |
@@ -78,7 +78,7 b' def gettext(message):' | |||
|
78 | 78 | paragraphs = [p.decode("ascii") for p in message.split('\n\n')] |
|
79 | 79 | # Be careful not to translate the empty string -- it holds the |
|
80 | 80 | # meta data of the .po file. |
|
81 | u = u'\n\n'.join([p and _ugettext(p) or '' for p in paragraphs]) | |
|
81 | u = u'\n\n'.join([p and _ugettext(p) or u'' for p in paragraphs]) | |
|
82 | 82 | try: |
|
83 | 83 | # encoding.tolocal cannot be used since it will first try to |
|
84 | 84 | # decode the Unicode string. Calling u.decode(enc) really |
@@ -110,15 +110,16 b' EXTRA ATTRIBUTES AND METHODS' | |||
|
110 | 110 | from __future__ import absolute_import, print_function |
|
111 | 111 | |
|
112 | 112 | import errno |
|
113 | import httplib | |
|
|
113 | import hashlib | |
|
114 | 114 | import socket |
|
115 | 115 | import sys |
|
116 | import thread | |
|
116 | import threading | |
|
117 | 117 | |
|
118 | 118 | from . import ( |
|
119 | 119 | util, |
|
120 | 120 | ) |
|
121 | 121 | |
|
122 | httplib = util.httplib | |
|
122 | 123 | urlerr = util.urlerr |
|
123 | 124 | urlreq = util.urlreq |
|
124 | 125 | |
@@ -134,7 +135,7 b' class ConnectionManager(object):' | |||
|
134 | 135 | * keep track of all existing |
|
135 | 136 | """ |
|
136 | 137 | def __init__(self): |
|
137 |
self._lock = thread. |
|
|
138 | self._lock = threading.Lock() | |
|
138 | 139 | self._hostmap = {} # map hosts to a list of connections |
|
139 | 140 | self._connmap = {} # map connections to host |
|
140 | 141 | self._readymap = {} # map connection to ready state |
@@ -624,8 +625,7 b' def error_handler(url):' | |||
|
624 | 625 | keepalive_handler.close_all() |
|
625 | 626 | |
|
626 | 627 | def continuity(url): |
|
627 | from . import util | |
|
628 | md5 = util.md5 | |
|
628 | md5 = hashlib.md5 | |
|
629 | 629 | format = '%25s: %s' |
|
630 | 630 | |
|
631 | 631 | # first fetch the file with the normal http handler |
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | import hashlib | |
|
11 | 12 | import inspect |
|
12 | 13 | import os |
|
13 | 14 | import random |
@@ -57,16 +58,16 b' from . import (' | |||
|
57 | 58 | ) |
|
58 | 59 | |
|
59 | 60 | release = lockmod.release |
|
60 | propertycache = util.propertycache | |
|
61 | 61 | urlerr = util.urlerr |
|
62 | 62 | urlreq = util.urlreq |
|
63 | filecache = scmutil.filecache | |
|
64 | 63 | |
|
65 | class repofilecache(filecache): | |
|
64 | class repofilecache(scmutil.filecache): | |
|
66 | 65 | """All filecache usage on repo are done for logic that should be unfiltered |
|
67 | 66 | """ |
|
68 | 67 | |
|
69 | 68 | def __get__(self, repo, type=None): |
|
69 | if repo is None: | |
|
70 | return self | |
|
70 | 71 | return super(repofilecache, self).__get__(repo.unfiltered(), type) |
|
71 | 72 | def __set__(self, repo, value): |
|
72 | 73 | return super(repofilecache, self).__set__(repo.unfiltered(), value) |
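The new ``repo is None`` guard is the standard descriptor-protocol idiom: when a descriptor is looked up on the class rather than an instance, ``__get__`` receives ``None`` and should return the descriptor object itself. A self-contained illustration with hypothetical names:

    class cachedattr(object):
        def __init__(self, func):
            self.func = func
        def __get__(self, obj, objtype=None):
            if obj is None:
                return self  # class-level access, e.g. C.attr
            value = self.func(obj)
            obj.__dict__[self.func.__name__] = value  # cache on the instance
            return value

    class C(object):
        @cachedattr
        def attr(self):
            return 42

    assert isinstance(C.attr, cachedattr)  # would fail without the guard
    assert C().attr == 42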
@@ -78,7 +79,7 b' class storecache(repofilecache):' | |||
|
78 | 79 | def join(self, obj, fname): |
|
79 | 80 | return obj.sjoin(fname) |
|
80 | 81 | |
|
81 | class unfilteredpropertycache(propertycache): | |
|
82 | class unfilteredpropertycache(util.propertycache): | |
|
82 | 83 | """propertycache that apply to unfiltered repo only""" |
|
83 | 84 | |
|
84 | 85 | def __get__(self, repo, type=None): |
@@ -87,7 +88,7 b' class unfilteredpropertycache(propertyca' | |||
|
87 | 88 | return super(unfilteredpropertycache, self).__get__(unfi) |
|
88 | 89 | return getattr(unfi, self.name) |
|
89 | 90 | |
|
90 | class filteredpropertycache(propertycache): | |
|
91 | class filteredpropertycache(util.propertycache): | |
|
91 | 92 | """propertycache that must take filtering in account""" |
|
92 | 93 | |
|
93 | 94 | def cachevalue(self, obj, value): |
@@ -553,7 +554,10 b' class localrepository(object):' | |||
|
553 | 554 | The revset is specified as a string ``expr`` that may contain |
|
554 | 555 | %-formatting to escape certain types. See ``revset.formatspec``. |
|
555 | 556 | |
|
556 | Return a revset.abstractsmartset, which is a list-like interface | |
|
557 | Revset aliases from the configuration are not expanded. To expand | |
|
558 | user aliases, consider calling ``scmutil.revrange()``. | |
|
559 | ||
|
560 | Returns a revset.abstractsmartset, which is a list-like interface | |
|
557 | 561 | that contains integer revisions. |
|
558 | 562 | ''' |
|
559 | 563 | expr = revset.formatspec(expr, *args) |
@@ -565,6 +569,9 b' class localrepository(object):' | |||
|
565 | 569 | |
|
566 | 570 | This is a convenience wrapper around ``revs()`` that iterates the |
|
567 | 571 | result and is a generator of changectx instances. |
|
572 | ||
|
573 | Revset aliases from the configuration are not expanded. To expand | |
|
574 | user aliases, consider calling ``scmutil.revrange()``. | |
|
568 | 575 | ''' |
|
569 | 576 | for r in self.revs(expr, *args): |
|
570 | 577 | yield self[r] |
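A usage sketch for the documented split between the two APIs; the revset string and revision number are invented, and ``repo`` is assumed to be a localrepository in scope:

    # %-formatting is handled, but configured revset aliases are not:
    for ctx in repo.set('ancestors(%d) and not public()', rev):
        print(ctx.hex())

    # for user-supplied specs where [revsetalias] entries must apply:
    # revs = scmutil.revrange(repo, userspecs)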
@@ -881,12 +888,6 b' class localrepository(object):' | |||
|
881 | 888 | f = f[1:] |
|
882 | 889 | return filelog.filelog(self.svfs, f) |
|
883 | 890 | |
|
884 | def parents(self, changeid=None): | |
|
885 | '''get list of changectxs for parents of changeid''' | |
|
886 | msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid | |
|
887 | self.ui.deprecwarn(msg, '3.7') | |
|
888 | return self[changeid].parents() | |
|
889 | ||
|
890 | 891 | def changectx(self, changeid): |
|
891 | 892 | return self[changeid] |
|
892 | 893 | |
@@ -1008,7 +1009,8 b' class localrepository(object):' | |||
|
1008 | 1009 | or self.ui.configbool('devel', 'check-locks')): |
|
1009 | 1010 | l = self._lockref and self._lockref() |
|
1010 | 1011 | if l is None or not l.held: |
|
1011 | self.ui.develwarn('transaction with no lock') | |
|
1012 | raise RuntimeError('programming error: transaction requires ' | |
|
1013 | 'locking') | |
|
1012 | 1014 | tr = self.currenttransaction() |
|
1013 | 1015 | if tr is not None: |
|
1014 | 1016 | return tr.nest() |
@@ -1019,11 +1021,8 b' class localrepository(object):' | |||
|
1019 | 1021 | _("abandoned transaction found"), |
|
1020 | 1022 | hint=_("run 'hg recover' to clean up transaction")) |
|
1021 | 1023 | |
|
1022 | # make journal.dirstate contain in-memory changes at this point | |
|
1023 | self.dirstate.write(None) | |
|
1024 | ||
|
1025 | 1024 | idbase = "%.40f#%f" % (random.random(), time.time()) |
|
1026 | txnid = 'TXN:' + util.sha1(idbase).hexdigest() | |
|
|
1025 | txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest() | |
|
1027 | 1026 | self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid) |
|
1028 | 1027 | |
|
1029 | 1028 | self._writejournal(desc) |
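The transaction id visible above is only opaque uniqueness: hash some randomness plus the clock so concurrent transactions are distinguishable in hooks. A sketch of the same recipe (an encode is added so it also runs on Python 3):

    import hashlib
    import random
    import time

    idbase = "%.40f#%f" % (random.random(), time.time())
    txnid = 'TXN:' + hashlib.sha1(idbase.encode('ascii')).hexdigest()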
@@ -1049,13 +1048,9 b' class localrepository(object):' | |||
|
1049 | 1048 | # transaction running |
|
1050 | 1049 | repo.dirstate.write(None) |
|
1051 | 1050 | else: |
|
1052 | # prevent in-memory changes from being written out at | |
|
1053 | # the end of outer wlock scope or so | |
|
1054 | repo.dirstate.invalidate() | |
|
1055 | ||
|
1056 | 1051 | # discard all changes (including ones already written |
|
1057 | 1052 | # out) in this transaction |
|
1058 | repo.vfs.rename('journal.dirstate', 'dirstate') | |
|
1053 | repo.dirstate.restorebackup(None, prefix='journal.') | |
|
1059 | 1054 | |
|
1060 | 1055 | repo.invalidate(clearfilecache=True) |
|
1061 | 1056 | |
@@ -1110,8 +1105,7 b' class localrepository(object):' | |||
|
1110 | 1105 | return [(vfs, undoname(x)) for vfs, x in self._journalfiles()] |
|
1111 | 1106 | |
|
1112 | 1107 | def _writejournal(self, desc): |
|
1113 | self.vfs.write("journal.dirstate", | |
|
1114 | self.vfs.tryread("dirstate")) | |
|
1108 | self.dirstate.savebackup(None, prefix='journal.') | |
|
1115 | 1109 | self.vfs.write("journal.branch", |
|
1116 | 1110 | encoding.fromlocal(self.dirstate.branch())) |
|
1117 | 1111 | self.vfs.write("journal.desc", |
@@ -1186,9 +1180,9 b' class localrepository(object):' | |||
|
1186 | 1180 | vfsmap = {'plain': self.vfs, '': self.svfs} |
|
1187 | 1181 | transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn) |
|
1188 | 1182 | if self.vfs.exists('undo.bookmarks'): |
|
1189 | self.vfs.rename('undo.bookmarks', 'bookmarks') | |
|
1183 | self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True) | |
|
1190 | 1184 | if self.svfs.exists('undo.phaseroots'): |
|
1191 | self.svfs.rename('undo.phaseroots', 'phaseroots') | |
|
1185 | self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True) | |
|
1192 | 1186 | self.invalidate() |
|
1193 | 1187 | |
|
1194 | 1188 | parentgone = (parents[0] not in self.changelog.nodemap or |
@@ -1197,7 +1191,7 b' class localrepository(object):' | |||
|
1197 | 1191 | # prevent dirstateguard from overwriting already restored one |
|
1198 | 1192 | dsguard.close() |
|
1199 | 1193 | |
|
1200 | self.vfs.rename('undo.dirstate', 'dirstate') | |
|
1194 | self.dirstate.restorebackup(None, prefix='undo.') | |
|
1201 | 1195 | try: |
|
1202 | 1196 | branch = self.vfs.read('undo.branch') |
|
1203 | 1197 | self.dirstate.setbranch(encoding.tolocal(branch)) |
@@ -1206,7 +1200,6 b' class localrepository(object):' | |||
|
1206 | 1200 | 'current branch is still \'%s\'\n') |
|
1207 | 1201 | % self.dirstate.branch()) |
|
1208 | 1202 | |
|
1209 | self.dirstate.invalidate() | |
|
1210 | 1203 | parents = tuple([p.rev() for p in self[None].parents()]) |
|
1211 | 1204 | if len(parents) > 1: |
|
1212 | 1205 | ui.status(_('working directory now based on ' |
@@ -41,16 +41,16 b' def _unifiedheaderinit(self, *args, **kw' | |||
|
41 | 41 | kw['continuation_ws'] = ' ' |
|
42 | 42 | _oldheaderinit(self, *args, **kw) |
|
43 | 43 | |
|
44 |
email. |
|
|
44 | setattr(email.header.Header, '__init__', _unifiedheaderinit) | |
|
45 | 45 | |
|
46 | 46 | class STARTTLS(smtplib.SMTP): |
|
47 | 47 | '''Derived class to verify the peer certificate for STARTTLS. |
|
48 | 48 | |
|
49 | 49 | This class allows to pass any keyword arguments to SSL socket creation. |
|
50 | 50 | ''' |
|
51 | def __init__(self, sslkwargs, host=None, **kwargs): | |
|
|
51 | def __init__(self, ui, host=None, **kwargs): | |
|
52 | 52 | smtplib.SMTP.__init__(self, **kwargs) |
|
53 | self._sslkwargs = sslkwargs | |
|
|
53 | self._ui = ui | |
|
54 | 54 | self._host = host |
|
55 | 55 | |
|
56 | 56 | def starttls(self, keyfile=None, certfile=None): |
@@ -60,8 +60,8 b' class STARTTLS(smtplib.SMTP):' | |||
|
60 | 60 | (resp, reply) = self.docmd("STARTTLS") |
|
61 | 61 | if resp == 220: |
|
62 | 62 | self.sock = sslutil.wrapsocket(self.sock, keyfile, certfile, |
|
63 |
|
|
|
64 |
|
|
|
63 | ui=self._ui, | |
|
64 | serverhostname=self._host) | |
|
65 | 65 | self.file = smtplib.SSLFakeFile(self.sock) |
|
66 | 66 | self.helo_resp = None |
|
67 | 67 | self.ehlo_resp = None |
@@ -74,14 +74,14 b' class SMTPS(smtplib.SMTP):' | |||
|
74 | 74 | |
|
75 | 75 | This class allows to pass any keyword arguments to SSL socket creation. |
|
76 | 76 | ''' |
|
77 | def __init__(self, sslkwargs, keyfile=None, certfile=None, host=None, | |
|
|
77 | def __init__(self, ui, keyfile=None, certfile=None, host=None, | |
|
78 | 78 | **kwargs): |
|
79 | 79 | self.keyfile = keyfile |
|
80 | 80 | self.certfile = certfile |
|
81 | 81 | smtplib.SMTP.__init__(self, **kwargs) |
|
82 | 82 | self._host = host |
|
83 | 83 | self.default_port = smtplib.SMTP_SSL_PORT |
|
84 | self._sslkwargs = sslkwargs | |
|
|
84 | self._ui = ui | |
|
85 | 85 | |
|
86 | 86 | def _get_socket(self, host, port, timeout): |
|
87 | 87 | if self.debuglevel > 0: |
@@ -89,8 +89,8 b' class SMTPS(smtplib.SMTP):' | |||
|
89 | 89 | new_socket = socket.create_connection((host, port), timeout) |
|
90 | 90 | new_socket = sslutil.wrapsocket(new_socket, |
|
91 | 91 | self.keyfile, self.certfile, |
|
92 |
|
|
|
93 |
|
|
|
92 | ui=self._ui, | |
|
93 | serverhostname=self._host) | |
|
94 | 94 | self.file = smtplib.SSLFakeFile(new_socket) |
|
95 | 95 | return new_socket |
|
96 | 96 | |
@@ -106,22 +106,11 b' def _smtp(ui):' | |||
|
106 | 106 | mailhost = ui.config('smtp', 'host') |
|
107 | 107 | if not mailhost: |
|
108 | 108 | raise error.Abort(_('smtp.host not configured - cannot send mail')) |
|
109 | verifycert = ui.config('smtp', 'verifycert', 'strict') | |
|
110 | if verifycert not in ['strict', 'loose']: | |
|
111 | if util.parsebool(verifycert) is not False: | |
|
112 | raise error.Abort(_('invalid smtp.verifycert configuration: %s') | |
|
113 | % (verifycert)) | |
|
114 | verifycert = False | |
|
115 | if (starttls or smtps) and verifycert: | |
|
116 | sslkwargs = sslutil.sslkwargs(ui, mailhost) | |
|
117 | else: | |
|
118 | # 'ui' is required by sslutil.wrapsocket() and set by sslkwargs() | |
|
119 | sslkwargs = {'ui': ui} | |
|
120 | 109 | if smtps: |
|
121 | 110 | ui.note(_('(using smtps)\n')) |
|
122 | s = SMTPS(sslkwargs, local_hostname=local_hostname, host=mailhost) | |
|
|
111 | s = SMTPS(ui, local_hostname=local_hostname, host=mailhost) | |
|
123 | 112 | elif starttls: |
|
124 | s = STARTTLS(sslkwargs, local_hostname=local_hostname, host=mailhost) | |
|
|
113 | s = STARTTLS(ui, local_hostname=local_hostname, host=mailhost) | |
|
125 | 114 | else: |
|
126 | 115 | s = smtplib.SMTP(local_hostname=local_hostname) |
|
127 | 116 | if smtps: |
@@ -137,9 +126,9 b' def _smtp(ui):' | |||
|
137 | 126 | s.ehlo() |
|
138 | 127 | s.starttls() |
|
139 | 128 | s.ehlo() |
|
140 | if verifycert: | |
|
|
129 | if starttls or smtps: | |
|
141 | 130 | ui.note(_('(verifying remote certificate)\n')) |
|
142 | sslutil.validator(ui, mailhost)(s.sock, verify=True) | |
|
|
131 | sslutil.validatesocket(s.sock) | |
|
143 | 132 | username = ui.config('smtp', 'username') |
|
144 | 133 | password = ui.config('smtp', 'password') |
|
145 | 134 | if username and not password: |
@@ -211,8 +211,10 b' class manifestdict(object):' | |||
|
211 | 211 | |
|
212 | 212 | def filesnotin(self, m2): |
|
213 | 213 | '''Set of files in this manifest that are not in the other''' |
|
214 | files = set(self) | |
|
|
|
215 | files.difference_update(m2) | |
|
214 | diff = self.diff(m2) | |
|
215 | files = set(filepath | |
|
216 | for filepath, hashflags in diff.iteritems() | |
|
217 | if hashflags[1][0] is None) | |
|
216 | 218 | return files |
|
217 | 219 | |
|
218 | 220 | @propertycache |
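The rewritten filesnotin() reads straight off the diff structure: diff() maps each differing path to a pair of (node, flags) tuples, and a None node on the m2 side means the file is absent there. A standalone sketch of that selection, with invented sample data:

    def filesnotin(diffmap):
        # keep paths whose m2-side node is None, i.e. missing from m2
        return set(path for path, (m1pair, m2pair) in diffmap.items()
                   if m2pair[0] is None)

    sample = {'a.txt': (('1111', ''), (None, None)),   # only in m1
              'b.txt': (('2222', ''), ('3333', ''))}   # in both, changed
    assert filesnotin(sample) == {'a.txt'}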
@@ -966,7 +968,7 b' class manifest(revlog.revlog):' | |||
|
966 | 968 | return self.readdelta(node) |
|
967 | 969 | if self._usemanifestv2: |
|
968 | 970 | raise error.Abort( |
|
969 | "readshallowdelta() not implemented for manifestv2") | |
|
971 | _("readshallowdelta() not implemented for manifestv2")) | |
|
970 | 972 | r = self.rev(node) |
|
971 | 973 | d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r)) |
|
972 | 974 | return manifestdict(d) |
@@ -38,7 +38,7 b' def _expandsets(kindpats, ctx, listsubre' | |||
|
38 | 38 | for kind, pat, source in kindpats: |
|
39 | 39 | if kind == 'set': |
|
40 | 40 | if not ctx: |
|
41 | raise error.Abort("fileset expression with no context") | |
|
41 | raise error.Abort(_("fileset expression with no context")) | |
|
42 | 42 | s = ctx.getfileset(pat) |
|
43 | 43 | fset.update(s) |
|
44 | 44 |
@@ -58,10 +58,8 b' class diffopts(object):' | |||
|
58 | 58 | 'upgrade': False, |
|
59 | 59 | } |
|
60 | 60 | |
|
61 | __slots__ = defaults.keys() | |
|
62 | ||
|
63 | 61 | def __init__(self, **opts): |
|
64 |
for k in self. |
|
|
62 | for k in self.defaults.keys(): | |
|
65 | 63 | v = opts.get(k) |
|
66 | 64 | if v is None: |
|
67 | 65 | v = self.defaults[k] |
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | import hashlib | |
|
11 | 12 | import os |
|
12 | 13 | import shutil |
|
13 | 14 | import struct |
@@ -373,7 +374,7 b' class mergestate(object):' | |||
|
373 | 374 | """Write current state on disk in a version 1 file""" |
|
374 | 375 | f = self._repo.vfs(self.statepathv1, 'w') |
|
375 | 376 | irecords = iter(records) |
|
376 |
lrecords = |
|
|
377 | lrecords = next(irecords) | |
|
377 | 378 | assert lrecords[0] == 'L' |
|
378 | 379 | f.write(hex(self._local) + '\n') |
|
379 | 380 | for rtype, data in irecords: |
@@ -408,7 +409,7 b' class mergestate(object):' | |||
|
408 | 409 | if fcl.isabsent(): |
|
409 | 410 | hash = nullhex |
|
410 | 411 | else: |
|
411 |
hash = |
|
|
412 | hash = hashlib.sha1(fcl.path()).hexdigest() | |
|
412 | 413 | self._repo.vfs.write('merge/' + hash, fcl.data()) |
|
413 | 414 | self._state[fd] = ['u', hash, fcl.path(), |
|
414 | 415 | fca.path(), hex(fca.filenode()), |
@@ -989,19 +990,19 b' def calculateupdates(repo, wctx, mctx, a' | |||
|
989 | 990 | if len(bids) == 1: # all bids are the same kind of method |
|
990 | 991 | m, l = bids.items()[0] |
|
991 | 992 | if all(a == l[0] for a in l[1:]): # len(bids) is > 1 |
|
992 | repo.ui.note(" %s: consensus for %s\n" % (f, m)) | |
|
993 | repo.ui.note(_(" %s: consensus for %s\n") % (f, m)) | |
|
993 | 994 | actions[f] = l[0] |
|
994 | 995 | continue |
|
995 | 996 | # If keep is an option, just do it. |
|
996 | 997 | if 'k' in bids: |
|
997 | repo.ui.note(" %s: picking 'keep' action\n" % f) | |
|
998 | repo.ui.note(_(" %s: picking 'keep' action\n") % f) | |
|
998 | 999 | actions[f] = bids['k'][0] |
|
999 | 1000 | continue |
|
1000 | 1001 | # If there are gets and they all agree [how could they not?], do it. |
|
1001 | 1002 | if 'g' in bids: |
|
1002 | 1003 | ga0 = bids['g'][0] |
|
1003 | 1004 | if all(a == ga0 for a in bids['g'][1:]): |
|
1004 | repo.ui.note(" %s: picking 'get' action\n" % f) | |
|
1005 | repo.ui.note(_(" %s: picking 'get' action\n") % f) | |
|
1005 | 1006 | actions[f] = ga0 |
|
1006 | 1007 | continue |
|
1007 | 1008 | # TODO: Consider other simple actions such as mode changes |
@@ -1075,15 +1076,14 b' def batchget(repo, mctx, actions):' | |||
|
1075 | 1076 | absf = repo.wjoin(f) |
|
1076 | 1077 | orig = scmutil.origpath(ui, repo, absf) |
|
1077 | 1078 | try: |
|
1078 | # TODO Mercurial has always aborted if an untracked | |
|
1079 | # directory is replaced by a tracked file, or generally | |
|
1080 | # with file/directory merges. This needs to be sorted out. | |
|
1081 | 1079 | if repo.wvfs.isfileorlink(f): |
|
1082 | 1080 | util.rename(absf, orig) |
|
1083 | 1081 | except OSError as e: |
|
1084 | 1082 | if e.errno != errno.ENOENT: |
|
1085 | 1083 | raise |
|
1086 | 1084 | |
|
1085 | if repo.wvfs.isdir(f): | |
|
1086 | repo.wvfs.removedirs(f) | |
|
1087 | 1087 | wwrite(f, fctx(f).data(), flags, backgroundclose=True) |
|
1088 | 1088 | if i == 100: |
|
1089 | 1089 | yield i, f |
@@ -1442,9 +1442,7 b' def update(repo, node, branchmerge, forc' | |||
|
1442 | 1442 | pas = [repo[ancestor]] |
|
1443 | 1443 | |
|
1444 | 1444 | if node is None: |
|
1445 | if (repo.ui.configbool('devel', 'all-warnings') | |
|
1446 | or repo.ui.configbool('devel', 'oldapi')): | |
|
1447 | repo.ui.develwarn('update with no target') | |
|
1445 | repo.ui.deprecwarn('update with no target', '3.9') | |
|
1448 | 1446 | rev, _mark, _act = destutil.destupdate(repo) |
|
1449 | 1447 | node = repo[rev].node() |
|
1450 | 1448 |
@@ -26,6 +26,7 b'' | |||
|
26 | 26 | #include <string.h> |
|
27 | 27 | |
|
28 | 28 | #include "util.h" |
|
29 | #include "bitmanipulation.h" | |
|
29 | 30 | |
|
30 | 31 | static char mpatch_doc[] = "Efficient binary patching."; |
|
31 | 32 | static PyObject *mpatch_Error; |
@@ -600,8 +600,8 b' class obsstore(object):' | |||
|
600 | 600 | Take care of filtering duplicate. |
|
601 | 601 | Return the number of new marker.""" |
|
602 | 602 | if self._readonly: |
|
603 | raise error.Abort('creating obsolete markers is not enabled on ' | |
|
604 | 'this repo') | |
|
603 | raise error.Abort(_('creating obsolete markers is not enabled on ' | |
|
604 | 'this repo')) | |
|
605 | 605 | known = set(self._all) |
|
606 | 606 | new = [] |
|
607 | 607 | for m in markers: |
@@ -1171,7 +1171,7 b' def _computebumpedset(repo):' | |||
|
1171 | 1171 | ignoreflags=bumpedfix): |
|
1172 | 1172 | prev = torev(pnode) # unfiltered! but so is phasecache |
|
1173 | 1173 | if (prev is not None) and (phase(repo, prev) <= public): |
|
1174 |
# we have a public precursor |
|
|
1174 | # we have a public precursor | |
|
1175 | 1175 | bumped.add(rev) |
|
1176 | 1176 | break # Next draft! |
|
1177 | 1177 | return bumped |
@@ -1234,7 +1234,7 b' def createmarkers(repo, relations, flag=' | |||
|
1234 | 1234 | localmetadata.update(rel[2]) |
|
1235 | 1235 | |
|
1236 | 1236 | if not prec.mutable(): |
|
1237 | raise error.Abort("cannot obsolete public changeset: %s" | |
|
1237 | raise error.Abort(_("cannot obsolete public changeset: %s") | |
|
1238 | 1238 | % prec, |
|
1239 | 1239 | hint='see "hg help phases" for details') |
|
1240 | 1240 | nprec = prec.node() |
@@ -1243,7 +1243,8 b' def createmarkers(repo, relations, flag=' | |||
|
1243 | 1243 | if not nsucs: |
|
1244 | 1244 | npare = tuple(p.node() for p in prec.parents()) |
|
1245 | 1245 | if nprec in nsucs: |
|
1246 | raise error.Abort("changeset %s cannot obsolete itself" % prec) | |
|
|
1246 | raise error.Abort(_("changeset %s cannot obsolete itself") | |
|
1247 | % prec) | |
|
1247 | 1248 | |
|
1248 | 1249 | # Creating the marker causes the hidden cache to become invalid, |
|
1249 | 1250 | # which causes recomputation when we ask for prec.parents() above. |
@@ -325,13 +325,13 b' class basealiasrules(object):' | |||
|
325 | 325 | >>> builddecl('foo') |
|
326 | 326 | ('foo', None, None) |
|
327 | 327 | >>> builddecl('$foo') |
|
328 |
('$foo', None, " |
|
|
328 | ('$foo', None, "invalid symbol '$foo'") | |
|
329 | 329 | >>> builddecl('foo::bar') |
|
330 | 330 | ('foo::bar', None, 'invalid format') |
|
331 | 331 | >>> builddecl('foo()') |
|
332 | 332 | ('foo', [], None) |
|
333 | 333 | >>> builddecl('$foo()') |
|
334 |
('$foo()', None, " |
|
|
334 | ('$foo()', None, "invalid function '$foo'") | |
|
335 | 335 | >>> builddecl('foo($1, $2)') |
|
336 | 336 | ('foo', ['$1', '$2'], None) |
|
337 | 337 | >>> builddecl('foo(bar_bar, baz.baz)') |
@@ -358,7 +358,7 b' class basealiasrules(object):' | |||
|
358 | 358 | # "name = ...." style |
|
359 | 359 | name = tree[1] |
|
360 | 360 | if name.startswith('$'): |
|
361 |
return (decl, None, _("' |
|
|
361 | return (decl, None, _("invalid symbol '%s'") % name) | |
|
362 | 362 | return (name, None, None) |
|
363 | 363 | |
|
364 | 364 | func = cls._trygetfunc(tree) |
@@ -366,7 +366,7 b' class basealiasrules(object):' | |||
|
366 | 366 | # "name(arg, ....) = ...." style |
|
367 | 367 | name, args = func |
|
368 | 368 | if name.startswith('$'): |
|
369 |
return (decl, None, _("' |
|
|
369 | return (decl, None, _("invalid function '%s'") % name) | |
|
370 | 370 | if any(t[0] != cls._symbolnode for t in args): |
|
371 | 371 | return (decl, None, _("invalid argument list")) |
|
372 | 372 | if len(args) != len(set(args)): |
@@ -389,7 +389,7 b' class basealiasrules(object):' | |||
|
389 | 389 | if sym in args: |
|
390 | 390 | op = '_aliasarg' |
|
391 | 391 | elif sym.startswith('$'): |
|
392 |
raise error.ParseError(_("' |
|
|
392 | raise error.ParseError(_("invalid symbol '%s'") % sym) | |
|
393 | 393 | return (op, sym) |
|
394 | 394 | |
|
395 | 395 | @classmethod |
@@ -423,7 +423,7 b' class basealiasrules(object):' | |||
|
423 | 423 | ... builddefn('$1 or $bar', args) |
|
424 | 424 | ... except error.ParseError as inst: |
|
425 | 425 | ... print parseerrordetail(inst) |
|
426 | '$' not for alias arguments | |
|
426 | invalid symbol '$bar' | |
|
427 | 427 | >>> args = ['$1', '$10', 'foo'] |
|
428 | 428 | >>> pprint(builddefn('$10 or baz', args)) |
|
429 | 429 | (or |
@@ -447,15 +447,13 b' class basealiasrules(object):' | |||
|
447 | 447 | repl = efmt = None |
|
448 | 448 | name, args, err = cls._builddecl(decl) |
|
449 | 449 | if err: |
|
450 |
efmt = _(' |
|
|
451 | '"%(name)s": %(error)s') | |
|
450 | efmt = _('bad declaration of %(section)s "%(name)s": %(error)s') | |
|
452 | 451 | else: |
|
453 | 452 | try: |
|
454 | 453 | repl = cls._builddefn(defn, args) |
|
455 | 454 | except error.ParseError as inst: |
|
456 | 455 | err = parseerrordetail(inst) |
|
457 |
efmt = _(' |
|
|
458 | '"%(name)s": %(error)s') | |
|
456 | efmt = _('bad definition of %(section)s "%(name)s": %(error)s') | |
|
459 | 457 | if err: |
|
460 | 458 | err = efmt % {'section': cls._section, 'name': name, 'error': err} |
|
461 | 459 | return alias(name, args, err, repl) |
@@ -13,6 +13,7 b'' | |||
|
13 | 13 | #include <string.h> |
|
14 | 14 | |
|
15 | 15 | #include "util.h" |
|
16 | #include "bitmanipulation.h" | |
|
16 | 17 | |
|
17 | 18 | static char *versionerrortext = "Python minor version mismatch"; |
|
18 | 19 |
@@ -12,6 +12,7 b' import collections' | |||
|
12 | 12 | import copy |
|
13 | 13 | import email |
|
14 | 14 | import errno |
|
15 | import hashlib | |
|
15 | 16 | import os |
|
16 | 17 | import posixpath |
|
17 | 18 | import re |
@@ -978,7 +979,19 b' class recordhunk(object):' | |||
|
978 | 979 | def filterpatch(ui, headers, operation=None): |
|
979 | 980 | """Interactively filter patch chunks into applied-only chunks""" |
|
980 | 981 | if operation is None: |
|
981 |
operation = |
|
|
982 | operation = 'record' | |
|
983 | messages = { | |
|
984 | 'multiple': { | |
|
985 | 'discard': _("discard change %d/%d to '%s'?"), | |
|
986 | 'record': _("record change %d/%d to '%s'?"), | |
|
987 | 'revert': _("revert change %d/%d to '%s'?"), | |
|
988 | }[operation], | |
|
989 | 'single': { | |
|
990 | 'discard': _("discard this change to '%s'?"), | |
|
991 | 'record': _("record this change to '%s'?"), | |
|
992 | 'revert': _("revert this change to '%s'?"), | |
|
993 | }[operation], | |
|
994 | } | |
|
982 | 995 | |
|
983 | 996 | def prompt(skipfile, skipall, query, chunk): |
|
984 | 997 | """prompt query, and process base inputs |
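Keying whole sentences by operation, as the new ``messages`` table does, keeps each prompt translatable as a unit instead of splicing a verb into one template. A reduced sketch of the lookup:

    messages = {
        'single': {
            'discard': "discard this change to '%s'?",
            'record': "record this change to '%s'?",
            'revert': "revert this change to '%s'?",
        },
    }

    def prompttext(operation, filename):
        return messages['single'][operation] % filename

    assert prompttext('revert', 'a.txt') == "revert this change to 'a.txt'?"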
@@ -1109,11 +1122,10 b' the hunk is left unchanged.' | |||
|
1109 | 1122 | if skipfile is None and skipall is None: |
|
1110 | 1123 | chunk.pretty(ui) |
|
1111 | 1124 | if total == 1: |
|
1112 | msg = _("record this change to '%s'?") % chunk.filename() | |
|
|
1125 | msg = messages['single'] % chunk.filename() | |
|
1113 | 1126 | else: |
|
1114 | 1127 | idx = pos - len(h.hunks) + i |
|
1115 | msg = _("record change %d/%d to '%s'?") % (idx, total, | |
|
1116 | chunk.filename()) | |
|
1128 | msg = messages['multiple'] % (idx, total, chunk.filename()) | |
|
1117 | 1129 | r, skipfile, skipall, newpatches = prompt(skipfile, |
|
1118 | 1130 | skipall, msg, chunk) |
|
1119 | 1131 | if r: |
@@ -2172,7 +2184,7 b' def difffeatureopts(ui, opts=None, untru' | |||
|
2172 | 2184 | return mdiff.diffopts(**buildopts) |
|
2173 | 2185 | |
|
2174 | 2186 | def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None, |
|
2175 | losedatafn=None, prefix='', relroot=''): | |
|
2187 | losedatafn=None, prefix='', relroot='', copy=None): | |
|
2176 | 2188 | '''yields diff of changes to files between two nodes, or node and |
|
2177 | 2189 | working directory. |
|
2178 | 2190 | |
@@ -2191,7 +2203,10 b' def diff(repo, node1=None, node2=None, m' | |||
|
2191 | 2203 | display (used for subrepos). |
|
2192 | 2204 | |
|
2193 | 2205 | relroot, if not empty, must be normalized with a trailing /. Any match |
|
2194 | patterns that fall outside it will be ignored.''' | |
|
|
2206 | patterns that fall outside it will be ignored. | |
|
2207 | ||
|
2208 | copy, if not empty, should contain mappings {dst@y: src@x} of copy | |
|
2209 | information.''' | |
|
2195 | 2210 | |
|
2196 | 2211 | if opts is None: |
|
2197 | 2212 | opts = mdiff.defaultopts |
@@ -2238,9 +2253,10 b' def diff(repo, node1=None, node2=None, m' | |||
|
2238 | 2253 | hexfunc = short |
|
2239 | 2254 | revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node] |
|
2240 | 2255 | |
|
2241 | copy = {} | |
|
|
2242 | if opts.git or opts.upgrade: | |
|
2243 | copy = copies.pathcopies(ctx1, ctx2, match=match) | |
|
2256 | if copy is None: | |
|
2257 | copy = {} | |
|
2258 | if opts.git or opts.upgrade: | |
|
2259 | copy = copies.pathcopies(ctx1, ctx2, match=match) | |
|
2244 | 2260 | |
|
2245 | 2261 | if relroot is not None: |
|
2246 | 2262 | if not relfiltered: |
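The new ``copy`` argument lets a caller hand in a precomputed {dst: src} mapping and skip copy tracing entirely. The dispatch the hunk adds, reduced to a sketch with stand-in names:

    def resolvecopy(copy, computecopies, wantgit):
        if copy is None:
            copy = {}
            if wantgit:
                copy = computecopies()  # e.g. copies.pathcopies(ctx1, ctx2)
        return copy

    # a caller-supplied mapping short-circuits recomputation:
    assert resolvecopy({'new': 'old'}, lambda: {}, True) == {'new': 'old'}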
@@ -2401,7 +2417,7 b' def trydiff(repo, revs, ctx1, ctx2, modi' | |||
|
2401 | 2417 | if not text: |
|
2402 | 2418 | text = "" |
|
2403 | 2419 | l = len(text) |
|
2404 | s = util.sha1('blob %d\0' % l) | |
|
|
2420 | s = hashlib.sha1('blob %d\0' % l) | |
|
2405 | 2421 | s.update(text) |
|
2406 | 2422 | return s.hexdigest() |
|
2407 | 2423 |
@@ -653,24 +653,24 b' static int sha1hash(char hash[20], const' | |||
|
653 | 653 | PyObject *shaobj, *hashobj; |
|
654 | 654 | |
|
655 | 655 | if (shafunc == NULL) { |
|
656 | PyObject *util, *name = PyString_FromString("mercurial.util"); | |
|
|
656 | PyObject *hashlib, *name = PyString_FromString("hashlib"); | |
|
657 | 657 | |
|
658 | 658 | if (name == NULL) |
|
659 | 659 | return -1; |
|
660 | 660 | |
|
661 |
|
|
|
661 | hashlib = PyImport_Import(name); | |
|
662 | 662 | Py_DECREF(name); |
|
663 | 663 | |
|
664 | if (util == NULL) { | |
|
|
665 | PyErr_SetString(PyExc_ImportError, "mercurial.util"); | |
|
|
664 | if (hashlib == NULL) { | |
|
665 | PyErr_SetString(PyExc_ImportError, "hashlib"); | |
|
666 | 666 | return -1; |
|
667 | 667 | } |
|
668 | shafunc = PyObject_GetAttrString(util, "sha1"); | |
|
|
669 | Py_DECREF(util); | |
|
|
668 | shafunc = PyObject_GetAttrString(hashlib, "sha1"); | |
|
669 | Py_DECREF(hashlib); | |
|
670 | 670 | |
|
671 | 671 | if (shafunc == NULL) { |
|
672 | 672 | PyErr_SetString(PyExc_AttributeError, |
|
673 | "module 'mercurial.util' has no " | |
|
|
673 | "module 'hashlib' has no " | |
|
674 | 674 | "attribute 'sha1'"); |
|
675 | 675 | return -1; |
|
676 | 676 | } |
@@ -98,12 +98,12 b' def batchable(f):' | |||
|
98 | 98 | ''' |
|
99 | 99 | def plain(*args, **opts): |
|
100 | 100 | batchable = f(*args, **opts) |
|
101 | encargsorres, encresref = batchable.next() | |
|
|
101 | encargsorres, encresref = next(batchable) | |
|
102 | 102 | if not encresref: |
|
103 | 103 | return encargsorres # a local result in this case |
|
104 | 104 | self = args[0] |
|
105 | 105 | encresref.set(self._submitone(f.func_name, encargsorres)) |
|
106 | return batchable.next() | |
|
|
106 | return next(batchable) | |
|
107 | 107 | setattr(plain, 'batchable', f) |
|
108 | 108 | return plain |
|
109 | 109 |
@@ -251,7 +251,7 b' class phasecache(object):' | |||
|
251 | 251 | def write(self): |
|
252 | 252 | if not self.dirty: |
|
253 | 253 | return |
|
254 | f = self.opener('phaseroots', 'w', atomictemp=True) | |
|
254 | f = self.opener('phaseroots', 'w', atomictemp=True, checkambig=True) | |
|
255 | 255 | try: |
|
256 | 256 | self._write(f) |
|
257 | 257 | finally: |
@@ -598,3 +598,18 b' def readpipe(pipe):' | |||
|
598 | 598 | return ''.join(chunks) |
|
599 | 599 | finally: |
|
600 | 600 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) |
|
601 | ||
|
602 | def bindunixsocket(sock, path): | |
|
603 | """Bind the UNIX domain socket to the specified path""" | |
|
604 | # use relative path instead of full path at bind() if possible, since | |
|
605 | # AF_UNIX path has very small length limit (107 chars) on common | |
|
606 | # platforms (see sys/un.h) | |
|
607 | dirname, basename = os.path.split(path) | |
|
608 | bakwdfd = None | |
|
609 | if dirname: | |
|
610 | bakwdfd = os.open('.', os.O_DIRECTORY) | |
|
611 | os.chdir(dirname) | |
|
612 | sock.bind(basename) | |
|
613 | if bakwdfd: | |
|
614 | os.fchdir(bakwdfd) | |
|
615 | os.close(bakwdfd) |
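A hedged usage sketch for bindunixsocket() as defined above: the chdir trick keeps the bound name short, so only the basename counts against the small sun_path limit. The socket path is illustrative and the call is POSIX-only:

    import socket

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # a long directory prefix no longer overflows the ~107-byte limit:
    bindunixsocket(sock, '/tmp/some/deeply/nested/run/dir/daemon.sock')
    sock.listen(1)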
@@ -14,6 +14,10 b' import socket' | |||
|
14 | 14 | import stat as statmod |
|
15 | 15 | import sys |
|
16 | 16 | |
|
17 | from . import policy | |
|
18 | modulepolicy = policy.policy | |
|
19 | policynocffi = policy.policynocffi | |
|
20 | ||
|
17 | 21 | def _mode_to_kind(mode): |
|
18 | 22 | if statmod.S_ISREG(mode): |
|
19 | 23 | return statmod.S_IFREG |
@@ -31,7 +35,7 b' def _mode_to_kind(mode):' | |||
|
31 | 35 | return statmod.S_IFSOCK |
|
32 | 36 | return mode |
|
33 | 37 | |
|
34 | def listdir(path, stat=False, skip=None): | |
|
38 | def listdirpure(path, stat=False, skip=None): | |
|
35 | 39 | '''listdir(path, stat=False) -> list_of_tuples |
|
36 | 40 | |
|
37 | 41 | Return a sorted list containing information about the entries |
@@ -61,6 +65,95 b' def listdir(path, stat=False, skip=None)' | |||
|
61 | 65 | result.append((fn, _mode_to_kind(st.st_mode))) |
|
62 | 66 | return result |
|
63 | 67 | |
|
68 | ffi = None | |
|
69 | if modulepolicy not in policynocffi and sys.platform == 'darwin': | |
|
70 | try: | |
|
71 | from _osutil_cffi import ffi, lib | |
|
72 | except ImportError: | |
|
73 | if modulepolicy == 'cffi': # strict cffi import | |
|
74 | raise | |
|
75 | ||
|
76 | if sys.platform == 'darwin' and ffi is not None: | |
|
77 | listdir_batch_size = 4096 | |
|
78 | # tweakable number, only affects performance, which chunks | |
|
79 | # of bytes do we get back from getattrlistbulk | |
|
80 | ||
|
81 | attrkinds = [None] * 20 # we need the max no for enum VXXX, 20 is plenty | |
|
82 | ||
|
83 | attrkinds[lib.VREG] = statmod.S_IFREG | |
|
84 | attrkinds[lib.VDIR] = statmod.S_IFDIR | |
|
85 | attrkinds[lib.VLNK] = statmod.S_IFLNK | |
|
86 | attrkinds[lib.VBLK] = statmod.S_IFBLK | |
|
87 | attrkinds[lib.VCHR] = statmod.S_IFCHR | |
|
88 | attrkinds[lib.VFIFO] = statmod.S_IFIFO | |
|
89 | attrkinds[lib.VSOCK] = statmod.S_IFSOCK | |
|
90 | ||
|
91 | class stat_res(object): | |
|
92 | def __init__(self, st_mode, st_mtime, st_size): | |
|
93 | self.st_mode = st_mode | |
|
94 | self.st_mtime = st_mtime | |
|
95 | self.st_size = st_size | |
|
96 | ||
|
97 | tv_sec_ofs = ffi.offsetof("struct timespec", "tv_sec") | |
|
98 | buf = ffi.new("char[]", listdir_batch_size) | |
|
99 | ||
|
100 | def listdirinternal(dfd, req, stat, skip): | |
|
101 | ret = [] | |
|
102 | while True: | |
|
103 | r = lib.getattrlistbulk(dfd, req, buf, listdir_batch_size, 0) | |
|
104 | if r == 0: | |
|
105 | break | |
|
106 | if r == -1: | |
|
107 | raise OSError(ffi.errno, os.strerror(ffi.errno)) | |
|
108 | cur = ffi.cast("val_attrs_t*", buf) | |
|
109 | for i in range(r): | |
|
110 | lgt = cur.length | |
|
111 | assert lgt == ffi.cast('uint32_t*', cur)[0] | |
|
112 | ofs = cur.name_info.attr_dataoffset | |
|
113 | str_lgt = cur.name_info.attr_length | |
|
114 | base_ofs = ffi.offsetof('val_attrs_t', 'name_info') | |
|
115 | name = str(ffi.buffer(ffi.cast("char*", cur) + base_ofs + ofs, | |
|
116 | str_lgt - 1)) | |
|
117 | tp = attrkinds[cur.obj_type] | |
|
118 | if name == "." or name == "..": | |
|
119 | continue | |
|
120 | if skip == name and tp == statmod.S_ISDIR: | |
|
121 | return [] | |
|
122 | if stat: | |
|
123 | mtime = cur.time.tv_sec | |
|
124 | mode = (cur.accessmask & ~lib.S_IFMT)| tp | |
|
125 | ret.append((name, tp, stat_res(st_mode=mode, st_mtime=mtime, | |
|
126 | st_size=cur.datalength))) | |
|
127 | else: | |
|
128 | ret.append((name, tp)) | |
|
129 | cur += lgt | |
|
130 | return ret | |
|
131 | ||
|
132 | def listdir(path, stat=False, skip=None): | |
|
133 | req = ffi.new("struct attrlist*") | |
|
134 | req.bitmapcount = lib.ATTR_BIT_MAP_COUNT | |
|
135 | req.commonattr = (lib.ATTR_CMN_RETURNED_ATTRS | | |
|
136 | lib.ATTR_CMN_NAME | | |
|
137 | lib.ATTR_CMN_OBJTYPE | | |
|
138 | lib.ATTR_CMN_ACCESSMASK | | |
|
139 | lib.ATTR_CMN_MODTIME) | |
|
140 | req.fileattr = lib.ATTR_FILE_DATALENGTH | |
|
141 | dfd = lib.open(path, lib.O_RDONLY, 0) | |
|
142 | if dfd == -1: | |
|
143 | raise OSError(ffi.errno, os.strerror(ffi.errno)) | |
|
144 | ||
|
145 | try: | |
|
146 | ret = listdirinternal(dfd, req, stat, skip) | |
|
147 | finally: | |
|
148 | try: | |
|
149 | lib.close(dfd) | |
|
150 | except BaseException: | |
|
151 | pass # we ignore all the errors from closing, not | |
|
152 | # much we can do about that | |
|
153 | return ret | |
|
154 | else: | |
|
155 | listdir = listdirpure | |
|
156 | ||
|
64 | 157 | if os.name != 'nt': |
|
65 | 158 | posixfile = open |
|
66 | 159 |
@@ -25,49 +25,111 b' def dirstatetuple(*x):' | |||
|
25 | 25 | # x is a tuple |
|
26 | 26 | return x |
|
27 | 27 | |
|
28 | def parse_index2(data, inline): | |
|
29 | def gettype(q): | |
|
30 | return int(q & 0xFFFF) | |
|
28 | indexformatng = ">Qiiiiii20s12x" | |
|
29 | indexfirst = struct.calcsize('Q') | |
|
30 | sizeint = struct.calcsize('i') | |
|
31 | indexsize = struct.calcsize(indexformatng) | |
|
32 | ||
|
33 | def gettype(q): | |
|
34 | return int(q & 0xFFFF) | |
|
31 | 35 | |
|
32 | def offset_type(offset, type): | |
|
|
|
33 | return long(long(offset) << 16 | type) | |
|
|
|
36 | def offset_type(offset, type): | |
|
37 | return long(long(offset) << 16 | type) | |
|
38 | ||
|
39 | class BaseIndexObject(object): | |
|
40 | def __len__(self): | |
|
41 | return self._lgt + len(self._extra) + 1 | |
|
42 | ||
|
43 | def insert(self, i, tup): | |
|
44 | assert i == -1 | |
|
45 | self._extra.append(tup) | |
|
34 | 46 | |
|
35 | indexformatng = ">Qiiiiii20s12x" | |
|
47 | def _fix_index(self, i): | |
|
48 | if not isinstance(i, int): | |
|
49 | raise TypeError("expecting int indexes") | |
|
50 | if i < 0: | |
|
51 | i = len(self) + i | |
|
52 | if i < 0 or i >= len(self): | |
|
53 | raise IndexError | |
|
54 | return i | |
|
36 | 55 | |
|
37 | s = struct.calcsize(indexformatng) | |
|
38 | index = [] | |
|
39 | cache = None | |
|
40 | off = 0 | |
|
56 | def __getitem__(self, i): | |
|
57 | i = self._fix_index(i) | |
|
58 | if i == len(self) - 1: | |
|
59 | return (0, 0, 0, -1, -1, -1, -1, nullid) | |
|
60 | if i >= self._lgt: | |
|
61 | return self._extra[i - self._lgt] | |
|
62 | index = self._calculate_index(i) | |
|
63 | r = struct.unpack(indexformatng, self._data[index:index + indexsize]) | |
|
64 | if i == 0: | |
|
65 | e = list(r) | |
|
66 | type = gettype(e[0]) | |
|
67 | e[0] = offset_type(0, type) | |
|
68 | return tuple(e) | |
|
69 | return r | |
|
70 | ||
|
71 | class IndexObject(BaseIndexObject): | |
|
72 | def __init__(self, data): | |
|
73 | assert len(data) % indexsize == 0 | |
|
74 | self._data = data | |
|
75 | self._lgt = len(data) // indexsize | |
|
76 | self._extra = [] | |
|
77 | ||
|
78 | def _calculate_index(self, i): | |
|
79 | return i * indexsize | |
|
41 | 80 | |
|
42 | l = len(data) - s | |
|
43 | append = index.append | |
|
44 | if inline: | |
|
45 | cache = (0, data) | |
|
46 | while off <= l: | |
|
|
|
47 | e = _unpack(indexformatng, data[off:off + s]) | |
|
48 | append(e) | |
|
49 | if e[1] < 0: | |
|
|
|
50 | break | |
|
51 | off += e[1] + s | |
|
52 | else: | |
|
53 | while off <= l: | |
|
54 | e = _unpack(indexformatng, data[off:off + s]) | |
|
55 | append(e) | |
|
56 | off += s | |
|
81 | def __delitem__(self, i): | |
|
82 | if not isinstance(i, slice) or not i.stop == -1 or not i.step is None: | |
|
83 | raise ValueError("deleting slices only supports a:-1 with step 1") | |
|
84 | i = self._fix_index(i.start) | |
|
85 | if i < self._lgt: | |
|
86 | self._data = self._data[:i * indexsize] | |
|
87 | self._lgt = i | |
|
88 | self._extra = [] | |
|
89 | else: | |
|
90 | self._extra = self._extra[:i - self._lgt] | |
|
91 | ||
|
92 | class InlinedIndexObject(BaseIndexObject): | |
|
93 | def __init__(self, data, inline=0): | |
|
94 | self._data = data | |
|
95 | self._lgt = self._inline_scan(None) | |
|
96 | self._inline_scan(self._lgt) | |
|
97 | self._extra = [] | |
|
57 | 98 | |
|
58 | if off != len(data): | |
|
59 | raise ValueError('corrupt index file') | |
|
99 | def _inline_scan(self, lgt): | |
|
100 | off = 0 | |
|
101 | if lgt is not None: | |
|
102 | self._offsets = [0] * lgt | |
|
103 | count = 0 | |
|
104 | while off <= len(self._data) - indexsize: | |
|
105 | s, = struct.unpack('>i', | |
|
106 | self._data[off + indexfirst:off + sizeint + indexfirst]) | |
|
107 | if lgt is not None: | |
|
108 | self._offsets[count] = off | |
|
109 | count += 1 | |
|
110 | off += indexsize + s | |
|
111 | if off != len(self._data): | |
|
112 | raise ValueError("corrupted data") | |
|
113 | return count | |
|
60 | 114 | |
|
61 | if index: | |
|
62 | e = list(index[0]) | |
|
63 | type = gettype(e[0]) | |
|
64 | e[0] = offset_type(0, type) | |
|
65 | index[0] = tuple(e) | |
|
115 | def __delitem__(self, i): | |
|
116 | if not isinstance(i, slice) or not i.stop == -1 or not i.step is None: | |
|
117 | raise ValueError("deleting slices only supports a:-1 with step 1") | |
|
118 | i = self._fix_index(i.start) | |
|
119 | if i < self._lgt: | |
|
120 | self._offsets = self._offsets[:i] | |
|
121 | self._lgt = i | |
|
122 | self._extra = [] | |
|
123 | else: | |
|
124 | self._extra = self._extra[:i - self._lgt] | |
|
66 | 125 | |
|
67 | # add the magic null revision at -1 | |
|
68 | index.append((0, 0, 0, -1, -1, -1, -1, nullid)) | |
|
126 | def _calculate_index(self, i): | |
|
127 | return self._offsets[i] | |
|
69 | 128 | |
|
70 | return index, cache | |
|
129 | def parse_index2(data, inline): | |
|
130 | if not inline: | |
|
131 | return IndexObject(data), None | |
|
132 | return InlinedIndexObject(data, inline), (0, data) | |
|
71 | 133 | |
|
72 | 134 | def parse_dirstate(dmap, copymap, st): |
|
73 | 135 | parents = [st[:20], st[20: 40]] |
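BaseIndexObject and IndexObject above avoid materializing every revlog entry up front: they keep the raw bytes and struct-unpack one fixed-width record per lookup. The core trick isolated, with an invented record format rather than the revlog's:

    import struct

    RECFMT = '>Qi'                        # fake record: (packed field, length)
    RECSIZE = struct.calcsize(RECFMT)

    class lazyrecords(object):
        def __init__(self, data):
            assert len(data) % RECSIZE == 0
            self._data = data
            self._len = len(data) // RECSIZE
        def __len__(self):
            return self._len
        def __getitem__(self, i):
            if i < 0:
                i += self._len
            start = i * RECSIZE           # unpack only the record asked for
            return struct.unpack(RECFMT, self._data[start:start + RECSIZE])

    raw = struct.pack(RECFMT, (1 << 16) | 2, 7) + struct.pack(RECFMT, 0, 3)
    recs = lazyrecords(raw)
    assert len(recs) == 2 and recs[1] == (0, 3)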
@@ -10,18 +10,26 b' This contains aliases to hide python ver' | |||
|
10 | 10 | |
|
11 | 11 | from __future__ import absolute_import |
|
12 | 12 | |
|
13 | try: | |
|
13 | import sys | |
|
14 | ||
|
15 | if sys.version_info[0] < 3: | |
|
16 | import cPickle as pickle | |
|
14 | 17 | import cStringIO as io |
|
15 | stringio = io.StringIO | |
|
16 | except ImportError: | |
|
18 | import httplib | |
|
19 | import Queue as _queue | |
|
20 | import SocketServer as socketserver | |
|
21 | import urlparse | |
|
22 | import xmlrpclib | |
|
23 | else: | |
|
24 | import http.client as httplib | |
|
17 | 25 | import io |
|
18 | stringio = io.StringIO | |
|
26 | import pickle | |
|
27 | import queue as _queue | |
|
28 | import socketserver | |
|
29 | import urllib.parse as urlparse | |
|
30 | import xmlrpc.client as xmlrpclib | |
|
19 | 31 | |
|
20 | try: | |
|
21 | import Queue as _queue | |
|
22 | _queue.Queue | |
|
23 | except ImportError: | |
|
24 | import queue as _queue | |
|
32 | stringio = io.StringIO | |
|
25 | 33 | empty = _queue.Empty |
|
26 | 34 | queue = _queue.Queue |
|
27 | 35 | |
@@ -41,9 +49,13 b' def _alias(alias, origin, items):' | |||
|
41 | 49 | except AttributeError: |
|
42 | 50 | pass |
|
43 | 51 | |
|
52 | httpserver = _pycompatstub() | |
|
44 | 53 | urlreq = _pycompatstub() |
|
45 | 54 | urlerr = _pycompatstub() |
|
46 | 55 | try: |
|
56 | import BaseHTTPServer | |
|
57 | import CGIHTTPServer | |
|
58 | import SimpleHTTPServer | |
|
47 | 59 | import urllib2 |
|
48 | 60 | import urllib |
|
49 | 61 | _alias(urlreq, urllib, ( |
@@ -81,6 +93,16 b' try:' | |||
|
81 | 93 | "HTTPError", |
|
82 | 94 | "URLError", |
|
83 | 95 | )) |
|
96 | _alias(httpserver, BaseHTTPServer, ( | |
|
97 | "HTTPServer", | |
|
98 | "BaseHTTPRequestHandler", | |
|
99 | )) | |
|
100 | _alias(httpserver, SimpleHTTPServer, ( | |
|
101 | "SimpleHTTPRequestHandler", | |
|
102 | )) | |
|
103 | _alias(httpserver, CGIHTTPServer, ( | |
|
104 | "CGIHTTPRequestHandler", | |
|
105 | )) | |
|
84 | 106 | |
|
85 | 107 | except ImportError: |
|
86 | 108 | import urllib.request |
@@ -99,6 +121,7 b' except ImportError:' | |||
|
99 | 121 | "pathname2url", |
|
100 | 122 | "HTTPBasicAuthHandler", |
|
101 | 123 | "HTTPDigestAuthHandler", |
|
124 | "HTTPPasswordMgrWithDefaultRealm", | |
|
102 | 125 | "ProxyHandler", |
|
103 | 126 | "quote", |
|
104 | 127 | "Request", |
@@ -115,6 +138,13 b' except ImportError:' | |||
|
115 | 138 | "HTTPError", |
|
116 | 139 | "URLError", |
|
117 | 140 | )) |
|
141 | import http.server | |
|
142 | _alias(httpserver, http.server, ( | |
|
143 | "HTTPServer", | |
|
144 | "BaseHTTPRequestHandler", | |
|
145 | "SimpleHTTPRequestHandler", | |
|
146 | "CGIHTTPRequestHandler", | |
|
147 | )) | |
|
118 | 148 | |
|
119 | 149 | try: |
|
120 | 150 | xrange |
@@ -9,6 +9,7 b'' | |||
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import errno |
|
12 | import hashlib | |
|
12 | 13 | |
|
13 | 14 | from .i18n import _ |
|
14 | 15 | from .node import short |
@@ -35,7 +36,7 b' def _bundle(repo, bases, heads, node, su' | |||
|
35 | 36 | # Include a hash of all the nodes in the filename for uniqueness |
|
36 | 37 | allcommits = repo.set('%ln::%ln', bases, heads) |
|
37 | 38 | allhashes = sorted(c.hex() for c in allcommits) |
|
38 | totalhash = util.sha1(''.join(allhashes)).hexdigest() | |
|
|
39 | totalhash = hashlib.sha1(''.join(allhashes)).hexdigest() | |
|
39 | 40 | name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix) |
|
40 | 41 | |
|
41 | 42 | comp = None |
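The backup name above is content-addressed: hashing the sorted hex nodes of every commit being stripped makes equal strip sets map to the same filename. A reduced sketch with fabricated nodes and directory:

    import hashlib

    allhashes = sorted(['ab12' * 10, 'cd34' * 10])    # fake 40-char hex nodes
    totalhash = hashlib.sha1(''.join(allhashes).encode('ascii')).hexdigest()
    name = '%s/%s-%s-%s.hg' % ('.hg/strip-backup', allhashes[0][:12],
                               totalhash[:8], 'backup')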
@@ -166,6 +167,13 b' def strip(ui, repo, nodelist, backup=Tru' | |||
|
166 | 167 | tr.startgroup() |
|
167 | 168 | cl.strip(striprev, tr) |
|
168 | 169 | mfst.strip(striprev, tr) |
|
170 | if 'treemanifest' in repo.requirements: # safe but unnecessary | |
|
171 | # otherwise | |
|
172 | for unencoded, encoded, size in repo.store.datafiles(): | |
|
173 | if (unencoded.startswith('meta/') and | |
|
174 | unencoded.endswith('00manifest.i')): | |
|
175 | dir = unencoded[5:-12] | |
|
176 | repo.dirlog(dir).strip(striprev, tr) | |
|
169 | 177 | for fn in files: |
|
170 | 178 | repo.file(fn).strip(striprev, tr) |
|
171 | 179 | tr.endgroup() |
@@ -9,6 +9,7 b'' | |||
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import copy |
|
12 | import hashlib | |
|
12 | 13 | import heapq |
|
13 | 14 | import struct |
|
14 | 15 | |
@@ -18,7 +19,6 b' from . import (' | |||
|
18 | 19 | obsolete, |
|
19 | 20 | phases, |
|
20 | 21 | tags as tagsmod, |
|
21 | util, | |
|
22 | 22 | ) |
|
23 | 23 | |
|
24 | 24 | def hideablerevs(repo): |
@@ -102,7 +102,7 b' def cachehash(repo, hideable):' | |||
|
102 | 102 | it to the cache. Upon reading we can easily validate by checking the hash |
|
103 | 103 | against the stored one and discard the cache in case the hashes don't match. |
|
104 | 104 | """ |
|
105 | h = util.sha1() | |
|
|
105 | h = hashlib.sha1() | |
|
106 | 106 | h.update(''.join(repo.heads())) |
|
107 | 107 | h.update(str(hash(frozenset(hideable)))) |
|
108 | 108 | return h.digest() |
@@ -15,6 +15,7 b' from __future__ import absolute_import' | |||
|
15 | 15 | |
|
16 | 16 | import collections |
|
17 | 17 | import errno |
|
18 | import hashlib | |
|
18 | 19 | import os |
|
19 | 20 | import struct |
|
20 | 21 | import zlib |
@@ -40,7 +41,6 b' from . import (' | |||
|
40 | 41 | _unpack = struct.unpack |
|
41 | 42 | _compress = zlib.compress |
|
42 | 43 | _decompress = zlib.decompress |
|
43 | _sha = util.sha1 | |
|
44 | 44 | |
|
45 | 45 | # revlog header flags |
|
46 | 46 | REVLOGV0 = 0 |
@@ -74,7 +74,7 b' def gettype(q):' | |||
|
74 | 74 | def offset_type(offset, type): |
|
75 | 75 | return long(long(offset) << 16 | type) |
|
76 | 76 | |
|
77 | _nullhash = util.sha1(nullid) | |
|
|
77 | _nullhash = hashlib.sha1(nullid) | |
|
78 | 78 | |
|
79 | 79 | def hash(text, p1, p2): |
|
80 | 80 | """generate a hash from the given text and its parent hashes |
@@ -92,7 +92,7 b' def hash(text, p1, p2):' | |||
|
92 | 92 | # none of the parent nodes are nullid |
|
93 | 93 | l = [p1, p2] |
|
94 | 94 | l.sort() |
|
95 | s = util.sha1(l[0]) | |
|
|
95 | s = hashlib.sha1(l[0]) | |
|
96 | 96 | s.update(l[1]) |
|
97 | 97 | s.update(text) |
|
98 | 98 | return s.digest() |
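The hash() function above defines a revlog nodeid: sort the two parent nodes so the digest is independent of parent order, then feed parents followed by the text. Made explicit as a runnable sketch (the nullid fast paths of the real code are omitted):

    import hashlib

    nullid = b'\x00' * 20

    def hashrevision(text, p1=nullid, p2=nullid):
        a, b = sorted([p1, p2])           # parent order must not matter
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()

    assert hashrevision(b'data', b'\x01' * 20) == \
           hashrevision(b'data', nullid, b'\x01' * 20)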
@@ -941,8 +941,11 b' class revlog(object):' | |||
|
941 | 941 | return None |
|
942 | 942 | except RevlogError: |
|
943 | 943 | # parsers.c radix tree lookup gave multiple matches |
|
944 | # fast path: for unfiltered changelog, radix tree is accurate | |
|
945 | if not getattr(self, 'filteredrevs', None): | |
|
946 | raise LookupError(id, self.indexfile, | |
|
947 | _('ambiguous identifier')) | |
|
944 | 948 | # fall through to slow path that filters hidden revisions |
|
945 | pass | |
|
946 | 949 | except (AttributeError, ValueError): |
|
947 | 950 | # we are pure python, or key was too short to search radix tree |
|
948 | 951 | pass |
This diff has been collapsed as it changes many lines, (564 lines changed)
@@ -302,6 +302,11 b' def tokenize(program, lookup=None, symin' | |||
|
302 | 302 | |
|
303 | 303 | # helpers |
|
304 | 304 | |
|
305 | def getsymbol(x): | |
|
306 | if x and x[0] == 'symbol': | |
|
307 | return x[1] | |
|
308 | raise error.ParseError(_('not a symbol')) | |
|
309 | ||
|
305 | 310 | def getstring(x, err): |
|
306 | 311 | if x and (x[0] == 'string' or x[0] == 'symbol'): |
|
307 | 312 | return x[1] |
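Parsed revset trees are nested tuples whose first element names the node kind; getsymbol() above is the checked accessor for ('symbol', name) nodes. For orientation, a small example of the shape involved:

    tree = ('func', ('symbol', 'sort'), ('symbol', 'all()'))
    assert tree[0] == 'func'
    assert tree[1] == ('symbol', 'sort')  # what getsymbol(tree[1]) unpacks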
@@ -330,13 +335,12 b' def getset(repo, subset, x):' | |||
|
330 | 335 | s = methods[x[0]](repo, subset, *x[1:]) |
|
331 | 336 | if util.safehasattr(s, 'isascending'): |
|
332 | 337 | return s |
|
333 | if (repo.ui.configbool('devel', 'all-warnings') | |
|
334 | or repo.ui.configbool('devel', 'old-revset')): | |
|
335 | # else case should not happen, because all non-func are internal, | |
|
336 | # ignoring for now. | |
|
337 | if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols: | |
|
338 | repo.ui.develwarn('revset "%s" use list instead of smartset, ' | |
|
339 | '(upgrade your code)' % x[1][1]) | |
|
338 | # else case should not happen, because all non-func are internal, | |
|
339 | # ignoring for now. | |
|
340 | if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols: | |
|
341 | repo.ui.deprecwarn('revset "%s" uses list instead of smartset' | |
|
342 | % x[1][1], | |
|
343 | '3.9') | |
|
340 | 344 | return baseset(s) |
|
341 | 345 | |
|
342 | 346 | def _getrevsource(repo, r): |
@@ -387,9 +391,7 b' def dagrange(repo, subset, x, y):' | |||
|
387 | 391 | r = fullreposet(repo) |
|
388 | 392 | xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y), |
|
389 | 393 | includepath=True) |
|
390 | # XXX We should combine with subset first: 'subset & baseset(...)'. This is | |
|
391 | # necessary to ensure we preserve the order in subset. | |
|
392 | return xs & subset | |
|
394 | return subset & xs | |
|
393 | 395 | |
|
394 | 396 | def andset(repo, subset, x, y): |
|
395 | 397 | return getset(repo, getset(repo, subset, x), y) |
@@ -417,13 +419,14 b' def keyvaluepair(repo, subset, k, v):' | |||
|
417 | 419 | raise error.ParseError(_("can't use a key-value pair in this context")) |
|
418 | 420 | |
|
419 | 421 | def func(repo, subset, a, b): |
|
420 | if a[0] == 'symbol' and a[1] in symbols: | |
|
421 | return symbols[a[1]](repo, subset, b) | |
|
422 | f = getsymbol(a) | |
|
423 | if f in symbols: | |
|
424 | return symbols[f](repo, subset, b) | |
|
422 | 425 | |
|
423 | 426 | keep = lambda fn: getattr(fn, '__doc__', None) is not None |
|
424 | 427 | |
|
425 | 428 | syms = [s for (s, fn) in symbols.items() if keep(fn)] |
|
426 | raise error.UnknownIdentifier(a[1], syms) | |
|
|
429 | raise error.UnknownIdentifier(f, syms) | |
|
427 | 430 | |
|
428 | 431 | # functions |
|
429 | 432 | |
@@ -695,20 +698,18 b' def checkstatus(repo, subset, pat, field' | |||
|
695 | 698 | |
|
696 | 699 | return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat)) |
|
697 | 700 | |
|
698 | def _children(repo, narrow, parentset): | |
|
|
701 | def _children(repo, subset, parentset): | |
|
699 | 702 | if not parentset: |
|
700 | 703 | return baseset() |
|
701 | 704 | cs = set() |
|
702 | 705 | pr = repo.changelog.parentrevs |
|
703 | 706 | minrev = parentset.min() |
|
704 | for r in narrow: | |
|
|
707 | for r in subset: | |
|
705 | 708 | if r <= minrev: |
|
706 | 709 | continue |
|
707 | 710 | for p in pr(r): |
|
708 | 711 | if p in parentset: |
|
709 | 712 | cs.add(r) |
|
710 | # XXX using a set to feed the baseset is wrong. Sets are not ordered. | |
|
711 | # This does not break because of other fullreposet misbehavior. | |
|
712 | 713 | return baseset(cs) |
|
713 | 714 | |
|
714 | 715 | @predicate('children(set)', safe=True) |
@@ -1150,13 +1151,9 b' def head(repo, subset, x):' | |||
|
1150 | 1151 | getargs(x, 0, 0, _("head takes no arguments")) |
|
1151 | 1152 | hs = set() |
|
1152 | 1153 | cl = repo.changelog |
|
1153 | for b, ls in repo.branchmap().iteritems(): | |
|
|
1154 | for ls in repo.branchmap().itervalues(): | |
|
1154 | 1155 | hs.update(cl.rev(h) for h in ls) |
|
1155 | # XXX using a set to feed the baseset is wrong. Sets are not ordered. | |
|
1156 | # This does not break because of other fullreposet misbehavior. | |
|
1157 | # XXX We should combine with subset first: 'subset & baseset(...)'. This is | |
|
1158 | # necessary to ensure we preserve the order in subset. | |
|
1159 | return baseset(hs) & subset | |
|
1156 | return subset & baseset(hs) | |
|
1160 | 1157 | |
|
1161 | 1158 | @predicate('heads(set)', safe=True) |
|
1162 | 1159 | def heads(repo, subset, x): |
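Flipping ``baseset(hs) & subset`` to ``subset & baseset(hs)`` (here and in dagrange above) matters because, for order-aware set types, the left operand of ``&`` decides the iteration order of the result. A toy stand-in for smartsets:

    class orderedset(object):
        def __init__(self, items):
            self.items = list(items)
        def __and__(self, other):
            keep = set(other.items)
            return orderedset(x for x in self.items if x in keep)

    subset = orderedset([3, 1, 2])
    hs = orderedset([1, 3])
    assert (subset & hs).items == [3, 1]  # subset's order is preserved
    assert (hs & subset).items == [1, 3]  # the order the old code produced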
@@ -1837,7 +1834,54 b' def roots(repo, subset, x):' | |||
|
1837 | 1834 | return True |
|
1838 | 1835 | return subset & s.filter(filter, condrepr='<roots>') |
|
1839 | 1836 | |
|
1840 | @predicate('sort(set[, [-]key...])', safe=True) | |
|
1837 | _sortkeyfuncs = { | |
|
1838 | 'rev': lambda c: c.rev(), | |
|
1839 | 'branch': lambda c: c.branch(), | |
|
1840 | 'desc': lambda c: c.description(), | |
|
1841 | 'user': lambda c: c.user(), | |
|
1842 | 'author': lambda c: c.user(), | |
|
1843 | 'date': lambda c: c.date()[0], | |
|
1844 | } | |
|
1845 | ||
|
1846 | def _getsortargs(x): | |
|
1847 | """Parse sort options into (set, [(key, reverse)], opts)""" | |
|
1848 | args = getargsdict(x, 'sort', 'set keys topo.firstbranch') | |
|
1849 | if 'set' not in args: | |
|
1850 | # i18n: "sort" is a keyword | |
|
1851 | raise error.ParseError(_('sort requires one or two arguments')) | |
|
1852 | keys = "rev" | |
|
1853 | if 'keys' in args: | |
|
1854 | # i18n: "sort" is a keyword | |
|
1855 | keys = getstring(args['keys'], _("sort spec must be a string")) | |
|
1856 | ||
|
1857 | keyflags = [] | |
|
1858 | for k in keys.split(): | |
|
1859 | fk = k | |
|
1860 | reverse = (k[0] == '-') | |
|
1861 | if reverse: | |
|
1862 | k = k[1:] | |
|
1863 | if k not in _sortkeyfuncs and k != 'topo': | |
|
1864 | raise error.ParseError(_("unknown sort key %r") % fk) | |
|
1865 | keyflags.append((k, reverse)) | |
|
1866 | ||
|
1867 | if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags): | |
|
1868 | # i18n: "topo" is a keyword | |
|
1869 | raise error.ParseError(_( | |
|
1870 | 'topo sort order cannot be combined with other sort keys')) | |
|
1871 | ||
|
1872 | opts = {} | |
|
1873 | if 'topo.firstbranch' in args: | |
|
1874 | if any(k == 'topo' for k, reverse in keyflags): | |
|
1875 | opts['topo.firstbranch'] = args['topo.firstbranch'] | |
|
1876 | else: | |
|
1877 | # i18n: "topo" and "topo.firstbranch" are keywords | |
|
1878 | raise error.ParseError(_( | |
|
1879 | 'topo.firstbranch can only be used when using the topo sort ' | |
|
1880 | 'key')) | |
|
1881 | ||
|
1882 | return args['set'], keyflags, opts | |
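What _getsortargs() boils a spec down to is a list of (key, reverse) pairs in user order, consumed by the sort body further down. A stripped-down version of just that parsing, without the validation:

    def parsekeys(spec):
        keyflags = []
        for k in spec.split():
            reverse = k.startswith('-')
            keyflags.append((k[1:] if reverse else k, reverse))
        return keyflags

    assert parsekeys('-date branch') == [('date', True), ('branch', False)]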
|
1883 | ||
|
1884 | @predicate('sort(set[, [-]key... [, ...]])', safe=True) | |
|
1841 | 1885 | def sort(repo, subset, x): |
|
1842 | 1886 | """Sort set by keys. The default sort order is ascending, specify a key |
|
1843 | 1887 | as ``-key`` to sort in descending order. |
@@ -1849,50 +1893,235 b' def sort(repo, subset, x):' | |||
|
1849 | 1893 | - ``desc`` for the commit message (description), |
|
1850 | 1894 | - ``user`` for user name (``author`` can be used as an alias), |
|
1851 | 1895 | - ``date`` for the commit date |
|
1896 | - ``topo`` for a reverse topographical sort | |
|
1897 | ||
|
1898 | The ``topo`` sort order cannot be combined with other sort keys. This sort | |
|
1899 | takes one optional argument, ``topo.firstbranch``, which takes a revset that | |
|
1900 | specifies what topographical branches to prioritize in the sort. | |
|
1901 | ||
|
1852 | 1902 | """ |
|
1853 | # i18n: "sort" is a keyword | |
|
1854 | l = getargs(x, 1, 2, _("sort requires one or two arguments")) | |
|
1855 | keys = "rev" | |
|
1856 | if len(l) == 2: | |
|
1857 | # i18n: "sort" is a keyword | |
|
1858 | keys = getstring(l[1], _("sort spec must be a string")) | |
|
1859 | ||
|
1860 | s = l[0] | |
|
1861 | keys = keys.split() | |
|
1903 | s, keyflags, opts = _getsortargs(x) | |
|
1862 | 1904 | revs = getset(repo, subset, s) |
|
1863 | if keys == ["rev"]: | |
|
1864 | revs.sort() | |
|
1905 | ||
|
1906 | if not keyflags: | |
|
1907 | return revs | |
|
1908 | if len(keyflags) == 1 and keyflags[0][0] == "rev": | |
|
1909 | revs.sort(reverse=keyflags[0][1]) | |
|
1865 | 1910 | return revs |
|
1866 | elif keys == ["-rev"]: | |
|
|
1867 | revs.sort(reverse=True) | |
|
1911 | elif keyflags[0][0] == "topo": | |
|
1912 | firstbranch = () | |
|
1913 | if 'topo.firstbranch' in opts: | |
|
1914 | firstbranch = getset(repo, subset, opts['topo.firstbranch']) | |
|
1915 | revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch), | |
|
1916 | istopo=True) | |
|
1917 | if keyflags[0][1]: | |
|
1918 | revs.reverse() | |
|
1868 | 1919 | return revs |
|
1920 | ||
|
1869 | 1921 | # sort() is guaranteed to be stable |
|
1870 | 1922 | ctxs = [repo[r] for r in revs] |
|
1871 | for k in reversed(keys): | |
|
1872 | if k == 'rev': | |
|
1873 | ctxs.sort(key=lambda c: c.rev()) | |
|
1874 | elif k == '-rev': | |
|
1875 | ctxs.sort(key=lambda c: c.rev(), reverse=True) | |
|
1876 | elif k == 'branch': | |
|
1877 | ctxs.sort(key=lambda c: c.branch()) | |
|
1878 | elif k == '-branch': | |
|
1879 | ctxs.sort(key=lambda c: c.branch(), reverse=True) | |
|
1880 | elif k == 'desc': | |
|
1881 | ctxs.sort(key=lambda c: c.description()) | |
|
1882 | elif k == '-desc': | |
|
1883 | ctxs.sort(key=lambda c: c.description(), reverse=True) | |
|
1884 | elif k in 'user author': | |
|
1885 | ctxs.sort(key=lambda c: c.user()) | |
|
1886 | elif k in '-user -author': | |
|
1887 | ctxs.sort(key=lambda c: c.user(), reverse=True) | |
|
1888 | elif k == 'date': | |
|
1889 | ctxs.sort(key=lambda c: c.date()[0]) | |
|
1890 | elif k == '-date': | |
|
1891 | ctxs.sort(key=lambda c: c.date()[0], reverse=True) | |
|
1892 | else: | |
|
1893 | raise error.ParseError(_("unknown sort key %r") % k) | |
|
1923 | for k, reverse in reversed(keyflags): | |
|
1924 | ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse) | |
|
1894 | 1925 | return baseset([c.rev() for c in ctxs]) |
|
1895 | 1926 | |
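
The reversed(keyflags) loop above relies on Python's sort being stable: applying single-key sorts from the least-significant key to the most-significant one yields a correct multi-key ordering. A standalone illustration with made-up tuples:

    items = [('bob', 2), ('ann', 2), ('bob', 1)]
    keyflags = [('name', False), ('num', True)]   # primary, then secondary
    keyfuncs = {'name': lambda t: t[0], 'num': lambda t: t[1]}
    for k, reverse in reversed(keyflags):
        items.sort(key=keyfuncs[k], reverse=reverse)
    assert items == [('ann', 2), ('bob', 2), ('bob', 1)]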
|
1927 | def _toposort(revs, parentsfunc, firstbranch=()): | |
|
1928 | """Yield revisions from heads to roots one (topo) branch at a time. | |
|
1929 | ||
|
1930 | This function aims to be used by a graph generator that wishes to minimize | |
|
1931 | the number of parallel branches and their interleaving. | |
|
1932 | ||
|
1933 | Example iteration order (numbers show the "true" order in a changelog): | |
|
1934 | ||
|
1935 | o 4 | |
|
1936 | | | |
|
1937 | o 1 | |
|
1938 | | | |
|
1939 | | o 3 | |
|
1940 | | | | |
|
1941 | | o 2 | |
|
1942 | |/ | |
|
1943 | o 0 | |
|
1944 | ||
|
1945 | Note that the ancestors of merges are understood by the current | |
|
1946 | algorithm to be on the same branch. This means no reordering will | |
|
1947 | occur behind a merge. | |
|
1948 | """ | |
|
1949 | ||
|
1950 | ### Quick summary of the algorithm | |
|
1951 | # | |
|
1952 | # This function is based around a "retention" principle. We keep revisions | |
|
1953 | # in memory until we are ready to emit a whole branch that immediately | |
|
1954 | # "merges" into an existing one. This reduces the number of parallel | |
|
1955 | # branches with interleaved revisions. | |
|
1956 | # | |
|
1957 | # During iteration revs are split into two groups: | |
|
1958 | # A) revisions already emitted | 

1959 | # B) revisions in "retention". They are stored as different subgroups. | 
|
1960 | # | |
|
1961 | # for each REV, we do the following logic: | |
|
1962 | # | |
|
1963 | # 1) if REV is a parent of (A), we will emit it. If there is a | |
|
1964 | # retention group ((B) above) that is blocked on REV being | |
|
1965 | # available, we emit all the revisions out of that retention | |
|
1966 | # group first. | |
|
1967 | # | |
|
1968 | # 2) else, we'll search for a subgroup in (B) waiting for REV to be | 

1969 | # available; if such a subgroup exists, we add REV to it and the subgroup is | 

1970 | # now waiting for REV.parents() to be available. | 
|
1971 | # | |
|
1972 | # 3) finally if no such group existed in (B), we create a new subgroup. | |
|
1973 | # | |
|
1974 | # | |
|
1975 | # To bootstrap the algorithm, we emit the tipmost revision (which | |
|
1976 | # puts it in group (A) from above). | |
|
1977 | ||
|
1978 | revs.sort(reverse=True) | |
|
1979 | ||
|
1980 | # Set of parents of revision that have been emitted. They can be considered | |
|
1981 | # unblocked as the graph generator is already aware of them so there is no | |
|
1982 | # need to delay the revisions that reference them. | |
|
1983 | # | |
|
1984 | # If someone wants to prioritize a branch over the others, pre-filling this | |
|
1985 | # set will force all other branches to wait until this branch is ready to be | |
|
1986 | # emitted. | |
|
1987 | unblocked = set(firstbranch) | |
|
1988 | ||
|
1989 | # list of groups waiting to be displayed, each group is defined by: | |
|
1990 | # | |
|
1991 | # (revs: lists of revs waiting to be displayed, | |
|
1992 | # blocked: set of revs that cannot be displayed before those in 'revs') | 
|
1993 | # | |
|
1994 | # The second value ('blocked') corresponds to parents of any revision in the | 
|
1995 | # group ('revs') that is not itself contained in the group. The main idea | |
|
1996 | # of this algorithm is to delay as much as possible the emission of any | |
|
1997 | # revision. This means waiting for the moment we are about to display | |
|
1998 | # these parents to display the revs in a group. | |
|
1999 | # | |
|
2000 | # This first implementation is smart until it encounters a merge: it will | |
|
2001 | # emit revs as soon as any parent is about to be emitted and can grow an | |
|
2002 | # arbitrary number of revs in 'blocked'. In practice this means we properly | 

2003 | # retain new branches but give up on any special ordering for ancestors | 
|
2004 | # of merges. The implementation can be improved to handle this better. | |
|
2005 | # | |
|
2006 | # The first subgroup is special. It corresponds to all the revisions that | 

2007 | # were already emitted. The 'revs' list is expected to be empty and the | 

2008 | # 'blocked' set contains the parent revisions of already emitted revisions. | 
|
2009 | # | |
|
2010 | # You could pre-seed the <parents> set of groups[0] to specific | 
|
2011 | # changesets to select what the first emitted branch should be. | |
|
2012 | groups = [([], unblocked)] | |
|
2013 | pendingheap = [] | |
|
2014 | pendingset = set() | |
|
2015 | ||
|
2016 | heapq.heapify(pendingheap) | |
|
2017 | heappop = heapq.heappop | |
|
2018 | heappush = heapq.heappush | |
|
2019 | for currentrev in revs: | |
|
2020 | # Heap works with smallest element, we want highest so we invert | |
|
2021 | if currentrev not in pendingset: | |
|
2022 | heappush(pendingheap, -currentrev) | |
|
2023 | pendingset.add(currentrev) | |
|
2024 | # iterate on pending revs until the current rev has been | 
|
2025 | # processed. | |
|
2026 | rev = None | |
|
2027 | while rev != currentrev: | |
|
2028 | rev = -heappop(pendingheap) | |
|
2029 | pendingset.remove(rev) | |
|
2030 | ||
|
2031 | # Look for a blocked subgroup waiting for the current revision. | 
|
2032 | matching = [i for i, g in enumerate(groups) if rev in g[1]] | |
|
2033 | ||
|
2034 | if matching: | |
|
2035 | # The main idea is to gather together all sets that are blocked | |
|
2036 | # on the same revision. | |
|
2037 | # | |
|
2038 | # Groups are merged when a common blocking ancestor is | |
|
2039 | # observed. For example, given two groups: | |
|
2040 | # | |
|
2041 | # revs [5, 4] waiting for 1 | |
|
2042 | # revs [3, 2] waiting for 1 | |
|
2043 | # | |
|
2044 | # These two groups will be merged when we process | |
|
2045 | # 1. In theory, we could have merged the groups when | |
|
2046 | # we added 2 to the group it is now in (we could have | |
|
2047 | # noticed the groups were both blocked on 1 then), but | |
|
2048 | # the way it works now makes the algorithm simpler. | |
|
2049 | # | |
|
2050 | # We also always keep the oldest subgroup first. We can | |
|
2051 | # probably improve the behavior by having the longest set | |
|
2052 | # first. That way, graph algorithms could minimise the length | |
|
2053 | # of parallel lines in their drawing. This is currently not done. | 
|
2054 | targetidx = matching.pop(0) | |
|
2055 | trevs, tparents = groups[targetidx] | |
|
2056 | for i in matching: | |
|
2057 | gr = groups[i] | |
|
2058 | trevs.extend(gr[0]) | |
|
2059 | tparents |= gr[1] | |
|
2060 | # delete all merged subgroups (except the one we kept) | |
|
2061 | # (starting from the last subgroup for performance and | |
|
2062 | # sanity reasons) | |
|
2063 | for i in reversed(matching): | |
|
2064 | del groups[i] | |
|
2065 | else: | |
|
2066 | # This is a new head. We create a new subgroup for it. | |
|
2067 | targetidx = len(groups) | |
|
2068 | groups.append(([], set([rev]))) | |
|
2069 | ||
|
2070 | gr = groups[targetidx] | |
|
2071 | ||
|
2072 | # We now add the current nodes to this subgroup. This is done | 
|
2073 | # after the subgroup merging because all elements from a subgroup | |
|
2074 | # that relied on this rev must precede it. | |
|
2075 | # | |
|
2076 | # we also update the <parents> set to include the parents of the | |
|
2077 | # new nodes. | |
|
2078 | if rev == currentrev: # only display stuff in rev | |
|
2079 | gr[0].append(rev) | |
|
2080 | gr[1].remove(rev) | |
|
2081 | parents = [p for p in parentsfunc(rev) if p > node.nullrev] | |
|
2082 | gr[1].update(parents) | |
|
2083 | for p in parents: | |
|
2084 | if p not in pendingset: | |
|
2085 | pendingset.add(p) | |
|
2086 | heappush(pendingheap, -p) | |
|
2087 | ||
|
2088 | # Look for a subgroup to display | |
|
2089 | # | |
|
2090 | # When unblocked is empty (if clause), we were not waiting for any | |
|
2091 | # revisions during the first iteration (if no priority was given) or | |
|
2092 | # if we emitted a whole disconnected set of the graph (reached a | |
|
2093 | # root). In that case we arbitrarily take the oldest known | |
|
2094 | # subgroup. The heuristic could probably be better. | |
|
2095 | # | |
|
2096 | # Otherwise (elif clause) if the subgroup is blocked on | |
|
2097 | # a revision we just emitted, we can safely emit it as | |
|
2098 | # well. | |
|
2099 | if not unblocked: | |
|
2100 | if len(groups) > 1: # display other subset | |
|
2101 | targetidx = 1 | |
|
2102 | gr = groups[1] | |
|
2103 | elif not gr[1] & unblocked: | |
|
2104 | gr = None | |
|
2105 | ||
|
2106 | if gr is not None: | |
|
2107 | # update the set of awaited revisions with the one from the | |
|
2108 | # subgroup | |
|
2109 | unblocked |= gr[1] | |
|
2110 | # output all revisions in the subgroup | |
|
2111 | for r in gr[0]: | |
|
2112 | yield r | |
|
2113 | # delete the subgroup that you just output | |
|
2114 | # unless it is groups[0] in which case you just empty it. | |
|
2115 | if targetidx: | |
|
2116 | del groups[targetidx] | |
|
2117 | else: | |
|
2118 | gr[0][:] = [] | |
|
2119 | # Check if we have some subgroup waiting for revisions we are not going to | |
|
2120 | # iterate over | |
|
2121 | for g in groups: | |
|
2122 | for r in g[0]: | |
|
2123 | yield r | |
|
2124 | ||
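
A hypothetical driver for _toposort() above, using the five-revision DAG from its docstring (the parents dict and the parentrevs lambda are made up for illustration; -1 stands for the null parent):

    parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
    revs = [0, 1, 2, 3, 4]
    print(list(_toposort(revs, lambda rev: parents[rev])))
    # expected: [4, 1, 3, 2, 0], one branch at a time, heads to roots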
|
1896 | 2125 | @predicate('subrepo([pattern])') |
|
1897 | 2126 | def subrepo(repo, subset, x): |
|
1898 | 2127 | """Changesets that add, modify or remove the given subrepo. If no subrepo |
@@ -2073,7 +2302,22 b' methods = {' | |||
|
2073 | 2302 | "parentpost": p1, |
|
2074 | 2303 | } |
|
2075 | 2304 | |
|
2076 | def optimize(x, small): | |
|
2305 | def _matchonly(revs, bases): | |
|
2306 | """ | |
|
2307 | >>> f = lambda *args: _matchonly(*map(parse, args)) | |
|
2308 | >>> f('ancestors(A)', 'not ancestors(B)') | |
|
2309 | ('list', ('symbol', 'A'), ('symbol', 'B')) | |
|
2310 | """ | |
|
2311 | if (revs is not None | |
|
2312 | and revs[0] == 'func' | |
|
2313 | and getsymbol(revs[1]) == 'ancestors' | |
|
2314 | and bases is not None | |
|
2315 | and bases[0] == 'not' | |
|
2316 | and bases[1][0] == 'func' | |
|
2317 | and getsymbol(bases[1][1]) == 'ancestors'): | |
|
2318 | return ('list', revs[2], bases[1][2]) | |
|
2319 | ||
|
2320 | def _optimize(x, small): | |
|
2077 | 2321 | if x is None: |
|
2078 | 2322 | return 0, x |
|
2079 | 2323 | |
@@ -2083,47 +2327,36 b' def optimize(x, small):' | |||
|
2083 | 2327 | |
|
2084 | 2328 | op = x[0] |
|
2085 | 2329 | if op == 'minus': |
|
2086 | return optimize(('and', x[1], ('not', x[2])), small) | |
|
2330 | return _optimize(('and', x[1], ('not', x[2])), small) | |
|
2087 | 2331 | elif op == 'only': |
|
2088 | return optimize(('func', ('symbol', 'only'), | 

2089 | ('list', x[1], x[2])), small) | |
|
2332 | t = ('func', ('symbol', 'only'), ('list', x[1], x[2])) | |
|
2333 | return _optimize(t, small) | |
|
2090 | 2334 | elif op == 'onlypost': |
|
2091 | return optimize(('func', ('symbol', 'only'), x[1]), small) | |
|
2335 | return _optimize(('func', ('symbol', 'only'), x[1]), small) | |
|
2092 | 2336 | elif op == 'dagrangepre': |
|
2093 | return optimize(('func', ('symbol', 'ancestors'), x[1]), small) | |
|
2337 | return _optimize(('func', ('symbol', 'ancestors'), x[1]), small) | |
|
2094 | 2338 | elif op == 'dagrangepost': |
|
2095 | return optimize(('func', ('symbol', 'descendants'), x[1]), small) | |
|
2339 | return _optimize(('func', ('symbol', 'descendants'), x[1]), small) | |
|
2096 | 2340 | elif op == 'rangeall': |
|
2097 | return optimize(('range', ('string', '0'), ('string', 'tip')), small) | |
|
2341 | return _optimize(('range', ('string', '0'), ('string', 'tip')), small) | |
|
2098 | 2342 | elif op == 'rangepre': |
|
2099 | return optimize(('range', ('string', '0'), x[1]), small) | |
|
2343 | return _optimize(('range', ('string', '0'), x[1]), small) | |
|
2100 | 2344 | elif op == 'rangepost': |
|
2101 | return optimize(('range', x[1], ('string', 'tip')), small) | |
|
2345 | return _optimize(('range', x[1], ('string', 'tip')), small) | |
|
2102 | 2346 | elif op == 'negate': |
|
2103 | return optimize(('string', | |
|
2104 | '-' + getstring(x[1], _("can't negate that"))), small) | |
|
2347 | s = getstring(x[1], _("can't negate that")) | |
|
2348 | return _optimize(('string', '-' + s), small) | |
|
2105 | 2349 | elif op in 'string symbol negate': |
|
2106 | 2350 | return smallbonus, x # single revisions are small |
|
2107 | 2351 | elif op == 'and': |
|
2108 | wa, ta = optimize(x[1], True) | |
|
2109 | wb, tb = optimize(x[2], True) | |
|
2352 | wa, ta = _optimize(x[1], True) | |
|
2353 | wb, tb = _optimize(x[2], True) | |
|
2354 | w = min(wa, wb) | |
|
2110 | 2355 | |
|
2111 | 2356 | # (::x and not ::y)/(not ::y and ::x) have a fast path |
|
2112 | def isonly(revs, bases): | |
|
2113 | return ( | |
|
2114 | revs is not None | |
|
2115 | and revs[0] == 'func' | |
|
2116 | and getstring(revs[1], _('not a symbol')) == 'ancestors' | |
|
2117 | and bases is not None | |
|
2118 | and bases[0] == 'not' | |
|
2119 | and bases[1][0] == 'func' | |
|
2120 | and getstring(bases[1][1], _('not a symbol')) == 'ancestors') | |
|
2121 | ||
|
2122 | w = min(wa, wb) | |
|
2123 | if isonly(ta, tb): | |
|
2124 | return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2])) | |
|
2125 | if isonly(tb, ta): | |
|
2126 | return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2])) | |
|
2357 | tm = _matchonly(ta, tb) or _matchonly(tb, ta) | |
|
2358 | if tm: | |
|
2359 | return w, ('func', ('symbol', 'only'), tm) | |
|
2127 | 2360 | |
|
2128 | 2361 | if tb is not None and tb[0] == 'not': |
|
2129 | 2362 | return wa, ('difference', ta, tb[1]) |
@@ -2143,12 +2376,12 b' def optimize(x, small):' | |||
|
2143 | 2376 | else: |
|
2144 | 2377 | s = '\0'.join(t[1] for w, t in ss) |
|
2145 | 2378 | y = ('func', ('symbol', '_list'), ('string', s)) |
|
2146 | w, t = optimize(y, False) | |
|
2379 | w, t = _optimize(y, False) | |
|
2147 | 2380 | ws.append(w) |
|
2148 | 2381 | ts.append(t) |
|
2149 | 2382 | del ss[:] |
|
2150 | 2383 | for y in x[1:]: |
|
2151 | w, t = optimize(y, False) | |
|
2384 | w, t = _optimize(y, False) | |
|
2152 | 2385 | if t is not None and (t[0] == 'string' or t[0] == 'symbol'): |
|
2153 | 2386 | ss.append((w, t)) |
|
2154 | 2387 | continue |
@@ -2166,34 +2399,34 b' def optimize(x, small):' | |||
|
2166 | 2399 | # Optimize not public() to _notpublic() because we have a fast version |
|
2167 | 2400 | if x[1] == ('func', ('symbol', 'public'), None): |
|
2168 | 2401 | newsym = ('func', ('symbol', '_notpublic'), None) |
|
2169 | o = optimize(newsym, not small) | |
|
2402 | o = _optimize(newsym, not small) | |
|
2170 | 2403 | return o[0], o[1] |
|
2171 | 2404 | else: |
|
2172 | o = optimize(x[1], not small) | |
|
2405 | o = _optimize(x[1], not small) | |
|
2173 | 2406 | return o[0], (op, o[1]) |
|
2174 | 2407 | elif op == 'parentpost': |
|
2175 | o = optimize(x[1], small) | |
|
2408 | o = _optimize(x[1], small) | |
|
2176 | 2409 | return o[0], (op, o[1]) |
|
2177 | 2410 | elif op == 'group': |
|
2178 | return optimize(x[1], small) | |
|
2411 | return _optimize(x[1], small) | |
|
2179 | 2412 | elif op in 'dagrange range parent ancestorspec': |
|
2180 | 2413 | if op == 'parent': |
|
2181 | 2414 | # x^:y means (x^) : y, not x ^ (:y) |
|
2182 | 2415 | post = ('parentpost', x[1]) |
|
2183 | 2416 | if x[2][0] == 'dagrangepre': |
|
2184 | return optimize(('dagrange', post, x[2][1]), small) | |
|
2417 | return _optimize(('dagrange', post, x[2][1]), small) | |
|
2185 | 2418 | elif x[2][0] == 'rangepre': |
|
2186 | return optimize(('range', post, x[2][1]), small) | |
|
2187 | ||
|
2188 | wa, ta = optimize(x[1], small) | |
|
2189 | wb, tb = optimize(x[2], small) | |
|
2419 | return _optimize(('range', post, x[2][1]), small) | |
|
2420 | ||
|
2421 | wa, ta = _optimize(x[1], small) | |
|
2422 | wb, tb = _optimize(x[2], small) | |
|
2190 | 2423 | return wa + wb, (op, ta, tb) |
|
2191 | 2424 | elif op == 'list': |
|
2192 | ws, ts = zip(*(optimize(y, small) for y in x[1:])) | |
|
2425 | ws, ts = zip(*(_optimize(y, small) for y in x[1:])) | |
|
2193 | 2426 | return sum(ws), (op,) + ts |
|
2194 | 2427 | elif op == 'func': |
|
2195 | f = getstring(x[1], _("not a symbol")) | 

2196 | wa, ta = optimize(x[2], small) | |
|
2428 | f = getsymbol(x[1]) | |
|
2429 | wa, ta = _optimize(x[2], small) | |
|
2197 | 2430 | if f in ("author branch closed date desc file grep keyword " |
|
2198 | 2431 | "outgoing user"): |
|
2199 | 2432 | w = 10 # slow |
@@ -2212,33 +2445,32 b' def optimize(x, small):' | |||
|
2212 | 2445 | return w + wa, (op, x[1], ta) |
|
2213 | 2446 | return 1, x |
|
2214 | 2447 | |
|
2448 | def optimize(tree): | |
|
2449 | _weight, newtree = _optimize(tree, small=True) | |
|
2450 | return newtree | |
|
2451 | ||
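
With the split above, the old isonly() fast path survives as _matchonly(), and a spec run through the new public optimize() should have an ancestors-difference rewritten into only(). A quick illustration using this module's parse():

    tree = parse('ancestors(A) and not ancestors(B)')
    print(optimize(tree))
    # ('func', ('symbol', 'only'),
    #  ('list', ('symbol', 'A'), ('symbol', 'B')))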
|
2215 | 2452 | # the set of valid characters for the initial letter of symbols in |
|
2216 | 2453 | # alias declarations and definitions |
|
2217 | 2454 | _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)] |
|
2218 | 2455 | if c.isalnum() or c in '._@$' or ord(c) > 127) |
|
2219 | 2456 | |
|
2220 | def _tokenizealias(program, lookup=None): | |
|
2221 | """Parse alias declaration/definition into a stream of tokens | |
|
2222 | ||
|
2223 | This allows symbol names to use also ``$`` as an initial letter | |
|
2224 | (for backward compatibility), and callers of this function should | |
|
2225 | examine whether ``$`` is used also for unexpected symbols or not. | |
|
2226 | """ | |
|
2227 | return tokenize(program, lookup=lookup, | |
|
2228 | syminitletters=_aliassyminitletters) | |
|
2229 | ||
|
2230 | def _parsealias(spec): | |
|
2231 | """Parse alias declaration/definition ``spec`` | |
|
2232 | ||
|
2233 | >>> _parsealias('foo($1)') | |
|
2457 | def _parsewith(spec, lookup=None, syminitletters=None): | |
|
2458 | """Generate a parse tree of given spec with given tokenizing options | |
|
2459 | ||
|
2460 | >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters) | |
|
2234 | 2461 | ('func', ('symbol', 'foo'), ('symbol', '$1')) |
|
2235 | >>> _parsealias('$1') | 

2462 | >>> _parsewith('$1') | |
|
2463 | Traceback (most recent call last): | |
|
2464 | ... | |
|
2465 | ParseError: ("syntax error in revset '$1'", 0) | |
|
2466 | >>> _parsewith('foo bar') | |
|
2236 | 2467 | Traceback (most recent call last): |
|
2237 | 2468 | ... |
|
2238 | 2469 | ParseError: ('invalid token', 4) |
|
2239 | 2470 | """ |
|
2240 | 2471 | p = parser.parser(elements) |
|
2241 | tree, pos = p.parse(_tokenizealias(spec)) | 

2472 | tree, pos = p.parse(tokenize(spec, lookup=lookup, | |
|
2473 | syminitletters=syminitletters)) | |
|
2242 | 2474 | if pos != len(spec): |
|
2243 | 2475 | raise error.ParseError(_('invalid token'), pos) |
|
2244 | 2476 | return parser.simplifyinfixops(tree, ('list', 'or')) |
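
_parsewith() now backs both parse() and the alias parser; only the tokenizer options differ. For instance (a sketch assuming this module's parse()):

    tree = parse('heads(default)')
    assert tree == ('func', ('symbol', 'heads'), ('symbol', 'default'))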
@@ -2246,7 +2478,16 b' def _parsealias(spec):' | |||
|
2246 | 2478 | class _aliasrules(parser.basealiasrules): |
|
2247 | 2479 | """Parsing and expansion rule set of revset aliases""" |
|
2248 | 2480 | _section = _('revset alias') |
|
2249 | _parse = staticmethod(_parsealias) | |
|
2481 | ||
|
2482 | @staticmethod | |
|
2483 | def _parse(spec): | |
|
2484 | """Parse alias declaration/definition ``spec`` | |
|
2485 | ||
|
2486 | This allows symbol names to use also ``$`` as an initial letter | |
|
2487 | (for backward compatibility), and callers of this function should | |
|
2488 | examine whether ``$`` is used also for unexpected symbols or not. | |
|
2489 | """ | |
|
2490 | return _parsewith(spec, syminitletters=_aliassyminitletters) | |
|
2250 | 2491 | |
|
2251 | 2492 | @staticmethod |
|
2252 | 2493 | def _trygetfunc(tree): |
@@ -2286,24 +2527,15 b' def foldconcat(tree):' | |||
|
2286 | 2527 | return tuple(foldconcat(t) for t in tree) |
|
2287 | 2528 | |
|
2288 | 2529 | def parse(spec, lookup=None): |
|
2289 | p = parser.parser(elements) | |
|
2290 | tree, pos = p.parse(tokenize(spec, lookup=lookup)) | |
|
2291 | if pos != len(spec): | |
|
2292 | raise error.ParseError(_("invalid token"), pos) | |
|
2293 | return parser.simplifyinfixops(tree, ('list', 'or')) | |
|
2530 | return _parsewith(spec, lookup=lookup) | |
|
2294 | 2531 | |
|
2295 | 2532 | def posttreebuilthook(tree, repo): |
|
2296 | 2533 | # hook for extensions to execute code on the optimized tree |
|
2297 | 2534 | pass |
|
2298 | 2535 | |
|
2299 | 2536 | def match(ui, spec, repo=None): |
|
2300 | if not spec: | |
|
2301 | raise error.ParseError(_("empty query")) | |
|
2302 | lookup = None | |
|
2303 | if repo: | |
|
2304 | lookup = repo.__contains__ | |
|
2305 | tree = parse(spec, lookup) | |
|
2306 | return _makematcher(ui, tree, repo) | |
|
2537 | """Create a matcher for a single revision spec.""" | |
|
2538 | return matchany(ui, [spec], repo=repo) | |
|
2307 | 2539 | |
|
2308 | 2540 | def matchany(ui, specs, repo=None): |
|
2309 | 2541 | """Create a matcher that will include any revisions matching one of the |
@@ -2327,7 +2559,7 b' def _makematcher(ui, tree, repo):' | |||
|
2327 | 2559 | if ui: |
|
2328 | 2560 | tree = expandaliases(ui, tree, showwarning=ui.warn) |
|
2329 | 2561 | tree = foldconcat(tree) |
|
2330 | weight, tree = optimize(tree, True) | 

2562 | tree = optimize(tree) | |
|
2331 | 2563 | posttreebuilthook(tree, repo) |
|
2332 | 2564 | def mfunc(repo, subset=None): |
|
2333 | 2565 | if subset is None: |
@@ -2426,7 +2658,8 b' def formatspec(expr, *args):' | |||
|
2426 | 2658 | ret += listexp(list(args[arg]), d) |
|
2427 | 2659 | arg += 1 |
|
2428 | 2660 | else: |
|
2429 | raise error.Abort('unexpected revspec format character %s' % d) | 

2661 | raise error.Abort(_('unexpected revspec format character %s') | |
|
2662 | % d) | |
|
2430 | 2663 | else: |
|
2431 | 2664 | ret += c |
|
2432 | 2665 | pos += 1 |
@@ -2506,6 +2739,10 b' class abstractsmartset(object):' | |||
|
2506 | 2739 | """True if the set will iterate in descending order""" |
|
2507 | 2740 | raise NotImplementedError() |
|
2508 | 2741 | |
|
2742 | def istopo(self): | |
|
2743 | """True if the set will iterate in topological order""" | 
|
2744 | raise NotImplementedError() | |
|
2745 | ||
|
2509 | 2746 | @util.cachefunc |
|
2510 | 2747 | def min(self): |
|
2511 | 2748 | """return the minimum element in the set""" |
@@ -2591,12 +2828,13 b' class baseset(abstractsmartset):' | |||
|
2591 | 2828 | |
|
2592 | 2829 | Every method in this class should be implemented by any smartset class. |
|
2593 | 2830 | """ |
|
2594 | def __init__(self, data=(), datarepr=None): | |
|
2831 | def __init__(self, data=(), datarepr=None, istopo=False): | |
|
2595 | 2832 | """ |
|
2596 | 2833 | datarepr: a tuple of (format, obj, ...), a function or an object that |
|
2597 | 2834 | provides a printable representation of the given data. |
|
2598 | 2835 | """ |
|
2599 | 2836 | self._ascending = None |
|
2837 | self._istopo = istopo | |
|
2600 | 2838 | if not isinstance(data, list): |
|
2601 | 2839 | if isinstance(data, set): |
|
2602 | 2840 | self._set = data |
@@ -2639,12 +2877,14 b' class baseset(abstractsmartset):' | |||
|
2639 | 2877 | |
|
2640 | 2878 | def sort(self, reverse=False): |
|
2641 | 2879 | self._ascending = not bool(reverse) |
|
2880 | self._istopo = False | |
|
2642 | 2881 | |
|
2643 | 2882 | def reverse(self): |
|
2644 | 2883 | if self._ascending is None: |
|
2645 | 2884 | self._list.reverse() |
|
2646 | 2885 | else: |
|
2647 | 2886 | self._ascending = not self._ascending |
|
2887 | self._istopo = False | |
|
2648 | 2888 | |
|
2649 | 2889 | def __len__(self): |
|
2650 | 2890 | return len(self._list) |
@@ -2665,6 +2905,14 b' class baseset(abstractsmartset):' | |||
|
2665 | 2905 | return True |
|
2666 | 2906 | return self._ascending is not None and not self._ascending |
|
2667 | 2907 | |
|
2908 | def istopo(self): | |
|
2909 | """Is the collection in topological order or not. | 
|
2910 | ||
|
2911 | This is part of the mandatory API for smartset.""" | |
|
2912 | if len(self) <= 1: | |
|
2913 | return True | |
|
2914 | return self._istopo | |
|
2915 | ||
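
Expected behavior of the new flag, as a sketch: a baseset built with istopo=True reports a topological order until it is re-sorted or reversed, both of which clear the flag as shown above:

    s = baseset([4, 1, 3, 2, 0], istopo=True)
    assert s.istopo()
    s.sort()                  # sort()/reverse() reset _istopo
    assert not s.istopo()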
|
2668 | 2916 | def first(self): |
|
2669 | 2917 | if self: |
|
2670 | 2918 | if self._ascending is None: |
@@ -2741,9 +2989,16 b' class filteredset(abstractsmartset):' | |||
|
2741 | 2989 | return lambda: self._iterfilter(it()) |
|
2742 | 2990 | |
|
2743 | 2991 | def __nonzero__(self): |
|
2744 | fast = self.fastasc | 

2745 | if fast is None: | |
|
2746 | fast = self.fastdesc | |
|
2992 | fast = None | |
|
2993 | candidates = [self.fastasc if self.isascending() else None, | |
|
2994 | self.fastdesc if self.isdescending() else None, | |
|
2995 | self.fastasc, | |
|
2996 | self.fastdesc] | |
|
2997 | for candidate in candidates: | |
|
2998 | if candidate is not None: | |
|
2999 | fast = candidate | |
|
3000 | break | |
|
3001 | ||
|
2747 | 3002 | if fast is not None: |
|
2748 | 3003 | it = fast() |
|
2749 | 3004 | else: |
@@ -2773,6 +3028,9 b' class filteredset(abstractsmartset):' | |||
|
2773 | 3028 | def isdescending(self): |
|
2774 | 3029 | return self._subset.isdescending() |
|
2775 | 3030 | |
|
3031 | def istopo(self): | |
|
3032 | return self._subset.istopo() | |
|
3033 | ||
|
2776 | 3034 | def first(self): |
|
2777 | 3035 | for x in self: |
|
2778 | 3036 | return x |
@@ -2816,14 +3074,14 b' def _iterordered(ascending, iter1, iter2' | |||
|
2816 | 3074 | # Consume both iterators in an ordered way until one is empty |
|
2817 | 3075 | while True: |
|
2818 | 3076 | if val1 is None: |
|
2819 | val1 = iter1.next() | 

3077 | val1 = next(iter1) | |
|
2820 | 3078 | if val2 is None: |
|
2821 | val2 = iter2.next() | 

2822 | next = choice(val1, val2) | 

2823 | yield next | 

2824 | if val1 == next: | 

3079 | val2 = next(iter2) | |
|
3080 | n = choice(val1, val2) | |
|
3081 | yield n | |
|
3082 | if val1 == n: | |
|
2825 | 3083 | val1 = None |
|
2826 | if val2 == next: | 

3084 | if val2 == n: | |
|
2827 | 3085 | val2 = None |
|
2828 | 3086 | except StopIteration: |
|
2829 | 3087 | # Flush any remaining values and consume the other one |
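
The repaired loop above is an ordered merge of two sorted iterators that emits values common to both sides only once. A simplified, ascending-only standalone version (catching StopIteration inside the generator, so it stays valid under PEP 479):

    def iterordered(iter1, iter2):
        val1 = val2 = None
        try:
            while True:
                if val1 is None:
                    val1 = next(iter1)
                if val2 is None:
                    val2 = next(iter2)
                n = min(val1, val2)
                yield n
                if val1 == n:
                    val1 = None
                if val2 == n:
                    val2 = None
        except StopIteration:
            # one side ran dry: flush the held value, then drain the rest
            if val1 is not None:
                yield val1
            if val2 is not None:
                yield val2
            for v in iter1:
                yield v
            for v in iter2:
                yield v

    assert list(iterordered(iter([1, 3, 5]), iter([2, 3, 6]))) == [1, 2, 3, 5, 6]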
@@ -3019,6 +3277,12 b' class addset(abstractsmartset):' | |||
|
3019 | 3277 | def isdescending(self): |
|
3020 | 3278 | return self._ascending is not None and not self._ascending |
|
3021 | 3279 | |
|
3280 | def istopo(self): | |
|
3281 | # not worth the trouble asserting if the two sets combined are still | |
|
3282 | # in topological order. Use the sort() predicate to explicitly sort | 
|
3283 | # again instead. | |
|
3284 | return False | |
|
3285 | ||
|
3022 | 3286 | def reverse(self): |
|
3023 | 3287 | if self._ascending is None: |
|
3024 | 3288 | self._list.reverse() |
@@ -3186,6 +3450,12 b' class generatorset(abstractsmartset):' | |||
|
3186 | 3450 | def isdescending(self): |
|
3187 | 3451 | return not self._ascending |
|
3188 | 3452 | |
|
3453 | def istopo(self): | |
|
3454 | # not worth the trouble asserting if the two sets combined are still | |
|
3455 | # in topological order. Use the sort() predicate to explicitly sort | 
|
3456 | # again instead. | |
|
3457 | return False | |
|
3458 | ||
|
3189 | 3459 | def first(self): |
|
3190 | 3460 | if self._ascending: |
|
3191 | 3461 | it = self.fastasc |
@@ -3248,6 +3518,12 b' class spanset(abstractsmartset):' | |||
|
3248 | 3518 | def reverse(self): |
|
3249 | 3519 | self._ascending = not self._ascending |
|
3250 | 3520 | |
|
3521 | def istopo(self): | |
|
3522 | # not worth the trouble asserting if the two sets combined are still | |
|
3523 | # in topological order. Use the sort() predicate to explicitly sort | 
|
3524 | # again instead. | |
|
3525 | return False | |
|
3526 | ||
|
3251 | 3527 | def _iterfilter(self, iterrange): |
|
3252 | 3528 | s = self._hiddenrevs |
|
3253 | 3529 | for r in iterrange: |
@@ -10,6 +10,7 b' from __future__ import absolute_import' | |||
|
10 | 10 | import contextlib |
|
11 | 11 | import errno |
|
12 | 12 | import glob |
|
13 | import hashlib | |
|
13 | 14 | import os |
|
14 | 15 | import re |
|
15 | 16 | import shutil |
@@ -224,7 +225,7 b' def filteredhash(repo, maxrev):' | |||
|
224 | 225 | key = None |
|
225 | 226 | revs = sorted(r for r in cl.filteredrevs if r <= maxrev) |
|
226 | 227 | if revs: |
|
227 | s = util.sha1() | 

228 | s = hashlib.sha1() | |
|
228 | 229 | for rev in revs: |
|
229 | 230 | s.update('%s;' % rev) |
|
230 | 231 | key = s.digest() |
@@ -377,8 +378,24 b' class abstractvfs(object):' | |||
|
377 | 378 | def readlock(self, path): |
|
378 | 379 | return util.readlock(self.join(path)) |
|
379 | 380 | |
|
380 | def rename(self, src, dst): | |
|
381 | return util.rename(self.join(src), self.join(dst)) | |
|
381 | def rename(self, src, dst, checkambig=False): | |
|
382 | """Rename from src to dst | |
|
383 | ||
|
384 | checkambig argument is used with util.filestat, and is useful | |
|
385 | only if destination file is guarded by any lock | |
|
386 | (e.g. repo.lock or repo.wlock). | |
|
387 | """ | |
|
388 | dstpath = self.join(dst) | |
|
389 | oldstat = checkambig and util.filestat(dstpath) | |
|
390 | if oldstat and oldstat.stat: | |
|
391 | ret = util.rename(self.join(src), dstpath) | |
|
392 | newstat = util.filestat(dstpath) | |
|
393 | if newstat.isambig(oldstat): | |
|
394 | # stat of renamed file is ambiguous to original one | |
|
395 | advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff | |
|
396 | os.utime(dstpath, (advanced, advanced)) | |
|
397 | return ret | |
|
398 | return util.rename(self.join(src), dstpath) | |
|
382 | 399 | |
|
383 | 400 | def readlink(self, path): |
|
384 | 401 | return os.readlink(self.join(path)) |
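
The ambiguity guarded against above: replacing a file within the same second, with content of the same size, leaves its (mtime, size) stat signature unchanged, so stat-based cache validation would miss the change. The fix nudges the mtime forward. A standalone sketch of just that step (advance_mtime is illustrative, not a Mercurial API):

    import os

    def advance_mtime(path, old_mtime):
        # keep the value within 31 bits, as the code above does
        advanced = (old_mtime + 1) & 0x7fffffff
        os.utime(path, (advanced, advanced))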
@@ -451,7 +468,8 b' class abstractvfs(object):' | |||
|
451 | 468 | # have a use case. |
|
452 | 469 | vfs = getattr(self, 'vfs', self) |
|
453 | 470 | if getattr(vfs, '_backgroundfilecloser', None): |
|
454 | raise error.Abort('can only have 1 active background file closer') | |
|
471 | raise error.Abort( | |
|
472 | _('can only have 1 active background file closer')) | |
|
455 | 473 | |
|
456 | 474 | with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc: |
|
457 | 475 | try: |
@@ -502,7 +520,7 b' class vfs(abstractvfs):' | |||
|
502 | 520 | os.chmod(name, self.createmode & 0o666) |
|
503 | 521 | |
|
504 | 522 | def __call__(self, path, mode="r", text=False, atomictemp=False, |
|
505 | notindexed=False, backgroundclose=False): | |
|
523 | notindexed=False, backgroundclose=False, checkambig=False): | |
|
506 | 524 | '''Open ``path`` file, which is relative to vfs root. |
|
507 | 525 | |
|
508 | 526 | Newly created directories are marked as "not to be indexed by |
@@ -521,6 +539,10 b' class vfs(abstractvfs):' | |||
|
521 | 539 | closing a file on a background thread and reopening it. (If the |
|
522 | 540 | file were opened multiple times, there could be unflushed data |
|
523 | 541 | because the original file handle hasn't been flushed/closed yet.) |
|
542 | ||
|
543 | ``checkambig`` argument is passed to atomictempfile (valid | 
|
544 | only for writing), and is useful only if target file is | |
|
545 | guarded by any lock (e.g. repo.lock or repo.wlock). | |
|
524 | 546 | ''' |
|
525 | 547 | if self._audit: |
|
526 | 548 | r = util.checkosfilename(path) |
@@ -540,7 +562,8 b' class vfs(abstractvfs):' | |||
|
540 | 562 | if basename: |
|
541 | 563 | if atomictemp: |
|
542 | 564 | util.makedirs(dirname, self.createmode, notindexed) |
|
543 | return util.atomictempfile(f, mode, self.createmode) | 

565 | return util.atomictempfile(f, mode, self.createmode, | |
|
566 | checkambig=checkambig) | |
|
544 | 567 | try: |
|
545 | 568 | if 'w' in mode: |
|
546 | 569 | util.unlink(f) |
@@ -568,8 +591,9 b' class vfs(abstractvfs):' | |||
|
568 | 591 | |
|
569 | 592 | if backgroundclose: |
|
570 | 593 | if not self._backgroundfilecloser: |
|
571 | raise error.Abort('backgroundclose can only be used when a ' | |
|
594 | raise error.Abort(_('backgroundclose can only be used when a ' | |
|
572 | 595 | 'backgroundclosing context manager is active') |
|
596 | ) | |
|
573 | 597 | |
|
574 | 598 | fp = delayclosedfile(fp, self._backgroundfilecloser) |
|
575 | 599 | |
@@ -640,7 +664,7 b' class readonlyvfs(abstractvfs, auditvfs)' | |||
|
640 | 664 | |
|
641 | 665 | def __call__(self, path, mode='r', *args, **kw): |
|
642 | 666 | if mode not in ('r', 'rb'): |
|
643 | raise error.Abort('this vfs is read only') | |
|
667 | raise error.Abort(_('this vfs is read only')) | |
|
644 | 668 | return self.vfs(path, mode, *args, **kw) |
|
645 | 669 | |
|
646 | 670 | def join(self, path, *insidef): |
@@ -751,7 +775,7 b" def revsingle(repo, revspec, default='.'" | |||
|
751 | 775 | |
|
752 | 776 | def _pairspec(revspec): |
|
753 | 777 | tree = revset.parse(revspec) |
|
754 | tree = revset.optimize(tree, False)[1] # fix up "x^:y" -> "(x^):y" | 

778 | tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y" | |
|
755 | 779 | return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall') |
|
756 | 780 | |
|
757 | 781 | def revpair(repo, revs): |
@@ -784,10 +808,29 b' def revpair(repo, revs):' | |||
|
784 | 808 | |
|
785 | 809 | return repo.lookup(first), repo.lookup(second) |
|
786 | 810 | |
|
787 | def revrange(repo, revs): | 

788 | """Yield revision as strings from a list of revision specifications.""" | |
|
811 | def revrange(repo, specs): | |
|
812 | """Execute 1 to many revsets and return the union. | |
|
813 | ||
|
814 | This is the preferred mechanism for executing revsets using user-specified | |
|
815 | config options, such as revset aliases. | |
|
816 | ||
|
817 | The revsets specified by ``specs`` will be executed via a chained ``OR`` | |
|
818 | expression. If ``specs`` is empty, an empty result is returned. | |
|
819 | ||
|
820 | ``specs`` can contain integers, in which case they are assumed to be | |
|
821 | revision numbers. | |
|
822 | ||
|
823 | It is assumed the revsets are already formatted. If you have arguments | |
|
824 | that need to be expanded in the revset, call ``revset.formatspec()`` | |
|
825 | and pass the result as an element of ``specs``. | |
|
826 | ||
|
827 | Specifying a single revset is allowed. | |
|
828 | ||
|
829 | Returns a ``revset.abstractsmartset`` which is a list-like interface over | |
|
830 | integer revisions. | |
|
831 | """ | |
|
789 | 832 | allspecs = [] |
|
790 | for spec in revs: | 

833 | for spec in specs: | |
|
791 | 834 | if isinstance(spec, int): |
|
792 | 835 | spec = revset.formatspec('rev(%d)', spec) |
|
793 | 836 | allspecs.append(spec) |
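
A hypothetical caller of the new revrange() API (assuming 'repo' is an open localrepository); each element of specs is a complete revset and the results are OR'd together into a single smartset:

    revs = scmutil.revrange(repo, ['draft()', 'bookmark()'])
    for rev in revs:          # list-like iteration over integer revisions
        print(repo[rev])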
@@ -1183,6 +1226,9 b' class filecache(object):' | |||
|
1183 | 1226 | return self |
|
1184 | 1227 | |
|
1185 | 1228 | def __get__(self, obj, type=None): |
|
1229 | # if accessed on the class, return the descriptor itself. | |
|
1230 | if obj is None: | |
|
1231 | return self | |
|
1186 | 1232 | # do we need to check if the file changed? |
|
1187 | 1233 | if self.name in obj.__dict__: |
|
1188 | 1234 | assert self.name in obj._filecache, self.name |
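
The obj-is-None check added above is the standard descriptor convention: when the attribute is looked up on the class itself, __get__ receives no instance and returns the descriptor object (property behaves the same way). A minimal sketch:

    class cached(object):
        def __get__(self, obj, type=None):
            if obj is None:
                return self          # SomeClass.attr -> the descriptor
            return 'computed'        # instance.attr  -> the value

    class C(object):
        attr = cached()

    assert isinstance(C.attr, cached)
    assert C().attr == 'computed'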
@@ -1358,8 +1404,8 b' class backgroundfilecloser(object):' | |||
|
1358 | 1404 | def close(self, fh): |
|
1359 | 1405 | """Schedule a file for closing.""" |
|
1360 | 1406 | if not self._entered: |
|
1361 | raise error.Abort('can only call close() when context manager ' | |
|
1362 | 'active') | |
|
1407 | raise error.Abort(_('can only call close() when context manager ' | |
|
1408 | 'active')) | |
|
1363 | 1409 | |
|
1364 | 1410 | # If a background thread encountered an exception, raise now so we fail |
|
1365 | 1411 | # fast. Otherwise we may potentially go on for minutes until the error |
@@ -1375,4 +1421,3 b' class backgroundfilecloser(object):' | |||
|
1375 | 1421 | return |
|
1376 | 1422 | |
|
1377 | 1423 | self._queue.put(fh, block=True, timeout=None) |
|
1378 |
@@ -7,6 +7,8 b'' | |||
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | import hashlib | |
|
11 | ||
|
10 | 12 | from .i18n import _ |
|
11 | 13 | from . import ( |
|
12 | 14 | bdiff, |
@@ -27,14 +29,14 b' def _findexactmatches(repo, added, remov' | |||
|
27 | 29 | for i, fctx in enumerate(removed): |
|
28 | 30 | repo.ui.progress(_('searching for exact renames'), i, total=numfiles, |
|
29 | 31 | unit=_('files')) |
|
30 | h = util.sha1(fctx.data()).digest() | 

32 | h = hashlib.sha1(fctx.data()).digest() | |
|
31 | 33 | hashes[h] = fctx |
|
32 | 34 | |
|
33 | 35 | # For each added file, see if it corresponds to a removed file. |
|
34 | 36 | for i, fctx in enumerate(added): |
|
35 | 37 | repo.ui.progress(_('searching for exact renames'), i + len(removed), |
|
36 | 38 | total=numfiles, unit=_('files')) |
|
37 | h = util.sha1(fctx.data()).digest() | 

39 | h = hashlib.sha1(fctx.data()).digest() | |
|
38 | 40 | if h in hashes: |
|
39 | 41 | yield (hashes[h], fctx) |
|
40 | 42 | |
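
The exact-match pass above is a hashing rendezvous: index removed files by a digest of their full contents, then probe with each added file. A standalone sketch over plain dicts (names and shapes are illustrative):

    import hashlib

    def exactmatches(removed, added):     # dicts mapping path -> bytes
        hashes = {hashlib.sha1(data).digest(): path
                  for path, data in removed.items()}
        for path, data in added.items():
            h = hashlib.sha1(data).digest()
            if h in hashes:
                yield hashes[h], path     # (old path, new path)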
@@ -106,4 +108,3 b' def findrenames(repo, added, removed, th' | |||
|
106 | 108 | for (a, b, score) in _findsimilarmatches(repo, |
|
107 | 109 | sorted(addedfiles), sorted(removedfiles), threshold): |
|
108 | 110 | yield (a.path(), b.path(), score) |
|
109 |
@@ -307,7 +307,7 b' class sshpeer(wireproto.wirepeer):' | |||
|
307 | 307 | r = self._call(cmd, **args) |
|
308 | 308 | if r: |
|
309 | 309 | # XXX needs to be made better |
|
310 | raise error.Abort('unexpected remote reply: %s' % r) | |
|
310 | raise error.Abort(_('unexpected remote reply: %s') % r) | |
|
311 | 311 | while True: |
|
312 | 312 | d = fp.read(4096) |
|
313 | 313 | if not d: |
@@ -11,6 +11,7 b' from __future__ import absolute_import' | |||
|
11 | 11 | import os |
|
12 | 12 | import sys |
|
13 | 13 | |
|
14 | from .i18n import _ | |
|
14 | 15 | from . import ( |
|
15 | 16 | error, |
|
16 | 17 | hook, |
@@ -40,7 +41,7 b' class sshserver(wireproto.abstractserver' | |||
|
40 | 41 | argline = self.fin.readline()[:-1] |
|
41 | 42 | arg, l = argline.split() |
|
42 | 43 | if arg not in keys: |
|
43 | raise error.Abort("unexpected parameter %r" % arg) | |
|
44 | raise error.Abort(_("unexpected parameter %r") % arg) | |
|
44 | 45 | if arg == '*': |
|
45 | 46 | star = {} |
|
46 | 47 | for k in xrange(int(l)): |
@@ -9,6 +9,7 b'' | |||
|
9 | 9 | |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | import hashlib | |
|
12 | 13 | import os |
|
13 | 14 | import re |
|
14 | 15 | import ssl |
@@ -28,14 +29,21 b' from . import (' | |||
|
28 | 29 | # modern/secure or legacy/insecure. Many operations in this module have |
|
29 | 30 | # separate code paths depending on support in Python. |
|
30 | 31 | |
|
32 | configprotocols = set([ | |
|
33 | 'tls1.0', | |
|
34 | 'tls1.1', | |
|
35 | 'tls1.2', | |
|
36 | ]) | |
|
37 | ||
|
31 | 38 | hassni = getattr(ssl, 'HAS_SNI', False) |
|
32 | 39 | |
|
33 | try: | |
|
34 | OP_NO_SSLv2 = ssl.OP_NO_SSLv2 | |
|
35 | OP_NO_SSLv3 = ssl.OP_NO_SSLv3 | |
|
36 | except AttributeError: | |
|
37 | OP_NO_SSLv2 = 0x1000000 | |
|
38 | OP_NO_SSLv3 = 0x2000000 | |
|
40 | # TLS 1.1 and 1.2 may not be supported if the OpenSSL that Python is | 

41 | # compiled against doesn't support them. | 
|
42 | supportedprotocols = set(['tls1.0']) | |
|
43 | if util.safehasattr(ssl, 'PROTOCOL_TLSv1_1'): | |
|
44 | supportedprotocols.add('tls1.1') | |
|
45 | if util.safehasattr(ssl, 'PROTOCOL_TLSv1_2'): | |
|
46 | supportedprotocols.add('tls1.2') | |
|
39 | 47 | |
|
40 | 48 | try: |
|
41 | 49 | # ssl.SSLContext was added in 2.7.9 and presence indicates modern |
@@ -76,15 +84,19 b' except AttributeError:' | |||
|
76 | 84 | |
|
77 | 85 | def load_verify_locations(self, cafile=None, capath=None, cadata=None): |
|
78 | 86 | if capath: |
|
79 | raise error.Abort('capath not supported') | |
|
87 | raise error.Abort(_('capath not supported')) | |
|
80 | 88 | if cadata: |
|
81 | raise error.Abort('cadata not supported') | |
|
89 | raise error.Abort(_('cadata not supported')) | |
|
82 | 90 | |
|
83 | 91 | self._cacerts = cafile |
|
84 | 92 | |
|
85 | 93 | def set_ciphers(self, ciphers): |
|
86 | 94 | if not self._supportsciphers: |
|
87 | raise error.Abort('setting ciphers not supported') | 

95 | raise error.Abort(_('setting ciphers in [hostsecurity] is not ' | |
|
96 | 'supported by this version of Python'), | |
|
97 | hint=_('remove the config option or run ' | |
|
98 | 'Mercurial with a modern Python ' | |
|
99 | 'version (preferred)')) | |
|
88 | 100 | |
|
89 | 101 | self._ciphers = ciphers |
|
90 | 102 | |
@@ -107,8 +119,213 b' except AttributeError:' | |||
|
107 | 119 | |
|
108 | 120 | return ssl.wrap_socket(socket, **args) |
|
109 | 121 | |
|
110 | def wrapsocket(sock, keyfile, certfile, ui, cert_reqs=ssl.CERT_NONE, | |
|
111 | ca_certs=None, serverhostname=None): | |
|
122 | def _hostsettings(ui, hostname): | |
|
123 | """Obtain security settings for a hostname. | |
|
124 | ||
|
125 | Returns a dict of settings relevant to that hostname. | |
|
126 | """ | |
|
127 | s = { | |
|
128 | # Whether we should attempt to load default/available CA certs | |
|
129 | # if an explicit ``cafile`` is not defined. | |
|
130 | 'allowloaddefaultcerts': True, | |
|
131 | # List of 2-tuple of (hash algorithm, hash). | |
|
132 | 'certfingerprints': [], | |
|
133 | # Path to file containing concatenated CA certs. Used by | |
|
134 | # SSLContext.load_verify_locations(). | |
|
135 | 'cafile': None, | |
|
136 | # Whether certificate verification should be disabled. | |
|
137 | 'disablecertverification': False, | |
|
138 | # Whether the legacy [hostfingerprints] section has data for this host. | |
|
139 | 'legacyfingerprint': False, | |
|
140 | # PROTOCOL_* constant to use for SSLContext.__init__. | |
|
141 | 'protocol': None, | |
|
142 | # ssl.CERT_* constant used by SSLContext.verify_mode. | |
|
143 | 'verifymode': None, | |
|
144 | # Defines extra ssl.OP* bitwise options to set. | |
|
145 | 'ctxoptions': None, | |
|
146 | # OpenSSL Cipher List to use (instead of default). | |
|
147 | 'ciphers': None, | |
|
148 | } | |
|
149 | ||
|
150 | # Allow minimum TLS protocol to be specified in the config. | |
|
151 | def validateprotocol(protocol, key): | |
|
152 | if protocol not in configprotocols: | |
|
153 | raise error.Abort( | |
|
154 | _('unsupported protocol from hostsecurity.%s: %s') % | |
|
155 | (key, protocol), | |
|
156 | hint=_('valid protocols: %s') % | |
|
157 | ' '.join(sorted(configprotocols))) | |
|
158 | ||
|
159 | # We default to TLS 1.1+ where we can because TLS 1.0 has known | |
|
160 | # vulnerabilities (like BEAST and POODLE). We allow users to downgrade to | |
|
161 | # TLS 1.0+ via config options in case a legacy server is encountered. | |
|
162 | if 'tls1.1' in supportedprotocols: | |
|
163 | defaultprotocol = 'tls1.1' | |
|
164 | else: | |
|
165 | # Let people know they are borderline secure. | |
|
166 | # We don't document this config option because we want people to see | |
|
167 | # the bold warnings on the web site. | |
|
168 | # internal config: hostsecurity.disabletls10warning | |
|
169 | if not ui.configbool('hostsecurity', 'disabletls10warning'): | |
|
170 | ui.warn(_('warning: connecting to %s using legacy security ' | |
|
171 | 'technology (TLS 1.0); see ' | |
|
172 | 'https://mercurial-scm.org/wiki/SecureConnections for ' | |
|
173 | 'more info\n') % hostname) | |
|
174 | defaultprotocol = 'tls1.0' | |
|
175 | ||
|
176 | key = 'minimumprotocol' | |
|
177 | protocol = ui.config('hostsecurity', key, defaultprotocol) | |
|
178 | validateprotocol(protocol, key) | |
|
179 | ||
|
180 | key = '%s:minimumprotocol' % hostname | |
|
181 | protocol = ui.config('hostsecurity', key, protocol) | |
|
182 | validateprotocol(protocol, key) | |
|
183 | ||
|
184 | s['protocol'], s['ctxoptions'] = protocolsettings(protocol) | |
|
185 | ||
|
186 | ciphers = ui.config('hostsecurity', 'ciphers') | |
|
187 | ciphers = ui.config('hostsecurity', '%s:ciphers' % hostname, ciphers) | |
|
188 | s['ciphers'] = ciphers | |
|
189 | ||
|
190 | # Look for fingerprints in [hostsecurity] section. Value is a list | |
|
191 | # of <alg>:<fingerprint> strings. | |
|
192 | fingerprints = ui.configlist('hostsecurity', '%s:fingerprints' % hostname, | |
|
193 | []) | |
|
194 | for fingerprint in fingerprints: | |
|
195 | if not (fingerprint.startswith(('sha1:', 'sha256:', 'sha512:'))): | |
|
196 | raise error.Abort(_('invalid fingerprint for %s: %s') % ( | |
|
197 | hostname, fingerprint), | |
|
198 | hint=_('must begin with "sha1:", "sha256:", ' | |
|
199 | 'or "sha512:"')) | |
|
200 | ||
|
201 | alg, fingerprint = fingerprint.split(':', 1) | |
|
202 | fingerprint = fingerprint.replace(':', '').lower() | |
|
203 | s['certfingerprints'].append((alg, fingerprint)) | |
|
204 | ||
|
205 | # Fingerprints from [hostfingerprints] are always SHA-1. | |
|
206 | for fingerprint in ui.configlist('hostfingerprints', hostname, []): | |
|
207 | fingerprint = fingerprint.replace(':', '').lower() | |
|
208 | s['certfingerprints'].append(('sha1', fingerprint)) | |
|
209 | s['legacyfingerprint'] = True | |
|
210 | ||
|
211 | # If a host cert fingerprint is defined, it is the only thing that | |
|
212 | # matters. No need to validate CA certs. | |
|
213 | if s['certfingerprints']: | |
|
214 | s['verifymode'] = ssl.CERT_NONE | |
|
215 | s['allowloaddefaultcerts'] = False | |
|
216 | ||
|
217 | # If --insecure is used, don't take CAs into consideration. | |
|
218 | elif ui.insecureconnections: | |
|
219 | s['disablecertverification'] = True | |
|
220 | s['verifymode'] = ssl.CERT_NONE | |
|
221 | s['allowloaddefaultcerts'] = False | |
|
222 | ||
|
223 | if ui.configbool('devel', 'disableloaddefaultcerts'): | |
|
224 | s['allowloaddefaultcerts'] = False | |
|
225 | ||
|
226 | # If both fingerprints and a per-host ca file are specified, issue a warning | |
|
227 | # because users should not be surprised about what security is or isn't | |
|
228 | # being performed. | |
|
229 | cafile = ui.config('hostsecurity', '%s:verifycertsfile' % hostname) | |
|
230 | if s['certfingerprints'] and cafile: | |
|
231 | ui.warn(_('(hostsecurity.%s:verifycertsfile ignored when host ' | |
|
232 | 'fingerprints defined; using host fingerprints for ' | |
|
233 | 'verification)\n') % hostname) | |
|
234 | ||
|
235 | # Try to hook up CA certificate validation unless something above | |
|
236 | # makes it not necessary. | |
|
237 | if s['verifymode'] is None: | |
|
238 | # Look at per-host ca file first. | |
|
239 | if cafile: | |
|
240 | cafile = util.expandpath(cafile) | |
|
241 | if not os.path.exists(cafile): | |
|
242 | raise error.Abort(_('path specified by %s does not exist: %s') % | |
|
243 | ('hostsecurity.%s:verifycertsfile' % hostname, | |
|
244 | cafile)) | |
|
245 | s['cafile'] = cafile | |
|
246 | else: | |
|
247 | # Find global certificates file in config. | |
|
248 | cafile = ui.config('web', 'cacerts') | |
|
249 | ||
|
250 | if cafile: | |
|
251 | cafile = util.expandpath(cafile) | |
|
252 | if not os.path.exists(cafile): | |
|
253 | raise error.Abort(_('could not find web.cacerts: %s') % | |
|
254 | cafile) | |
|
255 | elif s['allowloaddefaultcerts']: | |
|
256 | # CAs not defined in config. Try to find system bundles. | |
|
257 | cafile = _defaultcacerts(ui) | |
|
258 | if cafile: | |
|
259 | ui.debug('using %s for CA file\n' % cafile) | |
|
260 | ||
|
261 | s['cafile'] = cafile | |
|
262 | ||
|
263 | # Require certificate validation if CA certs are being loaded and | |
|
264 | # verification hasn't been disabled above. | |
|
265 | if cafile or (_canloaddefaultcerts and s['allowloaddefaultcerts']): | |
|
266 | s['verifymode'] = ssl.CERT_REQUIRED | |
|
267 | else: | |
|
268 | # At this point we don't have a fingerprint, aren't being | |
|
269 | # explicitly insecure, and can't load CA certs. Connecting | |
|
270 | # is insecure. We allow the connection and abort during | |
|
271 | # validation (once we have the fingerprint to print to the | |
|
272 | # user). | |
|
273 | s['verifymode'] = ssl.CERT_NONE | |
|
274 | ||
|
275 | assert s['protocol'] is not None | |
|
276 | assert s['ctxoptions'] is not None | |
|
277 | assert s['verifymode'] is not None | |
|
278 | ||
|
279 | return s | |
|
280 | ||
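
The fingerprint handling above, in isolation: a "<alg>:<hex>" config value is split on the first colon, and the hex part is normalized by dropping colons and lowercasing. A standalone sketch of that normalization:

    def parsefingerprint(value):
        alg, fp = value.split(':', 1)
        return alg, fp.replace(':', '').lower()

    assert parsefingerprint('sha256:AB:CD:12') == ('sha256', 'abcd12')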
|
281 | def protocolsettings(protocol): | |
|
282 | """Resolve the protocol and context options for a config value.""" | |
|
283 | if protocol not in configprotocols: | |
|
284 | raise ValueError('protocol value not supported: %s' % protocol) | |
|
285 | ||
|
286 | # Despite its name, PROTOCOL_SSLv23 selects the highest protocol | |
|
287 | # that both ends support, including TLS protocols. On legacy stacks, | |
|
288 | # the highest it likely goes is TLS 1.0. On modern stacks, it can | |
|
289 | # support TLS 1.2. | |
|
290 | # | |
|
291 | # The PROTOCOL_TLSv* constants select a specific TLS version | |
|
292 | # only (as opposed to multiple versions). So the method for | |
|
293 | # supporting multiple TLS versions is to use PROTOCOL_SSLv23 and | |
|
294 | # disable protocols via SSLContext.options and OP_NO_* constants. | |
|
295 | # However, SSLContext.options doesn't work unless we have the | |
|
296 | # full/real SSLContext available to us. | |
|
297 | if supportedprotocols == set(['tls1.0']): | |
|
298 | if protocol != 'tls1.0': | |
|
299 | raise error.Abort(_('current Python does not support protocol ' | |
|
300 | 'setting %s') % protocol, | |
|
301 | hint=_('upgrade Python or disable setting since ' | |
|
302 | 'only TLS 1.0 is supported')) | |
|
303 | ||
|
304 | return ssl.PROTOCOL_TLSv1, 0 | |
|
305 | ||
|
306 | # WARNING: returned options don't work unless the modern ssl module | |
|
307 | # is available. Be careful when adding options here. | |
|
308 | ||
|
309 | # SSLv2 and SSLv3 are broken. We ban them outright. | |
|
310 | options = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | |
|
311 | ||
|
312 | if protocol == 'tls1.0': | |
|
313 | # Defaults above are to use TLS 1.0+ | |
|
314 | pass | |
|
315 | elif protocol == 'tls1.1': | |
|
316 | options |= ssl.OP_NO_TLSv1 | |
|
317 | elif protocol == 'tls1.2': | |
|
318 | options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | |
|
319 | else: | |
|
320 | raise error.Abort(_('this should not happen')) | |
|
321 | ||
|
322 | # Prevent CRIME. | |
|
323 | # There is no guarantee this attribute is defined on the module. | |
|
324 | options |= getattr(ssl, 'OP_NO_COMPRESSION', 0) | |
|
325 | ||
|
326 | return ssl.PROTOCOL_SSLv23, options | |
|
327 | ||
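
How the (protocol, options) pair returned above is meant to be consumed (a sketch assuming the modern ssl module): start from PROTOCOL_SSLv23, which negotiates anything, and mask off every version below the requested floor:

    import ssl

    protocol, options = protocolsettings('tls1.2')
    ctx = ssl.SSLContext(protocol)
    ctx.options |= options     # forbids SSLv2/v3 and TLS 1.0/1.1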
|
328 | def wrapsocket(sock, keyfile, certfile, ui, serverhostname=None): | |
|
112 | 329 | """Add SSL/TLS to a socket. |
|
113 | 330 | |
|
114 | 331 | This is a glorified wrapper for ``ssl.wrap_socket()``. It makes sane |
@@ -121,32 +338,33 b' def wrapsocket(sock, keyfile, certfile, ' | |||
|
121 | 338 | server (and client) support SNI, this tells the server which certificate |
|
122 | 339 | to use. |
|
123 | 340 | """ |
|
124 | # Despite its name, PROTOCOL_SSLv23 selects the highest protocol | |
|
125 | # that both ends support, including TLS protocols. On legacy stacks, | |
|
126 | # the highest it likely goes in TLS 1.0. On modern stacks, it can | |
|
127 | # support TLS 1.2. | |
|
128 | # | |
|
129 | # The PROTOCOL_TLSv* constants select a specific TLS version | |
|
130 | # only (as opposed to multiple versions). So the method for | |
|
131 | # supporting multiple TLS versions is to use PROTOCOL_SSLv23 and | |
|
132 | # disable protocols via SSLContext.options and OP_NO_* constants. | |
|
133 | # However, SSLContext.options doesn't work unless we have the | |
|
134 | # full/real SSLContext available to us. | |
|
135 | # | |
|
136 | # SSLv2 and SSLv3 are broken. We ban them outright. | |
|
137 | if modernssl: | |
|
138 | protocol = ssl.PROTOCOL_SSLv23 | |
|
139 | else: | |
|
140 | protocol = ssl.PROTOCOL_TLSv1 | |
|
341 | if not serverhostname: | |
|
342 | raise error.Abort(_('serverhostname argument is required')) | |
|
343 | ||
|
344 | settings = _hostsettings(ui, serverhostname) | |
|
141 | 345 | |
|
142 | # TODO use ssl.create_default_context() on modernssl. | 

143 | sslcontext = SSLContext(protocol) | |
|
346 | # We can't use ssl.create_default_context() because it calls | |
|
347 | # load_default_certs() unless CA arguments are passed to it. We want to | |
|
348 | # have explicit control over CA loading because implicitly loading | |
|
349 | # CAs may undermine the user's intent. For example, a user may define a CA | |
|
350 | # bundle with a specific CA cert removed. If the system/default CA bundle | |
|
351 | # is loaded and contains that removed CA, you've just undone the user's | |
|
352 | # choice. | |
|
353 | sslcontext = SSLContext(settings['protocol']) | |
|
144 | 354 | |
|
145 | # This is a no-op on old Python. | 

146 | sslcontext.options |= OP_NO_SSLv2 | OP_NO_SSLv3 | |
|
355 | # This is a no-op unless using modern ssl. | |
|
356 | sslcontext.options |= settings['ctxoptions'] | |
|
147 | 357 | |
|
148 | 358 | # This still works on our fake SSLContext. |
|
149 | sslcontext.verify_mode = cert_reqs | 

359 | sslcontext.verify_mode = settings['verifymode'] | |
|
360 | ||
|
361 | if settings['ciphers']: | |
|
362 | try: | |
|
363 | sslcontext.set_ciphers(settings['ciphers']) | |
|
364 | except ssl.SSLError as e: | |
|
365 | raise error.Abort(_('could not set ciphers: %s') % e.args[0], | |
|
366 | hint=_('change cipher string (%s) in config') % | |
|
367 | settings['ciphers']) | |
|
150 | 368 | |
|
151 | 369 | if certfile is not None: |
|
152 | 370 | def password(): |
@@ -154,20 +372,123 b' def wrapsocket(sock, keyfile, certfile, ' | |||
|
154 | 372 | return ui.getpass(_('passphrase for %s: ') % f, '') |
|
155 | 373 | sslcontext.load_cert_chain(certfile, keyfile, password) |
|
156 | 374 | |
|
157 | if ca_certs is not None: | 

158 | sslcontext.load_verify_locations(cafile=ca_certs) | |
|
159 | else: | |
|
375 | if settings['cafile'] is not None: | |
|
376 | try: | |
|
377 | sslcontext.load_verify_locations(cafile=settings['cafile']) | |
|
378 | except ssl.SSLError as e: | |
|
379 | raise error.Abort(_('error loading CA file %s: %s') % ( | |
|
380 | settings['cafile'], e.args[1]), | |
|
381 | hint=_('file is empty or malformed?')) | |
|
382 | caloaded = True | |
|
383 | elif settings['allowloaddefaultcerts']: | |
|
160 | 384 | # This is a no-op on old Python. |
|
161 | 385 | sslcontext.load_default_certs() |
|
386 | caloaded = True | |
|
387 | else: | |
|
388 | caloaded = False | |
|
162 | 389 | |
|
163 | sslsocket = sslcontext.wrap_socket(sock, server_hostname=serverhostname) | |
|
390 | try: | |
|
391 | sslsocket = sslcontext.wrap_socket(sock, server_hostname=serverhostname) | |
|
392 | except ssl.SSLError as e: | |
|
393 | # If we're doing certificate verification and no CA certs are loaded, | |
|
394 | # that is almost certainly the reason why verification failed. Provide | |
|
395 | # a hint to the user. | |
|
396 | # Only modern ssl module exposes SSLContext.get_ca_certs() so we can | |
|
397 | # only show this warning if modern ssl is available. | |
|
398 | if (caloaded and settings['verifymode'] == ssl.CERT_REQUIRED and | |
|
399 | modernssl and not sslcontext.get_ca_certs()): | |
|
400 | ui.warn(_('(an attempt was made to load CA certificates but none ' | |
|
401 | 'were loaded; see ' | |
|
402 | 'https://mercurial-scm.org/wiki/SecureConnections for ' | |
|
403 | 'how to configure Mercurial to avoid this error)\n')) | |
|
404 | # Try to print more helpful error messages for known failures. | |
|
405 | if util.safehasattr(e, 'reason'): | |
|
406 | if e.reason == 'UNSUPPORTED_PROTOCOL': | |
|
407 | ui.warn(_('(could not negotiate a common protocol; see ' | |
|
408 | 'https://mercurial-scm.org/wiki/SecureConnections ' | |
|
409 | 'for how to configure Mercurial to avoid this ' | |
|
410 | 'error)\n')) | |
|
411 | raise | |
|
412 | ||
|
164 | 413 | # check if wrap_socket failed silently because socket had been |
|
165 | 414 | # closed |
|
166 | 415 | # - see http://bugs.python.org/issue13721 |
|
167 | 416 | if not sslsocket.cipher(): |
|
168 | 417 | raise error.Abort(_('ssl connection failed')) |
|
418 | ||
|
419 | sslsocket._hgstate = { | |
|
420 | 'caloaded': caloaded, | |
|
421 | 'hostname': serverhostname, | |
|
422 | 'settings': settings, | |
|
423 | 'ui': ui, | |
|
424 | } | |
|
425 | ||
|
169 | 426 | return sslsocket |
|
170 | 427 | |
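
A hypothetical client-side use of the new wrapsocket() signature (ui is assumed to be a configured ui object); serverhostname is now mandatory, and CA/fingerprint policy is resolved internally from the configuration rather than passed as cert_reqs/ca_certs arguments:

    import socket

    sock = socket.create_connection(('example.com', 443))
    sslsock = wrapsocket(sock, None, None, ui, serverhostname='example.com')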
|
428 | def wrapserversocket(sock, ui, certfile=None, keyfile=None, cafile=None, | |
|
429 | requireclientcert=False): | |
|
430 | """Wrap a socket for use by servers. | |
|
431 | ||
|
432 | ``certfile`` and ``keyfile`` specify the files containing the certificate's | |
|
433 | public and private keys, respectively. Both keys can be defined in the same | |
|
434 | file via ``certfile`` (the private key must come first in the file). | |
|
435 | ||
|
436 | ``cafile`` defines the path to certificate authorities. | |
|
437 | ||
|
438 | ``requireclientcert`` specifies whether to require client certificates. | |
|
439 | ||
|
440 | Typically ``cafile`` is only defined if ``requireclientcert`` is true. | |
|
441 | """ | |
|
442 | protocol, options = protocolsettings('tls1.0') | |
|
443 | ||
|
444 | # This config option is intended for use in tests only. It is a giant | |
|
445 | # footgun to kill security. Don't define it. | |
|
446 | exactprotocol = ui.config('devel', 'serverexactprotocol') | |
|
447 | if exactprotocol == 'tls1.0': | |
|
448 | protocol = ssl.PROTOCOL_TLSv1 | |
|
449 | elif exactprotocol == 'tls1.1': | |
|
450 | if 'tls1.1' not in supportedprotocols: | |
|
451 | raise error.Abort(_('TLS 1.1 not supported by this Python')) | |
|
452 | protocol = ssl.PROTOCOL_TLSv1_1 | |
|
453 | elif exactprotocol == 'tls1.2': | |
|
454 | if 'tls1.2' not in supportedprotocols: | |
|
455 | raise error.Abort(_('TLS 1.2 not supported by this Python')) | |
|
456 | protocol = ssl.PROTOCOL_TLSv1_2 | |
|
457 | elif exactprotocol: | |
|
458 | raise error.Abort(_('invalid value for serverexactprotocol: %s') % | |
|
459 | exactprotocol) | |
|
460 | ||
|
461 | if modernssl: | |
|
462 | # We /could/ use create_default_context() here since it doesn't load | |
|
463 | # CAs when configured for client auth. However, it is hard-coded to | |
|
464 | # use ssl.PROTOCOL_SSLv23 which may not be appropriate here. | |
|
465 | sslcontext = SSLContext(protocol) | |
|
466 | sslcontext.options |= options | |
|
467 | ||
|
468 | # Improve forward secrecy. | |
|
469 | sslcontext.options |= getattr(ssl, 'OP_SINGLE_DH_USE', 0) | |
|
470 | sslcontext.options |= getattr(ssl, 'OP_SINGLE_ECDH_USE', 0) | |
|
471 | ||
|
472 | # Use the list of more secure ciphers if found in the ssl module. | |
|
473 | if util.safehasattr(ssl, '_RESTRICTED_SERVER_CIPHERS'): | |
|
474 | sslcontext.options |= getattr(ssl, 'OP_CIPHER_SERVER_PREFERENCE', 0) | |
|
475 | sslcontext.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS) | |
|
476 | else: | |
|
477 | sslcontext = SSLContext(ssl.PROTOCOL_TLSv1) | |
|
478 | ||
|
479 | if requireclientcert: | |
|
480 | sslcontext.verify_mode = ssl.CERT_REQUIRED | |
|
481 | else: | |
|
482 | sslcontext.verify_mode = ssl.CERT_NONE | |
|
483 | ||
|
484 | if certfile or keyfile: | |
|
485 | sslcontext.load_cert_chain(certfile=certfile, keyfile=keyfile) | |
|
486 | ||
|
487 | if cafile: | |
|
488 | sslcontext.load_verify_locations(cafile=cafile) | |
|
489 | ||
|
490 | return sslcontext.wrap_socket(sock, server_side=True) | |
|
491 | ||
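For readers who want the server-side setup in one piece: assuming a modern ssl module, the new wrapserversocket() amounts to roughly the following (a minimal sketch under that assumption; the option flags are fetched with getattr because older Pythons may lack them):

    import ssl

    def make_server_context(certfile, keyfile=None, cafile=None,
                            requireclientcert=False):
        # TLS 1.0+ with SSLv2/SSLv3 disabled, like protocolsettings('tls1.0')
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ctx.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
        # Prefer ephemeral keys for forward secrecy where supported.
        ctx.options |= getattr(ssl, 'OP_SINGLE_DH_USE', 0)
        ctx.options |= getattr(ssl, 'OP_SINGLE_ECDH_USE', 0)
        ctx.verify_mode = (ssl.CERT_REQUIRED if requireclientcert
                           else ssl.CERT_NONE)
        ctx.load_cert_chain(certfile=certfile, keyfile=keyfile)
        if cafile:
            ctx.load_verify_locations(cafile=cafile)
        return ctx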
|
171 | 492 | class wildcarderror(Exception): |
|
172 | 493 | """Represents an error parsing wildcards in DNS name.""" |
|
173 | 494 | |
@@ -268,10 +589,6 b' def _verifycert(cert, hostname):' | |||
|
268 | 589 | else: |
|
269 | 590 | return _('no commonName or subjectAltName found in certificate') |
|
270 | 591 | |
|
271 | ||
|
272 | # CERT_REQUIRED means fetch the cert from the server all the time AND | |
|
273 | # validate it against the CA store provided in web.cacerts. | |
|
274 | ||
|
275 | 592 | def _plainapplepython(): |
|
276 | 593 | """return true if this seems to be a pure Apple Python that |
|
277 | 594 | * is unfrozen and presumably has the whole mercurial module in the file |
@@ -286,97 +603,172 b' def _plainapplepython():' | |||
|
286 | 603 | return (exe.startswith('/usr/bin/python') or |
|
287 | 604 | exe.startswith('/system/library/frameworks/python.framework/')) |
|
288 | 605 | |
|
289 | def _defaultcacerts(): | |
|
290 | """return path to CA certificates; None for system's store; ! to disable""" | |
|
606 | _systemcacertpaths = [ | |
|
607 | # RHEL, CentOS, and Fedora | |
|
608 | '/etc/pki/tls/certs/ca-bundle.trust.crt', | |
|
609 | # Debian, Ubuntu, Gentoo | |
|
610 | '/etc/ssl/certs/ca-certificates.crt', | |
|
611 | ] | |
|
612 | ||
|
613 | def _defaultcacerts(ui): | |
|
614 | """return path to default CA certificates or None. | |
|
615 | ||
|
616 | It is assumed this function is called when the returned certificates | |
|
617 | file will actually be used to validate connections. Therefore this | |
|
618 | function may print warnings or debug messages assuming this usage. | |
|
619 | ||
|
620 | We don't print a message when the Python is able to load default | |
|
621 | CA certs because this scenario is detected at socket connect time. | |
|
622 | """ | |
|
623 | # The "certifi" Python package provides certificates. If it is installed, | |
|
624 | # assume the user intends it to be used and use it. | |
|
625 | try: | |
|
626 | import certifi | |
|
627 | certs = certifi.where() | |
|
628 | ui.debug('using ca certificates from certifi\n') | |
|
629 | return certs | |
|
630 | except ImportError: | |
|
631 | pass | |
|
632 | ||
|
633 | # On Windows, only the modern ssl module is capable of loading the system | |
|
634 | # CA certificates. If we're not capable of doing that, emit a warning | |
|
635 | # because we'll get a certificate verification error later and the lack | |
|
636 | # of loaded CA certificates will be the reason why. | |
|
637 | # Assertion: this code is only called if certificates are being verified. | |
|
638 | if os.name == 'nt': | |
|
639 | if not _canloaddefaultcerts: | |
|
640 | ui.warn(_('(unable to load Windows CA certificates; see ' | |
|
641 | 'https://mercurial-scm.org/wiki/SecureConnections for ' | |
|
642 | 'how to configure Mercurial to avoid this message)\n')) | |
|
643 | ||
|
644 | return None | |
|
645 | ||
|
646 | # Apple's OpenSSL has patches that allow a specially constructed certificate | |
|
647 | # to load the system CA store. If we're running on Apple Python, use this | |
|
648 | # trick. | |
|
291 | 649 | if _plainapplepython(): |
|
292 | 650 | dummycert = os.path.join(os.path.dirname(__file__), 'dummycert.pem') |
|
293 | 651 | if os.path.exists(dummycert): |
|
294 | 652 | return dummycert |
|
295 | if _canloaddefaultcerts: | |
|
653 | ||
|
654 | # The Apple OpenSSL trick isn't available to us. If Python isn't able to | |
|
655 | # load system certs, we're out of luck. | |
|
656 | if sys.platform == 'darwin': | |
|
657 | # FUTURE Consider looking for Homebrew or MacPorts installed certs | |
|
658 | # files. Also consider exporting the keychain certs to a file during | |
|
659 | # Mercurial install. | |
|
660 | if not _canloaddefaultcerts: | |
|
661 | ui.warn(_('(unable to load CA certificates; see ' | |
|
662 | 'https://mercurial-scm.org/wiki/SecureConnections for ' | |
|
663 | 'how to configure Mercurial to avoid this message)\n')) | |
|
296 | 664 | return None |
|
297 | return '!' | |
|
665 | ||
|
666 | # / is writable on Windows. Out of an abundance of caution make sure | |
|
667 | # we're not on Windows because paths from _systemcacerts could be installed | |
|
668 | # by non-admin users. | |
|
669 | assert os.name != 'nt' | |
|
298 | 670 | |
|
299 | def sslkwargs(ui, host): | |
|
300 | kws = {'ui': ui} | |
|
301 | hostfingerprint = ui.config('hostfingerprints', host) | |
|
302 | if hostfingerprint: | |
|
303 | return kws | |
|
304 | cacerts = ui.config('web', 'cacerts') | |
|
305 | if cacerts == '!': | |
|
306 | pass | |
|
307 | elif cacerts: | |
|
308 | cacerts = util.expandpath(cacerts) | |
|
309 | if not os.path.exists(cacerts): | |
|
310 | raise error.Abort(_('could not find web.cacerts: %s') % cacerts) | |
|
311 | else: | |
|
312 | cacerts = _defaultcacerts() | |
|
313 | if cacerts and cacerts != '!': | |
|
314 | ui.debug('using %s to enable OS X system CA\n' % cacerts) | |
|
315 | ui.setconfig('web', 'cacerts', cacerts, 'defaultcacerts') | |
|
316 | if cacerts != '!': | |
|
317 | kws.update({'ca_certs': cacerts, | |
|
318 | 'cert_reqs': ssl.CERT_REQUIRED, | |
|
319 | }) | |
|
320 | return kws | |
|
671 | # Try to find CA certificates in well-known locations. We print a warning | |
|
672 | # when using a found file because we don't want too much silent magic | |
|
673 | # for security settings. The expectation is that proper Mercurial | |
|
674 | # installs will have the CA certs path defined at install time and the | |
|
675 | # installer/packager will make an appropriate decision on the user's | |
|
676 | # behalf. We only get here and perform this setting as a feature of | |
|
677 | # last resort. | |
|
678 | if not _canloaddefaultcerts: | |
|
679 | for path in _systemcacertpaths: | |
|
680 | if os.path.isfile(path): | |
|
681 | ui.warn(_('(using CA certificates from %s; if you see this ' | |
|
682 | 'message, your Mercurial install is not properly ' | |
|
683 | 'configured; see ' | |
|
684 | 'https://mercurial-scm.org/wiki/SecureConnections ' | |
|
685 | 'for how to configure Mercurial to avoid this ' | |
|
686 | 'message)\n') % path) | |
|
687 | return path | |
|
321 | 688 | |
|
322 | class validator(object): | |
|
323 | def __init__(self, ui, host): | |
|
324 | self.ui = ui | |
|
325 | self.host = host | |
|
689 | ui.warn(_('(unable to load CA certificates; see ' | |
|
690 | 'https://mercurial-scm.org/wiki/SecureConnections for ' | |
|
691 | 'how to configure Mercurial to avoid this message)\n')) | |
|
326 | 692 | |
|
327 | def __call__(self, sock, strict=False): | |
|
328 | host = self.host | |
|
693 | return None | |
|
694 | ||
|
695 | def validatesocket(sock): | |
|
696 | """Validate a socket meets security requirements. | |
|
329 | 697 | |
|
330 | if not sock.cipher(): # work around http://bugs.python.org/issue13721 | |
|
331 | raise error.Abort(_('%s ssl connection error') % host) | |
|
332 | try: | |
|
333 | peercert = sock.getpeercert(True) | |
|
334 | peercert2 = sock.getpeercert() | |
|
335 | except AttributeError: | |
|
336 | raise error.Abort(_('%s ssl connection error') % host) | |
|
698 | The passed socket must have been created with ``wrapsocket()``. | |
|
699 | """ | |
|
700 | host = sock._hgstate['hostname'] | |
|
701 | ui = sock._hgstate['ui'] | |
|
702 | settings = sock._hgstate['settings'] | |
|
703 | ||
|
704 | try: | |
|
705 | peercert = sock.getpeercert(True) | |
|
706 | peercert2 = sock.getpeercert() | |
|
707 | except AttributeError: | |
|
708 | raise error.Abort(_('%s ssl connection error') % host) | |
|
709 | ||
|
710 | if not peercert: | |
|
711 | raise error.Abort(_('%s certificate error: ' | |
|
712 | 'no certificate received') % host) | |
|
337 | 713 | |
|
338 | if not peercert: | |
|
339 | raise error.Abort(_('%s certificate error: ' | |
|
340 | 'no certificate received') % host) | |
|
714 | if settings['disablecertverification']: | |
|
715 | # We don't print the certificate fingerprint because it shouldn't | |
|
716 | # be necessary: if the user requested certificate verification be | |
|
717 | # disabled, they presumably already saw a message about the inability | |
|
718 | # to verify the certificate and this message would have printed the | |
|
719 | # fingerprint. So printing the fingerprint here adds little to no | |
|
720 | # value. | |
|
721 | ui.warn(_('warning: connection security to %s is disabled per current ' | |
|
722 | 'settings; communication is susceptible to eavesdropping ' | |
|
723 | 'and tampering\n') % host) | |
|
724 | return | |
|
725 | ||
|
726 | # If a certificate fingerprint is pinned, use it and only it to | |
|
727 | # validate the remote cert. | |
|
728 | peerfingerprints = { | |
|
729 | 'sha1': hashlib.sha1(peercert).hexdigest(), | |
|
730 | 'sha256': hashlib.sha256(peercert).hexdigest(), | |
|
731 | 'sha512': hashlib.sha512(peercert).hexdigest(), | |
|
732 | } | |
|
733 | ||
|
734 | def fmtfingerprint(s): | |
|
735 | return ':'.join([s[x:x + 2] for x in range(0, len(s), 2)]) | |
|
736 | ||
|
737 | nicefingerprint = 'sha256:%s' % fmtfingerprint(peerfingerprints['sha256']) | |
|
341 | 738 | |
|
342 | # If a certificate fingerprint is pinned, use it and only it to | |
|
343 | # validate the remote cert. | |
|
344 | hostfingerprints = self.ui.configlist('hostfingerprints', host) | |
|
345 | peerfingerprint = util.sha1(peercert).hexdigest() | |
|
346 | nicefingerprint = ":".join([peerfingerprint[x:x + 2] | |
|
347 | for x in xrange(0, len(peerfingerprint), 2)]) | |
|
348 | if hostfingerprints: | |
|
349 | fingerprintmatch = False | |
|
350 | for hostfingerprint in hostfingerprints: | |
|
351 | if peerfingerprint.lower() == \ | |
|
352 | hostfingerprint.replace(':', '').lower(): | |
|
353 | fingerprintmatch = True | |
|
354 | break | |
|
355 | if not fingerprintmatch: | |
|
356 | raise error.Abort(_('certificate for %s has unexpected ' | |
|
357 | 'fingerprint %s') % (host, nicefingerprint), | |
|
358 | hint=_('check hostfingerprint configuration')) | |
|
359 | self.ui.debug('%s certificate matched fingerprint %s\n' % | |
|
360 | (host, nicefingerprint)) | |
|
361 | return | |
|
739 | if settings['certfingerprints']: | |
|
740 | for hash, fingerprint in settings['certfingerprints']: | |
|
741 | if peerfingerprints[hash].lower() == fingerprint: | |
|
742 | ui.debug('%s certificate matched fingerprint %s:%s\n' % | |
|
743 | (host, hash, fmtfingerprint(fingerprint))) | |
|
744 | return | |
|
745 | ||
|
746 | # Pinned fingerprint didn't match. This is a fatal error. | |
|
747 | if settings['legacyfingerprint']: | |
|
748 | section = 'hostfingerprint' | |
|
749 | nice = fmtfingerprint(peerfingerprints['sha1']) | |
|
750 | else: | |
|
751 | section = 'hostsecurity' | |
|
752 | nice = '%s:%s' % (hash, fmtfingerprint(peerfingerprints[hash])) | |
|
753 | raise error.Abort(_('certificate for %s has unexpected ' | |
|
754 | 'fingerprint %s') % (host, nice), | |
|
755 | hint=_('check %s configuration') % section) | |
|
362 | 756 | |
|
363 | # No pinned fingerprint. Establish trust by looking at the CAs. | |
|
364 | cacerts = self.ui.config('web', 'cacerts') | |
|
365 | if cacerts != '!': | |
|
366 | msg = _verifycert(peercert2, host) | |
|
367 | if msg: | |
|
368 | raise error.Abort(_('%s certificate error: %s') % (host, msg), | |
|
369 | hint=_('configure hostfingerprint %s or use ' | |
|
370 | '--insecure to connect insecurely') % | |
|
371 | nicefingerprint) | |
|
372 | self.ui.debug('%s certificate successfully verified\n' % host) | |
|
373 | elif strict: | |
|
374 | raise error.Abort(_('%s certificate with fingerprint %s not ' | |
|
375 | 'verified') % (host, nicefingerprint), | |
|
376 | hint=_('check hostfingerprints or web.cacerts ' | |
|
377 | 'config setting')) | |
|
378 | else: | |
|
379 | self.ui.warn(_('warning: %s certificate with fingerprint %s not ' | |
|
380 | 'verified (check hostfingerprints or web.cacerts ' | |
|
381 | 'config setting)\n') % | |
|
382 | (host, nicefingerprint)) | |
|
757 | # Security is enabled but no CAs are loaded. We can't establish trust | |
|
758 | # for the cert so abort. | |
|
759 | if not sock._hgstate['caloaded']: | |
|
760 | raise error.Abort( | |
|
761 | _('unable to verify security of %s (no loaded CA certificates); ' | |
|
762 | 'refusing to connect') % host, | |
|
763 | hint=_('see https://mercurial-scm.org/wiki/SecureConnections for ' | |
|
764 | 'how to configure Mercurial to avoid this error or set ' | |
|
765 | 'hostsecurity.%s:fingerprints=%s to trust this server') % | |
|
766 | (host, nicefingerprint)) | |
|
767 | ||
|
768 | msg = _verifycert(peercert2, host) | |
|
769 | if msg: | |
|
770 | raise error.Abort(_('%s certificate error: %s') % (host, msg), | |
|
771 | hint=_('set hostsecurity.%s:certfingerprints=%s ' | |
|
772 | 'config setting or use --insecure to connect ' | |
|
773 | 'insecurely') % | |
|
774 | (host, nicefingerprint)) |
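A worked example of the fingerprint pinning introduced above: the peer certificate is hashed in DER form and the hex digest is compared against the configured pin. Standalone sketch (fingerprint() is illustrative, not a Mercurial helper):

    import hashlib

    def fingerprint(peercert_der, algo='sha256'):
        # peercert_der is the raw DER blob from sock.getpeercert(True)
        digest = hashlib.new(algo, peercert_der).hexdigest()
        return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

A matching pin would then live in the hgrc section the abort hint points at:

    [hostsecurity]
    example.com:fingerprints = sha256:aa:bb:...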
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | import hashlib | |
|
11 | 12 | import os |
|
12 | 13 | import stat |
|
13 | 14 | |
@@ -19,8 +20,6 b' from . import (' | |||
|
19 | 20 | util, |
|
20 | 21 | ) |
|
21 | 22 | |
|
22 | _sha = util.sha1 | |
|
23 | ||
|
24 | 23 | # This avoids a collision between a file named foo and a dir named |
|
25 | 24 | # foo.i or foo.d |
|
26 | 25 | def _encodedir(path): |
@@ -57,6 +56,23 b' def decodedir(path):' | |||
|
57 | 56 | .replace(".i.hg/", ".i/") |
|
58 | 57 | .replace(".hg.hg/", ".hg/")) |
|
59 | 58 | |
|
59 | def _reserved(): | |
|
60 | ''' characters that are problematic for filesystems | |
|
61 | ||
|
62 | * ascii escapes (0..31) | |
|
63 | * ascii hi (126..255) | |
|
64 | * windows specials | |
|
65 | ||
|
66 | these characters will be escaped by encodefunctions | |
|
67 | ''' | |
|
68 | winreserved = [ord(x) for x in '\\:*?"<>|'] | |
|
69 | for x in range(32): | |
|
70 | yield x | |
|
71 | for x in range(126, 256): | |
|
72 | yield x | |
|
73 | for x in winreserved: | |
|
74 | yield x | |
|
75 | ||
|
60 | 76 | def _buildencodefun(): |
|
61 | 77 | ''' |
|
62 | 78 | >>> enc, dec = _buildencodefun() |
@@ -82,11 +98,10 b' def _buildencodefun():' | |||
|
82 | 98 | 'the\\x07quick\\xadshot' |
|
83 | 99 | ''' |
|
84 | 100 | e = '_' |
|
85 | winreserved = [ord(x) for x in '\\:*?"<>|'] | |
|
86 | 101 | cmap = dict([(chr(x), chr(x)) for x in xrange(127)]) |
|
87 | for x in (range(32) + range(126, 256) + winreserved): | |
|
102 | for x in _reserved(): | |
|
88 | 103 | cmap[chr(x)] = "~%02x" % x |
|
89 | for x in range(ord("A"), ord("Z") + 1) + [ord(e)]: | |
|
104 | for x in list(range(ord("A"), ord("Z") + 1)) + [ord(e)]: | |
|
90 | 105 | cmap[chr(x)] = e + chr(x).lower() |
|
91 | 106 | dmap = {} |
|
92 | 107 | for k, v in cmap.iteritems(): |
@@ -134,9 +149,8 b' def _buildlowerencodefun():' | |||
|
134 | 149 | >>> f('the\x07quick\xADshot') |
|
135 | 150 | 'the~07quick~adshot' |
|
136 | 151 | ''' |
|
137 | winreserved = [ord(x) for x in '\\:*?"<>|'] | |
|
138 | 152 | cmap = dict([(chr(x), chr(x)) for x in xrange(127)]) |
|
139 | for x in (range(32) + range(126, 256) + winreserved): | |
|
153 | for x in _reserved(): | |
|
140 | 154 | cmap[chr(x)] = "~%02x" % x |
|
141 | 155 | for x in range(ord("A"), ord("Z") + 1): |
|
142 | 156 | cmap[chr(x)] = chr(x).lower() |
@@ -196,7 +210,7 b' def _auxencode(path, dotencode):' | |||
|
196 | 210 | _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4 |
|
197 | 211 | |
|
198 | 212 | def _hashencode(path, dotencode): |
|
199 | digest = _sha(path).hexdigest() | |
|
213 | digest = hashlib.sha1(path).hexdigest() | |
|
200 | 214 | le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/' |
|
201 | 215 | parts = _auxencode(le, dotencode) |
|
202 | 216 | basename = parts[-1] |
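The new _reserved() generator gives both encode functions a single source for the byte values they must escape. A quick illustration of the resulting mapping (escape() is a toy stand-in for the real encoder, using the same ~%02x convention as the diff):

    def escape(path):
        reserved = set(_reserved())  # 0-31, 126-255, and \ : * ? " < > |
        return ''.join('~%02x' % ord(c) if ord(c) in reserved else c
                       for c in path)

    # escape('foo:bar?') -> 'foo~3abar~3f'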
@@ -9,6 +9,7 b' from __future__ import absolute_import' | |||
|
9 | 9 | |
|
10 | 10 | import copy |
|
11 | 11 | import errno |
|
12 | import hashlib | |
|
12 | 13 | import os |
|
13 | 14 | import posixpath |
|
14 | 15 | import re |
@@ -50,14 +51,14 b' def _expandedabspath(path):' | |||
|
50 | 51 | |
|
51 | 52 | def _getstorehashcachename(remotepath): |
|
52 | 53 | '''get a unique filename for the store hash cache of a remote repository''' |
|
53 | return util.sha1(_expandedabspath(remotepath)).hexdigest()[0:12] | |
|
54 | return hashlib.sha1(_expandedabspath(remotepath)).hexdigest()[0:12] | |
|
54 | 55 | |
|
55 | 56 | class SubrepoAbort(error.Abort): |
|
56 | 57 | """Exception class used to avoid handling a subrepo error more than once""" |
|
57 | 58 | def __init__(self, *args, **kw): |
|
59 | self.subrepo = kw.pop('subrepo', None) | |
|
60 | self.cause = kw.pop('cause', None) | |
|
58 | 61 | error.Abort.__init__(self, *args, **kw) |
|
59 | self.subrepo = kw.get('subrepo') | |
|
60 | self.cause = kw.get('cause') | |
|
61 | 62 | |
|
62 | 63 | def annotatesubrepoerror(func): |
|
63 | 64 | def decoratedmethod(self, *args, **kargs): |
@@ -585,7 +586,7 b' class abstractsubrepo(object):' | |||
|
585 | 586 | return 1 |
|
586 | 587 | |
|
587 | 588 | def revert(self, substate, *pats, **opts): |
|
588 | self.ui.warn('%s: reverting %s subrepos is unsupported\n' \ | |
|
589 | self.ui.warn(_('%s: reverting %s subrepos is unsupported\n') \ | |
|
589 | 590 | % (substate[0], substate[2])) |
|
590 | 591 | return [] |
|
591 | 592 | |
@@ -659,7 +660,7 b' class hgsubrepo(abstractsubrepo):' | |||
|
659 | 660 | yield '# %s\n' % _expandedabspath(remotepath) |
|
660 | 661 | vfs = self._repo.vfs |
|
661 | 662 | for relname in filelist: |
|
662 | filehash = util.sha1(vfs.tryread(relname)).hexdigest() | |
|
663 | filehash = hashlib.sha1(vfs.tryread(relname)).hexdigest() | |
|
663 | 664 | yield '%s = %s\n' % (relname, filehash) |
|
664 | 665 | |
|
665 | 666 | @propertycache |
@@ -1413,7 +1414,7 b' class gitsubrepo(abstractsubrepo):' | |||
|
1413 | 1414 | if command in ('cat-file', 'symbolic-ref'): |
|
1414 | 1415 | return retdata, p.returncode |
|
1415 | 1416 | # for all others, abort |
|
1416 | raise error.Abort('git %s error %d in %s' % | |
|
1417 | raise error.Abort(_('git %s error %d in %s') % | |
|
1417 | 1418 | (command, p.returncode, self._relpath)) |
|
1418 | 1419 | |
|
1419 | 1420 | return retdata, p.returncode |
@@ -292,7 +292,7 b' def _readtagcache(ui, repo):' | |||
|
292 | 292 | cachehash = None |
|
293 | 293 | if cachefile: |
|
294 | 294 | try: |
|
295 | validline = cachelines.next() | |
|
295 | validline = next(cachelines) | |
|
296 | 296 | validline = validline.split() |
|
297 | 297 | cacherev = int(validline[0]) |
|
298 | 298 | cachenode = bin(validline[1]) |
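The one-line tags.py change is a Python 3 portability fix: bound .next() methods disappeared in Python 3, while the next() builtin works on both major versions. In miniature:

    lines = iter(['0 abcdef', 'rest'])
    validline = next(lines)      # works on Python 2 and 3
    # validline = lines.next()   # Python 2 only; AttributeError on Python 3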
@@ -724,6 +724,25 b' def rstdoc(context, mapping, args):' | |||
|
724 | 724 | |
|
725 | 725 | return minirst.format(text, style=style, keep=['verbose']) |
|
726 | 726 | |
|
727 | @templatefunc('separate(sep, args)') | |
|
728 | def separate(context, mapping, args): | |
|
729 | """Add a separator between non-empty arguments.""" | |
|
730 | if not args: | |
|
731 | # i18n: "separate" is a keyword | |
|
732 | raise error.ParseError(_("separate expects at least one argument")) | |
|
733 | ||
|
734 | sep = evalstring(context, mapping, args[0]) | |
|
735 | first = True | |
|
736 | for arg in args[1:]: | |
|
737 | argstr = evalstring(context, mapping, arg) | |
|
738 | if not argstr: | |
|
739 | continue | |
|
740 | if first: | |
|
741 | first = False | |
|
742 | else: | |
|
743 | yield sep | |
|
744 | yield argstr | |
|
745 | ||
|
727 | 746 | @templatefunc('shortest(node, minlength=4)') |
|
728 | 747 | def shortest(context, mapping, args): |
|
729 | 748 | """Obtain the shortest representation of |
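The new separate() template function joins only those of its arguments that expand to something non-empty, so callers no longer need nested if()s to avoid doubled separators. A plausible invocation (output shape illustrative):

    hg log -r . -T '{separate(" ", node|short, bookmarks, tags)}\n'

This prints the short node, then bookmarks and tags only when present, with single spaces in between.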
@@ -4,5 +4,5 b'' | |||
|
4 | 4 | <id>{urlbase}{url|urlescape}#branch-{node}</id> |
|
5 | 5 | <updated>{date|rfc3339date}</updated> |
|
6 | 6 | <published>{date|rfc3339date}</published> |
|
7 | <content type="text">{branch|escape}</content> | |
|
7 | <content type="text">{branch|strip|escape}</content> | |
|
8 | 8 | </entry> |
@@ -9,35 +9,35 b'' | |||
|
9 | 9 | <updated>{date|rfc3339date}</updated> |
|
10 | 10 | <published>{date|rfc3339date}</published> |
|
11 | 11 | <content type="xhtml"> |
|
12-41 | (removed template lines 12-41; their text was lost in this truncated view) | |
|
12 | <table xmlns="http://www.w3.org/1999/xhtml"> | |
|
13 | <tr> | |
|
14 | <th style="text-align:left;">changeset</th> | |
|
15 | <td>{node|short}</td> | |
|
16 | </tr> | |
|
17 | <tr> | |
|
18 | <th style="text-align:left;">branch</th> | |
|
19 | <td>{inbranch%"{name|escape}"}{branches%"{name|escape}"}</td> | |
|
20 | </tr> | |
|
21 | <tr> | |
|
22 | <th style="text-align:left;">bookmark</th> | |
|
23 | <td>{bookmarks%"{name|escape}"}</td> | |
|
24 | </tr> | |
|
25 | <tr> | |
|
26 | <th style="text-align:left;">tag</th> | |
|
27 | <td>{tags%"{name|escape}"}</td> | |
|
28 | </tr> | |
|
29 | <tr> | |
|
30 | <th style="text-align:left;">user</th> | |
|
31 | <td>{author|obfuscate}</td> | |
|
32 | </tr> | |
|
33 | <tr> | |
|
34 | <th style="text-align:left;vertical-align:top;">description</th> | |
|
35 | <td>{desc|strip|escape|websub|addbreaks|nonempty}</td> | |
|
36 | </tr> | |
|
37 | <tr> | |
|
38 | <th style="text-align:left;vertical-align:top;">files</th> | |
|
39 | <td>{files}</td> | |
|
40 | </tr> | |
|
41 | </table> | |
|
42 | 42 | </content> |
|
43 | 43 | </entry> |
@@ -5,7 +5,6 b' header = header.tmpl' | |||
|
5 | 5 | changelog = changelog.tmpl |
|
6 | 6 | changelogentry = changelogentry.tmpl |
|
7 | 7 | filelog = filelog.tmpl |
|
8 | filelogentry = filelogentry.tmpl | |
|
9 | 8 | tags = tags.tmpl |
|
10 | 9 | tagentry = tagentry.tmpl |
|
11 | 10 | bookmarks = bookmarks.tmpl |
@@ -95,14 +95,29 b' filelog = filelog.tmpl' | |||
|
95 | 95 | fileline = ' |
|
96 | 96 | <a href="#{lineid}"></a><span id="{lineid}">{strip(line|escape, '\r\n')}</span>' |
|
97 | 97 | annotateline = ' |
|
98 | <tr id="{lineid}" style="font-family:monospace" class="parity{parity}"> | |
|
99 | <td class="linenr" style="text-align: right;"> | |
|
100 | <a href="{url|urlescape}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}" | |
|
101 | title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a> | |
|
98 | <tr id="{lineid}" style="font-family:monospace" class="parity{parity}{ifeq(node, originalnode, ' thisrev')}"> | |
|
99 | <td class="annotate linenr parity{blockparity}" style="text-align: right;"> | |
|
100 | {if(blockhead, | |
|
101 | '<a href="{url|urlescape}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}"> | |
|
102 | {rev} | |
|
103 | </a>')} | |
|
104 | <div class="annotate-info"> | |
|
105 | <div> | |
|
106 | <a href="{url|urlescape}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}"> | |
|
107 | {node|short}</a> | |
|
108 | {desc|escape|firstline} | |
|
109 | </div> | |
|
110 | <div><em>{author|obfuscate}</em></div> | |
|
111 | <div>parents: {parents%annotateparent}</div> | |
|
112 | <a href="{url|urlescape}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | |
|
113 | <a href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | |
|
114 | </div> | |
|
102 | 115 | </td> |
|
103 | 116 | <td><pre><a class="linenr" href="#{lineid}">{linenumber}</a></pre></td> |
|
104 | 117 | <td><pre>{line|escape}</pre></td> |
|
105 | 118 | </tr>' |
|
119 | annotateparent = ' | |
|
120 | <a href="{url|urlescape}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rev}</a>' | |
|
106 | 121 | difflineplus = ' |
|
107 | 122 | <a href="#{lineid}"></a><span id="{lineid}" class="difflineplus">{strip(line|escape, '\r\n')}</span>' |
|
108 | 123 | difflineminus = ' |
|
(The viewer truncated the rest of this changeset: many additional modified files, a rename of tests/test-update-renames.t to tests/test-update-names.t, and four removed files.)