@@ -291,7 +291,7 b' pypats = [' | |||||
291 | "always assign an opened file to a variable, and close it afterwards"), |
|
291 | "always assign an opened file to a variable, and close it afterwards"), | |
292 | (r'[\s\(](open|file)\([^)]*\)\.', |
|
292 | (r'[\s\(](open|file)\([^)]*\)\.', | |
293 | "always assign an opened file to a variable, and close it afterwards"), |
|
293 | "always assign an opened file to a variable, and close it afterwards"), | |
294 | (r'(?i)descendent', "the proper spelling is descendAnt"), |
|
294 | (r'(?i)descend[e]nt', "the proper spelling is descendAnt"), | |
295 | (r'\.debug\(\_', "don't mark debug messages for translation"), |
|
295 | (r'\.debug\(\_', "don't mark debug messages for translation"), | |
296 | (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"), |
|
296 | (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"), | |
297 | (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'), |
|
297 | (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'), |
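
For context, the file-handling and style rules in the hunk above flag code like the following (an illustration written for this review, not part of the patch):

    # Flagged by the open()/file() pattern: nothing holds the handle,
    # so closing is left to the garbage collector.
    data = open('config.txt').read()

    # What the rule asks for: assign the handle, then close it.
    fh = open('config.txt')
    try:
        data = fh.read()
    finally:
        fh.close()

    # Flagged by the strip-before-split pattern: split() with no
    # arguments already ignores leading and trailing whitespace.
    words = ' a b '.strip().split()  # redundant strip()
    words = ' a b '.split()          # equivalent
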
@@ -74,7 +74,7 b' def getrevs(spec):' | |||||
74 |
|
74 | |||
75 | parser = OptionParser(usage="usage: %prog [options] <revs>") |
|
75 | parser = OptionParser(usage="usage: %prog [options] <revs>") | |
76 | parser.add_option("-f", "--file", |
|
76 | parser.add_option("-f", "--file", | |
77 | help="read revset from FILE (stdin if omited)", |
|
77 | help="read revset from FILE (stdin if omitted)", | |
78 | metavar="FILE") |
|
78 | metavar="FILE") | |
79 | parser.add_option("-R", "--repo", |
|
79 | parser.add_option("-R", "--repo", | |
80 | help="run benchmark on REPO", metavar="REPO") |
|
80 | help="run benchmark on REPO", metavar="REPO") |
@@ -462,10 +462,10 b' def updatelfiles(ui, repo, filelist=None' | |||||
462 | expecthash != lfutil.hashfile(abslfile))): |
|
462 | expecthash != lfutil.hashfile(abslfile))): | |
463 | if lfile not in repo[None]: # not switched to normal file |
|
463 | if lfile not in repo[None]: # not switched to normal file | |
464 | util.unlinkpath(abslfile, ignoremissing=True) |
|
464 | util.unlinkpath(abslfile, ignoremissing=True) | |
465 | # use normallookup() to allocate entry in largefiles |
|
465 | # use normallookup() to allocate an entry in largefiles | |
466 | # dirstate, because lack of it misleads |
|
466 | # dirstate, because lack of it misleads | |
467 | # lfilesrepo.status() into recognition that such cache |
|
467 | # lfilesrepo.status() into recognition that such cache | |
468 | # missing files are |
|
468 | # missing files are removed. | |
469 | lfdirstate.normallookup(lfile) |
|
469 | lfdirstate.normallookup(lfile) | |
470 | update[lfile] = expecthash |
|
470 | update[lfile] = expecthash | |
471 | else: |
|
471 | else: |
@@ -63,10 +63,10 b' def installmatchandpatsfn(f):' | |||||
63 |
|
63 | |||
64 | def restorematchandpatsfn(): |
|
64 | def restorematchandpatsfn(): | |
65 | '''restores scmutil.matchandpats to what it was before |
|
65 | '''restores scmutil.matchandpats to what it was before | |
66 | install |
|
66 | installmatchandpatsfn was called. No-op if scmutil.matchandpats | |
67 | is its original function. |
|
67 | is its original function. | |
68 |
|
68 | |||
69 | Note that n calls to install |
|
69 | Note that n calls to installmatchandpatsfn will require n calls | |
70 | to restore matchfn to reverse''' |
|
70 | to restore matchfn to reverse''' | |
71 | scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats', |
|
71 | scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats', | |
72 | scmutil.matchandpats) |
|
72 | scmutil.matchandpats) |
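
The restore idiom above works because each installed wrapper carries its predecessor as an attribute. A minimal sketch of that install/restore pattern, with hypothetical names chosen for this note:

    def installfn(module, name, wrapper):
        # Remember the function being replaced on the wrapper itself.
        wrapper.oldfn = getattr(module, name)
        setattr(module, name, wrapper)

    def restorefn(module, name):
        # Undo one level of wrapping; a no-op when nothing is installed,
        # which is why n installs require exactly n restores.
        current = getattr(module, name)
        setattr(module, name, getattr(current, 'oldfn', current))
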
@@ -263,7 +263,7 b' def reposetup(ui, repo):' | |||||
263 | # and so on), this code path is used to avoid: |
|
263 | # and so on), this code path is used to avoid: | |
264 | # (1) updating standins, because standins should |
|
264 | # (1) updating standins, because standins should | |
265 | # be already updated at this point |
|
265 | # be already updated at this point | |
266 | # (2) aborting when sta |
|
266 | # (2) aborting when standins are matched by "match", | |
267 | # because automated committing may specify them directly |
|
267 | # because automated committing may specify them directly | |
268 | # |
|
268 | # | |
269 | if getattr(self, "_isrebasing", False) or \ |
|
269 | if getattr(self, "_isrebasing", False) or \ |
@@ -278,7 +278,7 b' def rebase(ui, repo, **opts):' | |||||
278 | commonanc, base, commonanc) |
|
278 | commonanc, base, commonanc) | |
279 | if not rebaseset: |
|
279 | if not rebaseset: | |
280 | # transform to list because smartsets are not comparable to |
|
280 | # transform to list because smartsets are not comparable to | |
281 | # lists. This should be improved to honor laz |
|
281 | # lists. This should be improved to honor laziness of | |
282 | # smartset. |
|
282 | # smartset. | |
283 | if list(base) == [dest.rev()]: |
|
283 | if list(base) == [dest.rev()]: | |
284 | if basef: |
|
284 | if basef: |
@@ -1,4 +1,4 b'' | |||||
1 | """strip changesets and their descend |
|
1 | """strip changesets and their descendants from history | |
2 |
|
2 | |||
3 | This extension allows you to strip changesets and all their descendants from the |
|
3 | This extension allows you to strip changesets and all their descendants from the | |
4 | repository. See the command help for details. |
|
4 | repository. See the command help for details. |
@@ -396,7 +396,7 b' class _BaseFile(list):' | |||||
396 | def ordered_metadata(self): |
|
396 | def ordered_metadata(self): | |
397 | """ |
|
397 | """ | |
398 | Convenience method that returns an ordered version of the metadata |
|
398 | Convenience method that returns an ordered version of the metadata | |
399 | diction |
|
399 | dictionary. The return value is list of tuples (metadata name, | |
400 | metadata_value). |
|
400 | metadata_value). | |
401 | """ |
|
401 | """ | |
402 | # copy the dict first |
|
402 | # copy the dict first |
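
The docstring above promises a list of (metadata_name, metadata_value) tuples built from the metadata dict. A rough sketch of that contract (plain sorting is an assumption here; polib's real method prefers a canonical header order):

    def ordered_metadata(metadata):
        # Return the metadata mapping as an ordered list of tuples.
        return sorted(metadata.items())
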
@@ -229,7 +229,7 b' class unbundlerecords(object):' | |||||
229 | self.getreplies(inreplyto).add(category, entry) |
|
229 | self.getreplies(inreplyto).add(category, entry) | |
230 |
|
230 | |||
231 | def getreplies(self, partid): |
|
231 | def getreplies(self, partid): | |
232 | """get the |
|
232 | """get the records that are replies to a specific part""" | |
233 | return self._replies.setdefault(partid, unbundlerecords()) |
|
233 | return self._replies.setdefault(partid, unbundlerecords()) | |
234 |
|
234 | |||
235 | def __getitem__(self, cat): |
|
235 | def __getitem__(self, cat): | |
@@ -303,7 +303,7 b' def processbundle(repo, unbundler, trans' | |||||
303 | # consume the bundle content |
|
303 | # consume the bundle content | |
304 | part.read() |
|
304 | part.read() | |
305 | # Small hack to let caller code distinguish exceptions from bundle2 |
|
305 | # Small hack to let caller code distinguish exceptions from bundle2 | |
306 | # processing fro |
|
306 | # processing from processing the old format. This is mostly | |
307 | # needed to handle different return codes to unbundle according to the |
|
307 | # needed to handle different return codes to unbundle according to the | |
308 | # type of bundle. We should probably clean up or drop this return code |
|
308 | # type of bundle. We should probably clean up or drop this return code | |
309 | # craziness in a future version. |
|
309 | # craziness in a future version. | |
@@ -359,7 +359,7 b' def _processpart(op, part):' | |||||
359 |
|
359 | |||
360 |
|
360 | |||
361 | def decodecaps(blob): |
|
361 | def decodecaps(blob): | |
362 | """decode a bundle2 caps bytes blob into a diction |
|
362 | """decode a bundle2 caps bytes blob into a dictionary | |
363 |
|
363 | |||
364 | The blob is a list of capabilities (one per line) |
|
364 | The blob is a list of capabilities (one per line) | |
365 | Capabilities may have values using a line of the form:: |
|
365 | Capabilities may have values using a line of the form:: | |
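
The hunk is cut off before the docstring shows the value syntax, so as a hedged illustration only: parsing a line-oriented caps blob where a "name=value1,value2" shape is assumed might look like this:

    def decodecaps_sketch(blob):
        caps = {}
        for line in blob.splitlines():
            if not line:
                continue
            if '=' in line:
                # A capability carrying one or more values.
                key, vals = line.split('=', 1)
                caps[key] = vals.split(',')
            else:
                # A bare capability with no values.
                caps[line] = []
        return caps
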
@@ -741,7 +741,7 b' class interrupthandler(unpackermixin):' | |||||
741 | self.ui.debug('bundle2 stream interruption, looking for a part.\n') |
|
741 | self.ui.debug('bundle2 stream interruption, looking for a part.\n') | |
742 | headerblock = self._readpartheader() |
|
742 | headerblock = self._readpartheader() | |
743 | if headerblock is None: |
|
743 | if headerblock is None: | |
744 | self.ui.debug('no part found during iterruption.\n') |
|
744 | self.ui.debug('no part found during interruption.\n') | |
745 | return |
|
745 | return | |
746 | part = unbundlepart(self.ui, headerblock, self._fp) |
|
746 | part = unbundlepart(self.ui, headerblock, self._fp) | |
747 | op = interruptoperation(self.ui) |
|
747 | op = interruptoperation(self.ui) | |
@@ -828,7 +828,7 b' class unbundlepart(unpackermixin):' | |||||
828 | # split mandatory from advisory |
|
828 | # split mandatory from advisory | |
829 | mansizes = paramsizes[:mancount] |
|
829 | mansizes = paramsizes[:mancount] | |
830 | advsizes = paramsizes[mancount:] |
|
830 | advsizes = paramsizes[mancount:] | |
831 | # retrive param value |
|
831 | # retrieve param value | |
832 | manparams = [] |
|
832 | manparams = [] | |
833 | for key, value in mansizes: |
|
833 | for key, value in mansizes: | |
834 | manparams.append((self._fromheader(key), self._fromheader(value))) |
|
834 | manparams.append((self._fromheader(key), self._fromheader(value))) | |
@@ -920,7 +920,7 b' def handlechangegroup(op, inpart):' | |||||
920 | ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2') |
|
920 | ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2') | |
921 | op.records.add('changegroup', {'return': ret}) |
|
921 | op.records.add('changegroup', {'return': ret}) | |
922 | if op.reply is not None: |
|
922 | if op.reply is not None: | |
923 | # This is definitly not the final form of this |
|
923 | # This is definitely not the final form of this | |
924 | # return. But one need to start somewhere. |
|
924 | # return. But one need to start somewhere. | |
925 | part = op.reply.newpart('b2x:reply:changegroup') |
|
925 | part = op.reply.newpart('b2x:reply:changegroup') | |
926 | part.addparam('in-reply-to', str(inpart.id), mandatory=False) |
|
926 | part.addparam('in-reply-to', str(inpart.id), mandatory=False) | |
@@ -989,7 +989,7 b' def handleremotechangegroup(op, inpart):' | |||||
989 | ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2') |
|
989 | ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2') | |
990 | op.records.add('changegroup', {'return': ret}) |
|
990 | op.records.add('changegroup', {'return': ret}) | |
991 | if op.reply is not None: |
|
991 | if op.reply is not None: | |
992 | # This is definitly not the final form of this |
|
992 | # This is definitely not the final form of this | |
993 | # return. But one need to start somewhere. |
|
993 | # return. But one need to start somewhere. | |
994 | part = op.reply.newpart('b2x:reply:changegroup') |
|
994 | part = op.reply.newpart('b2x:reply:changegroup') | |
995 | part.addparam('in-reply-to', str(inpart.id), mandatory=False) |
|
995 | part.addparam('in-reply-to', str(inpart.id), mandatory=False) |
@@ -113,7 +113,7 b' def logmessage(ui, opts):' | |||||
113 | def mergeeditform(ctxorbool, baseform): |
|
113 | def mergeeditform(ctxorbool, baseform): | |
114 | """build appropriate editform from ctxorbool and baseform |
|
114 | """build appropriate editform from ctxorbool and baseform | |
115 |
|
115 | |||
116 | 'c |
|
116 | 'ctxorbool' is one of a ctx to be committed, or a bool whether | |
117 | merging is committed. |
|
117 | merging is committed. | |
118 |
|
118 | |||
119 | This returns editform 'baseform' with '.merge' if merging is |
|
119 | This returns editform 'baseform' with '.merge' if merging is | |
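
Per the docstring, the function appends a merge marker to the base editform. A sketch of that behavior under the stated contract ('ctxorbool' is either a ctx to be committed or a bool saying whether a merge is being committed); the real function may differ in detail:

    def mergeeditform_sketch(ctxorbool, baseform):
        if isinstance(ctxorbool, bool):
            merging = ctxorbool
        else:
            # A ctx with two parents is a merge commit.
            merging = len(ctxorbool.parents()) > 1
        return baseform + ('.merge' if merging else '')
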
@@ -1783,8 +1783,8 b' def _makelogrevset(repo, pats, opts, rev' | |||||
1783 | # If we're forced to take the slowpath it means we're following |
|
1783 | # If we're forced to take the slowpath it means we're following | |
1784 | # at least one pattern/directory, so don't bother with rename tracking. |
|
1784 | # at least one pattern/directory, so don't bother with rename tracking. | |
1785 | if follow and not match.always() and not slowpath: |
|
1785 | if follow and not match.always() and not slowpath: | |
1786 | # _makelogfilematcher expects its files argument to be |
|
1786 | # _makefollowlogfilematcher expects its files argument to be | |
1787 | # the repo root, so use match.files(), not pats. |
|
1787 | # relative to the repo root, so use match.files(), not pats. | |
1788 | filematcher = _makefollowlogfilematcher(repo, match.files(), |
|
1788 | filematcher = _makefollowlogfilematcher(repo, match.files(), | |
1789 | followfirst) |
|
1789 | followfirst) | |
1790 | else: |
|
1790 | else: | |
@@ -2522,11 +2522,11 b' def revert(ui, repo, ctx, parents, *pats' | |||||
2522 | deladded = _deleted - smf |
|
2522 | deladded = _deleted - smf | |
2523 | deleted = _deleted - deladded |
|
2523 | deleted = _deleted - deladded | |
2524 |
|
2524 | |||
2525 | # We need to account for the state of file in the dirstate |
|
2525 | # We need to account for the state of file in the dirstate. | |
2526 | # |
|
2526 | # | |
2527 | # Even, when we revert agains something else than parent. |
|
2527 | # Even, when we revert against something else than parent. This will | |
2528 | # slightly alter the behavior of revert (doing back up or not, delete |
|
2528 | # slightly alter the behavior of revert (doing back up or not, delete | |
2529 | # or just forget etc) |
|
2529 | # or just forget etc). | |
2530 | if parent == node: |
|
2530 | if parent == node: | |
2531 | dsmodified = modified |
|
2531 | dsmodified = modified | |
2532 | dsadded = added |
|
2532 | dsadded = added |
@@ -109,7 +109,7 b' class basectx(object):' | |||||
109 | """provide a hook to allow child objects to postprocess status results |
|
109 | """provide a hook to allow child objects to postprocess status results | |
110 |
|
110 | |||
111 | For example, this allows other contexts, such as workingctx, to filter |
|
111 | For example, this allows other contexts, such as workingctx, to filter | |
112 | suspect symlinks in the case of FAT32 and NTFS filesytems. |
|
112 | suspect symlinks in the case of FAT32 and NTFS filesystems. | |
113 | """ |
|
113 | """ | |
114 | return s |
|
114 | return s | |
115 |
|
115 | |||
@@ -1415,7 +1415,7 b' class workingctx(committablectx):' | |||||
1415 | def _prestatus(self, other, s, match, listignored, listclean, listunknown): |
|
1415 | def _prestatus(self, other, s, match, listignored, listclean, listunknown): | |
1416 | """override the parent hook with a dirstate query |
|
1416 | """override the parent hook with a dirstate query | |
1417 |
|
1417 | |||
1418 | We use this prestatus hook to populate the status with information from |
|
1418 | We use this _prestatus hook to populate the status with information from | |
1419 | the dirstate. |
|
1419 | the dirstate. | |
1420 | """ |
|
1420 | """ | |
1421 | # doesn't need to call super; if that changes, be aware that super |
|
1421 | # doesn't need to call super; if that changes, be aware that super | |
@@ -1426,9 +1426,9 b' class workingctx(committablectx):' | |||||
1426 | def _poststatus(self, other, s, match, listignored, listclean, listunknown): |
|
1426 | def _poststatus(self, other, s, match, listignored, listclean, listunknown): | |
1427 | """override the parent hook with a filter for suspect symlinks |
|
1427 | """override the parent hook with a filter for suspect symlinks | |
1428 |
|
1428 | |||
1429 | We use this poststatus hook to filter out symlinks that might have |
|
1429 | We use this _poststatus hook to filter out symlinks that might have | |
1430 | accidentally ended up with the entire contents of the file they are |
|
1430 | accidentally ended up with the entire contents of the file they are | |
1431 | su |
|
1431 | supposed to be linking to. | |
1432 | """ |
|
1432 | """ | |
1433 | s[0] = self._filtersuspectsymlink(s[0]) |
|
1433 | s[0] = self._filtersuspectsymlink(s[0]) | |
1434 | self._status = scmutil.status(*s) |
|
1434 | self._status = scmutil.status(*s) | |
@@ -1693,7 +1693,7 b' class memctx(committablectx):' | |||||
1693 | class memfilectx(committablefilectx): |
|
1693 | class memfilectx(committablefilectx): | |
1694 | """memfilectx represents an in-memory file to commit. |
|
1694 | """memfilectx represents an in-memory file to commit. | |
1695 |
|
1695 | |||
1696 | See memctx and commitablefilectx for more details. |
|
1696 | See memctx and committablefilectx for more details. | |
1697 | """ |
|
1697 | """ | |
1698 | def __init__(self, repo, path, data, islink=False, |
|
1698 | def __init__(self, repo, path, data, islink=False, | |
1699 | isexec=False, copied=None, memctx=None): |
|
1699 | isexec=False, copied=None, memctx=None): |
@@ -97,7 +97,7 b' def _findlimit(repo, a, b):' | |||||
97 | # |/ |
|
97 | # |/ | |
98 | # o 0 a0 |
|
98 | # o 0 a0 | |
99 | # |
|
99 | # | |
100 | # When findlimit is called, a and b are revs 3 and 0, so limit will be 2, |
|
100 | # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2, | |
101 | # yet the filelog has the copy information in rev 1 and we will not look |
|
101 | # yet the filelog has the copy information in rev 1 and we will not look | |
102 | # back far enough unless we also look at the a and b as candidates. |
|
102 | # back far enough unless we also look at the a and b as candidates. | |
103 | # This only occurs when a is a descendent of b or visa-versa. |
|
103 | # This only occurs when a is a descendent of b or visa-versa. |
@@ -298,7 +298,7 b' def _pushdiscoveryphase(pushop):' | |||||
298 | else: |
|
298 | else: | |
299 | # adds changeset we are going to push as draft |
|
299 | # adds changeset we are going to push as draft | |
300 | # |
|
300 | # | |
301 | # should not be necessary for pu |
|
301 | # should not be necessary for publishing server, but because of an | |
302 | # issue fixed in xxxxx we have to do it anyway. |
|
302 | # issue fixed in xxxxx we have to do it anyway. | |
303 | fdroots = list(unfi.set('roots(%ln + %ln::)', |
|
303 | fdroots = list(unfi.set('roots(%ln + %ln::)', | |
304 | outgoing.missing, droots)) |
|
304 | outgoing.missing, droots)) | |
@@ -448,7 +448,7 b' def _pushb2ctx(pushop, bundler):' | |||||
448 | cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing) |
|
448 | cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing) | |
449 | cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks()) |
|
449 | cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks()) | |
450 | def handlereply(op): |
|
450 | def handlereply(op): | |
451 | """extract addchangroup returns from server reply""" |
|
451 | """extract addchangegroup returns from server reply""" | |
452 | cgreplies = op.records.getreplies(cgpart.id) |
|
452 | cgreplies = op.records.getreplies(cgpart.id) | |
453 | assert len(cgreplies['changegroup']) == 1 |
|
453 | assert len(cgreplies['changegroup']) == 1 | |
454 | pushop.cgresult = cgreplies['changegroup'][0]['return'] |
|
454 | pushop.cgresult = cgreplies['changegroup'][0]['return'] | |
@@ -702,7 +702,7 b' def _pushsyncphase(pushop):' | |||||
702 | pushop.ui.warn(msg) |
|
702 | pushop.ui.warn(msg) | |
703 |
|
703 | |||
704 | else: |
|
704 | else: | |
705 | # fallback to independ |
|
705 | # fallback to independent pushkey command | |
706 | for newremotehead in outdated: |
|
706 | for newremotehead in outdated: | |
707 | r = pushop.remote.pushkey('phases', |
|
707 | r = pushop.remote.pushkey('phases', | |
708 | newremotehead.hex(), |
|
708 | newremotehead.hex(), |
@@ -146,7 +146,7 b' def canonpath(root, cwd, myname, auditor' | |||||
146 | def normasprefix(path): |
|
146 | def normasprefix(path): | |
147 | '''normalize the specified path as path prefix |
|
147 | '''normalize the specified path as path prefix | |
148 |
|
148 | |||
149 | Returned va |
|
149 | Returned value can be used safely for "p.startswith(prefix)", | |
150 | "p[len(prefix):]", and so on. |
|
150 | "p[len(prefix):]", and so on. | |
151 |
|
151 | |||
152 | For efficiency, this expects "path" argument to be already |
|
152 | For efficiency, this expects "path" argument to be already |
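
The contract described is that the result always ends with exactly one separator, making prefix arithmetic safe. A minimal sketch (ignoring the drive-letter handling a real implementation needs on Windows):

    import os

    def normasprefix_sketch(path):
        # Guarantee a trailing separator so that p.startswith(prefix)
        # and p[len(prefix):] behave as expected.
        return path if path.endswith(os.sep) else path + os.sep

    # e.g. normasprefix_sketch('/repo') -> '/repo/'
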
@@ -2551,7 +2551,7 b' class addset(abstractsmartset):' | |||||
2551 | return it() |
|
2551 | return it() | |
2552 |
|
2552 | |||
2553 | def _trysetasclist(self): |
|
2553 | def _trysetasclist(self): | |
2554 | """populate the _asclist attribut if possible and necessary""" |
|
2554 | """populate the _asclist attribute if possible and necessary""" | |
2555 | if self._genlist is not None and self._asclist is None: |
|
2555 | if self._genlist is not None and self._asclist is None: | |
2556 | self._asclist = sorted(self._genlist) |
|
2556 | self._asclist = sorted(self._genlist) | |
2557 |
|
2557 | |||
@@ -2744,7 +2744,7 b' class generatorset(abstractsmartset):' | |||||
2744 |
|
2744 | |||
2745 | # We have to use this complex iteration strategy to allow multiple |
|
2745 | # We have to use this complex iteration strategy to allow multiple | |
2746 | # iterations at the same time. We need to be able to catch revision |
|
2746 | # iterations at the same time. We need to be able to catch revision | |
2747 | # removed from |
|
2747 | # removed from _consumegen and added to genlist in another instance. | |
2748 | # |
|
2748 | # | |
2749 | # Getting rid of it would provide an about 15% speed up on this |
|
2749 | # Getting rid of it would provide an about 15% speed up on this | |
2750 | # iteration. |
|
2750 | # iteration. | |
@@ -2939,17 +2939,15 b' class _spanset(abstractsmartset):' | |||||
2939 | class fullreposet(_spanset): |
|
2939 | class fullreposet(_spanset): | |
2940 | """a set containing all revisions in the repo |
|
2940 | """a set containing all revisions in the repo | |
2941 |
|
2941 | |||
2942 | This class exists to host special optimi |
|
2942 | This class exists to host special optimization. | |
2943 | """ |
|
2943 | """ | |
2944 |
|
2944 | |||
2945 | def __init__(self, repo): |
|
2945 | def __init__(self, repo): | |
2946 | super(fullreposet, self).__init__(repo) |
|
2946 | super(fullreposet, self).__init__(repo) | |
2947 |
|
2947 | |||
2948 | def __and__(self, other): |
|
2948 | def __and__(self, other): | |
2949 | """fullrepo & other -> other |
|
2949 | """As self contains the whole repo, all of the other set should also be | |
2950 |
|
2950 | in self. Therefore `self & other = other`. | ||
2951 | As self contains the whole repo, all of the other set should also be in |
|
|||
2952 | self. Therefor `self & other = other`. |
|
|||
2953 |
|
2951 | |||
2954 | This boldly assumes the other contains valid revs only. |
|
2952 | This boldly assumes the other contains valid revs only. | |
2955 | """ |
|
2953 | """ |
@@ -39,7 +39,7 b'' | |||||
39 | # and between base and p2, possibly on separate clones |
|
39 | # and between base and p2, possibly on separate clones | |
40 | # 4. for each tag found both on p1 and p2 perform the following merge algorithm: |
|
40 | # 4. for each tag found both on p1 and p2 perform the following merge algorithm: | |
41 | # - the tags conflict if their tag "histories" have the same "rank" (i.e. |
|
41 | # - the tags conflict if their tag "histories" have the same "rank" (i.e. | |
42 | # length) |
|
42 | # length) AND the last (current) tag is NOT the same | |
43 | # - for non conflicting tags: |
|
43 | # - for non conflicting tags: | |
44 | # - choose which are the high and the low ranking nodes |
|
44 | # - choose which are the high and the low ranking nodes | |
45 | # - the high ranking list of nodes is the one that is longer. |
|
45 | # - the high ranking list of nodes is the one that is longer. | |
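
The conflict rule in step 4 is mechanical once each tag history is a list of nodes. A small sketch of that test, with hypothetical names:

    def tags_conflict(hist_p1, hist_p2):
        # Histories conflict when they have the same "rank" (length)
        # AND their last (current) nodes differ.
        return (len(hist_p1) == len(hist_p2)
                and hist_p1[-1] != hist_p2[-1])

    # For non-conflicting tags, the longer history is the high-ranking
    # one, per the steps that follow.
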
@@ -57,7 +57,7 b'' | |||||
57 | # 5. write the merged tags taking into account to their positions in the first |
|
57 | # 5. write the merged tags taking into account to their positions in the first | |
58 | # parent (i.e. try to keep the relative ordering of the nodes that come |
|
58 | # parent (i.e. try to keep the relative ordering of the nodes that come | |
59 | # from p1). This minimizes the diff between the merged and the p1 tag files |
|
59 | # from p1). This minimizes the diff between the merged and the p1 tag files | |
60 | # This is don |
|
60 | # This is done by using the following algorithm | |
61 | # - group the nodes for a given tag that must be written next to each other |
|
61 | # - group the nodes for a given tag that must be written next to each other | |
62 | # - A: nodes that come from consecutive lines on p1 |
|
62 | # - A: nodes that come from consecutive lines on p1 | |
63 | # - B: nodes that come from p2 (i.e. whose associated line number is |
|
63 | # - B: nodes that come from p2 (i.e. whose associated line number is | |
@@ -81,9 +81,9 b' hexnullid = hex(nullid)' | |||||
81 | def readtagsformerge(ui, repo, lines, fn='', keeplinenums=False): |
|
81 | def readtagsformerge(ui, repo, lines, fn='', keeplinenums=False): | |
82 | '''read the .hgtags file into a structure that is suitable for merging |
|
82 | '''read the .hgtags file into a structure that is suitable for merging | |
83 |
|
83 | |||
84 |

84 | Depending on the keeplinenums flag, clear the line numbers associated |
85 | with each tag. |
|
85 | with each tag. This is done because only the line numbers of the first | |
86 | parent are useful for merging |
|
86 | parent are useful for merging. | |
87 | ''' |
|
87 | ''' | |
88 | filetags = tagsmod._readtaghist(ui, repo, lines, fn=fn, recode=None, |
|
88 | filetags = tagsmod._readtaghist(ui, repo, lines, fn=fn, recode=None, | |
89 | calcnodelines=True)[1] |
|
89 | calcnodelines=True)[1] |
@@ -87,7 +87,7 b' def readlocaltags(ui, repo, alltags, tag' | |||||
87 | def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False): |
|
87 | def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False): | |
88 | '''Read tag definitions from a file (or any source of lines). |
|
88 | '''Read tag definitions from a file (or any source of lines). | |
89 | This function returns two sortdicts with similar information: |
|
89 | This function returns two sortdicts with similar information: | |
90 | - the first dict, bin |
|
90 | - the first dict, bintaghist, contains the tag information as expected by | |
91 | the _readtags function, i.e. a mapping from tag name to (node, hist): |
|
91 | the _readtags function, i.e. a mapping from tag name to (node, hist): | |
92 | - node is the node id from the last line read for that name, |
|
92 | - node is the node id from the last line read for that name, | |
93 | - hist is the list of node ids previously associated with it (in file |
|
93 | - hist is the list of node ids previously associated with it (in file |
@@ -537,7 +537,7 b' class ui(object):' | |||||
537 | return path or loc |
|
537 | return path or loc | |
538 |
|
538 | |||
539 | def pushbuffer(self, error=False): |
|
539 | def pushbuffer(self, error=False): | |
540 | """install a buffer to capture standar output of the ui object |
|
540 | """install a buffer to capture standard output of the ui object | |
541 |
|
541 | |||
542 | If error is True, the error output will be captured too.""" |
|
542 | If error is True, the error output will be captured too.""" | |
543 | self._buffers.append([]) |
|
543 | self._buffers.append([]) |
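
The buffer stack shown here is a classic output-capture pattern. A self-contained sketch of the push/pop idea (a hypothetical class, not the real ui object):

    import sys

    class BufferingUI:
        def __init__(self):
            self._buffers = []

        def pushbuffer(self):
            # Start capturing: subsequent writes land in this buffer.
            self._buffers.append([])

        def write(self, text):
            if self._buffers:
                self._buffers[-1].append(text)
            else:
                sys.stdout.write(text)

        def popbuffer(self):
            # Stop capturing and return everything written meanwhile.
            return ''.join(self._buffers.pop())
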
@@ -1148,7 +1148,7 b' class chunkbuffer(object):' | |||||
1148 | """Read L bytes of data from the iterator of chunks of data. |
|
1148 | """Read L bytes of data from the iterator of chunks of data. | |
1149 | Returns less than L bytes if the iterator runs dry. |
|
1149 | Returns less than L bytes if the iterator runs dry. | |
1150 |
|
1150 | |||
1151 | If size parameter is om |
|
1151 | If size parameter is omitted, read everything""" | |
1152 | left = l |
|
1152 | left = l | |
1153 | buf = [] |
|
1153 | buf = [] | |
1154 | queue = self._queue |
|
1154 | queue = self._queue |
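
The read contract above (return up to l bytes, fewer if the chunk iterator runs dry, everything when the size is omitted) can be sketched independently of the class:

    def read_chunks(chunks, size=None):
        # `chunks` is any iterator of byte strings.
        buf, got = [], 0
        for chunk in chunks:
            buf.append(chunk)
            got += len(chunk)
            if size is not None and got >= size:
                break
        data = b''.join(buf)
        # A real chunkbuffer would keep the tail beyond `size` queued
        # for the next read; this sketch simply truncates.
        return data if size is None else data[:size]
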
@@ -827,7 +827,7 b' def unbundle(repo, proto, heads):' | |||||
827 | r = exchange.unbundle(repo, gen, their_heads, 'serve', |
|
827 | r = exchange.unbundle(repo, gen, their_heads, 'serve', | |
828 | proto._client()) |
|
828 | proto._client()) | |
829 | if util.safehasattr(r, 'addpart'): |
|
829 | if util.safehasattr(r, 'addpart'): | |
830 | # The return looks stream |
|
830 | # The return looks streamable, we are in the bundle2 case and | |
831 | # should return a stream. |
|
831 | # should return a stream. | |
832 | return streamres(r.getchunks()) |
|
832 | return streamres(r.getchunks()) | |
833 | return pushres(r) |
|
833 | return pushres(r) |
@@ -500,7 +500,7 b' class Test(unittest.TestCase):' | |||||
500 | except self.failureException, e: |
|
500 | except self.failureException, e: | |
501 | # This differs from unittest in that we don't capture |
|
501 | # This differs from unittest in that we don't capture | |
502 | # the stack trace. This is for historical reasons and |
|
502 | # the stack trace. This is for historical reasons and | |
503 | # this decision could be revisted in the future, |
|
503 | # this decision could be revisited in the future, | |
504 | # especially for PythonTest instances. |
|
504 | # especially for PythonTest instances. | |
505 | if result.addFailure(self, str(e)): |
|
505 | if result.addFailure(self, str(e)): | |
506 | success = True |
|
506 | success = True | |
@@ -1263,7 +1263,7 b' class TestResult(unittest._TextTestResul' | |||||
1263 | iolock.release() |
|
1263 | iolock.release() | |
1264 |
|
1264 | |||
1265 | class TestSuite(unittest.TestSuite): |
|
1265 | class TestSuite(unittest.TestSuite): | |
1266 | """Custom unitest TestSuite that knows how to execute Mercurial tests.""" |
|
1266 | """Custom unittest TestSuite that knows how to execute Mercurial tests.""" | |
1267 |
|
1267 | |||
1268 | def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None, |
|
1268 | def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None, | |
1269 | retest=False, keywords=None, loop=False, |
|
1269 | retest=False, keywords=None, loop=False, | |
@@ -1895,8 +1895,8 b' class TestRunner(object):' | |||||
1895 | the one we expect it to be. If not, print a warning to stderr.""" |
|
1895 | the one we expect it to be. If not, print a warning to stderr.""" | |
1896 | if ((self._bindir == self._pythondir) and |
|
1896 | if ((self._bindir == self._pythondir) and | |
1897 | (self._bindir != self._tmpbindir)): |
|
1897 | (self._bindir != self._tmpbindir)): | |
1898 | # The pythondir has been infered from --with-hg flag. |
|
1898 | # The pythondir has been inferred from --with-hg flag. | |
1899 | # We cannot expect anything sensible here |
|
1899 | # We cannot expect anything sensible here. | |
1900 | return |
|
1900 | return | |
1901 | expecthg = os.path.join(self._pythondir, 'mercurial') |
|
1901 | expecthg = os.path.join(self._pythondir, 'mercurial') | |
1902 | actualhg = self._gethgpath() |
|
1902 | actualhg = self._gethgpath() |
@@ -1,4 +1,4 b'' | |||||
1 | This test is de |
|
1 | This test is dedicated to test the bundle2 container format | |
2 |
|
2 | |||
3 | It test multiple existing parts to test different feature of the container. You |
|
3 | It test multiple existing parts to test different feature of the container. You | |
4 | probably do not need to touch this test unless you change the binary encoding |
|
4 | probably do not need to touch this test unless you change the binary encoding |
@@ -261,7 +261,7 b'' | |||||
261 | > print _("concatenating " " by " " space %s" % v) |
|
261 | > print _("concatenating " " by " " space %s" % v) | |
262 | > print _("concatenating " + " by " + " '+' %s" % v) |
|
262 | > print _("concatenating " + " by " + " '+' %s" % v) | |
263 | > |
|
263 | > | |
264 | > print _("maping operation in different line %s" |
|
264 | > print _("mapping operation in different line %s" | |
265 | > % v) |
|
265 | > % v) | |
266 | > |
|
266 | > | |
267 | > print _( |
|
267 | > print _( | |
@@ -278,7 +278,7 b'' | |||||
278 | > print _("concatenating " + " by " + " '+' %s" % v) |
|
278 | > print _("concatenating " + " by " + " '+' %s" % v) | |
279 | don't use % inside _() |
|
279 | don't use % inside _() | |
280 | ./map-inside-gettext.py:6: |
|
280 | ./map-inside-gettext.py:6: | |
281 | > print _("maping operation in different line %s" |
|
281 | > print _("mapping operation in different line %s" | |
282 | don't use % inside _() |
|
282 | don't use % inside _() | |
283 | ./map-inside-gettext.py:9: |
|
283 | ./map-inside-gettext.py:9: | |
284 | > print _( |
|
284 | > print _( |
@@ -889,9 +889,9 b' in the file revlog topology and the chan' | |||||
889 |
|
889 | |||
890 | The way mercurial does amends is to create a temporary commit (rev 3) and then |
|
890 | The way mercurial does amends is to create a temporary commit (rev 3) and then | |
891 | fold the new and old commits together into another commit (rev 4). During this |
|
891 | fold the new and old commits together into another commit (rev 4). During this | |
892 | process, findlimit is called to check how far back to look for the transitive |
|
892 | process, _findlimit is called to check how far back to look for the transitive | |
893 | closure of file copy information, but due to the divergence of the filelog |
|
893 | closure of file copy information, but due to the divergence of the filelog | |
894 | and changelog graph topologies, before findlimit was fixed, it returned a rev |
|
894 | and changelog graph topologies, before _findlimit was fixed, it returned a rev | |
895 | which was not far enough back in this case. |
|
895 | which was not far enough back in this case. | |
896 | $ hg mv a1 a2 |
|
896 | $ hg mv a1 a2 | |
897 | $ hg status --copies --rev 0 |
|
897 | $ hg status --copies --rev 0 |
@@ -272,7 +272,7 b' Test extension help:' | |||||
272 | schemes extend schemes with shortcuts to repository swarms |
|
272 | schemes extend schemes with shortcuts to repository swarms | |
273 | share share a common history between several working directories |
|
273 | share share a common history between several working directories | |
274 | shelve save and restore changes to the working directory |
|
274 | shelve save and restore changes to the working directory | |
275 | strip strip changesets and their descend |
|
275 | strip strip changesets and their descendants from history | |
276 | transplant command to transplant changesets from another branch |
|
276 | transplant command to transplant changesets from another branch | |
277 | win32mbcs allow the use of MBCS paths with problematic encodings |
|
277 | win32mbcs allow the use of MBCS paths with problematic encodings | |
278 | zeroconf discover and advertise repositories on the local network |
|
278 | zeroconf discover and advertise repositories on the local network |
@@ -589,7 +589,7 b' check messages when there are files to u' | |||||
589 | 89e6c98d92887913cadf06b2adb97f26cde4849b |
|
589 | 89e6c98d92887913cadf06b2adb97f26cde4849b | |
590 |
|
590 | |||
591 |
|
591 | |||
592 | Pusing revision #1 causes uploading entity 89e6c98d9288, which is |
|
592 | Pushing revision #1 causes uploading entity 89e6c98d9288, which is | |
593 | shared also by largefiles b1, b2 in revision #2 and b in revision #5. |
|
593 | shared also by largefiles b1, b2 in revision #2 and b in revision #5. | |
594 |
|
594 | |||
595 | Then, entity 89e6c98d9288 is not treated as "outgoing entity" at "hg |
|
595 | Then, entity 89e6c98d9288 is not treated as "outgoing entity" at "hg |
@@ -1,5 +1,5 b'' | |||||
1 | This file contains testcases that tend to be related to the wireprotocol part |
|
1 | This file contains testcases that tend to be related to the wire protocol part | |
2 | largefile. |
|
2 | of largefiles. | |
3 |
|
3 | |||
4 | $ USERCACHE="$TESTTMP/cache"; export USERCACHE |
|
4 | $ USERCACHE="$TESTTMP/cache"; export USERCACHE | |
5 | $ mkdir "${USERCACHE}" |
|
5 | $ mkdir "${USERCACHE}" |
@@ -568,7 +568,7 b' guarded (= not yet applied) one.' | |||||
568 | 3 G b.patch |
|
568 | 3 G b.patch | |
569 |
|
569 | |||
570 | test that "qselect --reapply" checks applied patches correctly when no |
|
570 | test that "qselect --reapply" checks applied patches correctly when no | |
571 | applied patche becomes guarded but some of unapplied ones become |
|
571 | applied patches becomes guarded but some of unapplied ones become | |
572 | unguarded. |
|
572 | unguarded. | |
573 |
|
573 | |||
574 | $ hg qpop -q -a |
|
574 | $ hg qpop -q -a |
@@ -1582,7 +1582,7 b' Test that secret mq patch does not break' | |||||
1582 |
|
1582 | |||
1583 | $ cd .. |
|
1583 | $ cd .. | |
1584 |
|
1584 | |||
1585 | Test inter |
|
1585 | Test interaction with revset (issue4426) | |
1586 |
|
1586 | |||
1587 | $ hg init issue4426 |
|
1587 | $ hg init issue4426 | |
1588 | $ cd issue4426 |
|
1588 | $ cd issue4426 |
@@ -755,7 +755,7 b' Pushing to Publish=True (common changese' | |||||
755 |
|
755 | |||
756 | Bare push with next changeset and common changeset needing sync (issue3575) |
|
756 | Bare push with next changeset and common changeset needing sync (issue3575) | |
757 |
|
757 | |||
758 | (reset some stat on remot repo to |
|
758 | (reset some stat on remote repo to avoid confusing other tests) | |
759 |
|
759 | |||
760 | $ hg -R ../alpha --config extensions.strip= strip --no-backup 967b449fbc94 |
|
760 | $ hg -R ../alpha --config extensions.strip= strip --no-backup 967b449fbc94 | |
761 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
761 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
@@ -296,7 +296,7 b' are different from each other.' | |||||
296 | \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88 [=====> ]\r (no-eol) (esc) |
|
296 | \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88 [=====> ]\r (no-eol) (esc) | |
297 | \r (no-eol) (esc) |
|
297 | \r (no-eol) (esc) | |
298 |
|
298 | |||
299 | test triming progress items, when they contain multi-byte characters, |
|
299 | test trimming progress items, when they contain multi-byte characters, | |
300 | of which length of byte sequence and columns in display are different |
|
300 | of which length of byte sequence and columns in display are different | |
301 | from each other. |
|
301 | from each other. | |
302 |
|
302 |
@@ -400,7 +400,7 b' Systematic behavior validation of most p' | |||||
400 |
|
400 | |||
401 | This section tests most of the possible combinations of working directory |
|
401 | This section tests most of the possible combinations of working directory | |
402 | changes and inter-revision changes. The number of possible cases is significant |
|
402 | changes and inter-revision changes. The number of possible cases is significant | |
403 | but they all have a slighly different handling. So this section commits to |
|
403 | but they all have a slightly different handling. So this section commits to | |
404 | generating and testing all of them to allow safe refactoring of the revert code. |
|
404 | generating and testing all of them to allow safe refactoring of the revert code. | |
405 |
|
405 | |||
406 | A python script is used to generate a file history for each combination of |
|
406 | A python script is used to generate a file history for each combination of | |
@@ -1065,7 +1065,7 b' revert all files individually and check ' | |||||
1065 | ### revert for: removed_wc |
|
1065 | ### revert for: removed_wc | |
1066 |
|
1066 | |||
1067 |
|
1067 | |||
1068 | check resulting directory againt the --all run |
|
1068 | check resulting directory against the --all run | |
1069 | (There should be no difference) |
|
1069 | (There should be no difference) | |
1070 |
|
1070 | |||
1071 | $ python ../dircontent.py > ../content-parent-explicit.txt |
|
1071 | $ python ../dircontent.py > ../content-parent-explicit.txt | |
@@ -1185,7 +1185,7 b' revert all files individually and check ' | |||||
1185 | ### revert for: removed_wc |
|
1185 | ### revert for: removed_wc | |
1186 |
|
1186 | |||
1187 |
|
1187 | |||
1188 | check resulting directory againt the --all run |
|
1188 | check resulting directory against the --all run | |
1189 | (There should be no difference) |
|
1189 | (There should be no difference) | |
1190 |
|
1190 | |||
1191 | $ python ../dircontent.py > ../content-base-explicit.txt |
|
1191 | $ python ../dircontent.py > ../content-base-explicit.txt |