spelling: fixes from proofreading of spell checker issues
Mads Kiilerich
r23139:e53f6b72 default
@@ -291,7 +291,7 b' pypats = ['
291 291 "always assign an opened file to a variable, and close it afterwards"),
292 292 (r'[\s\(](open|file)\([^)]*\)\.',
293 293 "always assign an opened file to a variable, and close it afterwards"),
294 (r'(?i)descendent', "the proper spelling is descendAnt"),
294 (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
295 295 (r'\.debug\(\_', "don't mark debug messages for translation"),
296 296 (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
297 297 (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
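
A note on why the fix spells the pattern `descend[e]nt` rather than plain `descendent`: check-code scans its own source, so a literal misspelling inside the pattern would make the checker flag itself. A standalone sketch of the effect:

```python
import re

pat = re.compile(r'(?i)descend[e]nt')

# the misspelling is still caught in scanned text...
assert pat.search("strip a descendent revision")
# ...but the pattern's own source text no longer contains the literal
# word, so the checker does not trip over itself
assert "descendent" not in r"(?i)descend[e]nt"
```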
@@ -74,7 +74,7 b' def getrevs(spec):'
74 74
75 75 parser = OptionParser(usage="usage: %prog [options] <revs>")
76 76 parser.add_option("-f", "--file",
77 help="read revset from FILE (stdin if omited)",
77 help="read revset from FILE (stdin if omitted)",
78 78 metavar="FILE")
79 79 parser.add_option("-R", "--repo",
80 80 help="run benchmark on REPO", metavar="REPO")
@@ -462,10 +462,10 b' def updatelfiles(ui, repo, filelist=None'
462 462 expecthash != lfutil.hashfile(abslfile))):
463 463 if lfile not in repo[None]: # not switched to normal file
464 464 util.unlinkpath(abslfile, ignoremissing=True)
465 # use normallookup() to allocate entry in largefiles
465 # use normallookup() to allocate an entry in largefiles
466 466 # dirstate, because lack of it misleads
467 467 # lfilesrepo.status() into recognition that such cache
468 # missing files are REMOVED.
468 # missing files are removed.
469 469 lfdirstate.normallookup(lfile)
470 470 update[lfile] = expecthash
471 471 else:
@@ -63,10 +63,10 b' def installmatchandpatsfn(f):'
63 63
64 64 def restorematchandpatsfn():
65 65 '''restores scmutil.matchandpats to what it was before
66 installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
66 installmatchandpatsfn was called. No-op if scmutil.matchandpats
67 67 is its original function.
68 68
69 Note that n calls to installnormalfilesmatchandpatsfn will require n calls
69 Note that n calls to installmatchandpatsfn will require n calls
70 70 to restore matchfn to reverse'''
71 71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
72 72 scmutil.matchandpats)
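
The corrected docstring describes a stack-like monkeypatch protocol: each install remembers the current function on an attribute, and each restore pops one level. A rough sketch of that pattern with illustrative names (not the largefiles code itself):

```python
def install(module, name, wrapper):
    # remember whatever is currently installed, then replace it
    wrapper.oldfn = getattr(module, name)
    setattr(module, name, wrapper)

def restore(module, name):
    # undo one install; a no-op once the original function is back
    current = getattr(module, name)
    setattr(module, name, getattr(current, 'oldfn', current))
```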
@@ -263,7 +263,7 b' def reposetup(ui, repo):'
263 263 # and so on), this code path is used to avoid:
264 264 # (1) updating standins, because standins should
265 265 # be already updated at this point
266 # (2) aborting when stadnins are matched by "match",
266 # (2) aborting when standins are matched by "match",
267 267 # because automated committing may specify them directly
268 268 #
269 269 if getattr(self, "_isrebasing", False) or \
@@ -278,7 +278,7 b' def rebase(ui, repo, **opts):'
278 278 commonanc, base, commonanc)
279 279 if not rebaseset:
280 280 # transform to list because smartsets are not comparable to
281 # lists. This should be improved to honor lazyness of
281 # lists. This should be improved to honor laziness of
282 282 # smartset.
283 283 if list(base) == [dest.rev()]:
284 284 if basef:
@@ -1,4 +1,4 b''
1 """strip changesets and their descendents from history
1 """strip changesets and their descendants from history
2 2
3 3 This extension allows you to strip changesets and all their descendants from the
4 4 repository. See the command help for details.
@@ -396,7 +396,7 b' class _BaseFile(list):'
396 396 def ordered_metadata(self):
397 397 """
398 398 Convenience method that returns an ordered version of the metadata
399 dictionnary. The return value is list of tuples (metadata name,
399 dictionary. The return value is list of tuples (metadata name,
400 400 metadata_value).
401 401 """
402 402 # copy the dict first
@@ -229,7 +229,7 b' class unbundlerecords(object):'
229 229 self.getreplies(inreplyto).add(category, entry)
230 230
231 231 def getreplies(self, partid):
232 """get the subrecords that replies to a specific part"""
232 """get the records that are replies to a specific part"""
233 233 return self._replies.setdefault(partid, unbundlerecords())
234 234
235 235 def __getitem__(self, cat):
@@ -303,7 +303,7 b' def processbundle(repo, unbundler, trans'
303 303 # consume the bundle content
304 304 part.read()
305 305 # Small hack to let caller code distinguish exceptions from bundle2
306 # processing fron the ones from bundle1 processing. This is mostly
306 # processing from processing the old format. This is mostly
307 307 # needed to handle different return codes to unbundle according to the
308 308 # type of bundle. We should probably clean up or drop this return code
309 309 # craziness in a future version.
@@ -359,7 +359,7 b' def _processpart(op, part):'
359 359
360 360
361 361 def decodecaps(blob):
362 """decode a bundle2 caps bytes blob into a dictionnary
362 """decode a bundle2 caps bytes blob into a dictionary
363 363
364 364 The blob is a list of capabilities (one per line)
365 365 Capabilities may have values using a line of the form::
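
(The value-line format itself is cut off by the diff context above. As a loose sketch only, assuming a `capability=value1,value2` layout with URL-quoted tokens — that layout is an assumption here, not confirmed by the excerpt:)

```python
from urllib.parse import unquote  # Python 3 for the sketch

def decodecaps(blob):
    caps = {}
    for line in blob.splitlines():
        if not line:
            continue
        key, eq, vals = line.partition('=')
        # values are assumed comma-separated and URL-quoted
        caps[unquote(key)] = [unquote(v) for v in vals.split(',')] if eq else []
    return caps
```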
@@ -741,7 +741,7 b' class interrupthandler(unpackermixin):'
741 741 self.ui.debug('bundle2 stream interruption, looking for a part.\n')
742 742 headerblock = self._readpartheader()
743 743 if headerblock is None:
744 self.ui.debug('no part found during iterruption.\n')
744 self.ui.debug('no part found during interruption.\n')
745 745 return
746 746 part = unbundlepart(self.ui, headerblock, self._fp)
747 747 op = interruptoperation(self.ui)
@@ -828,7 +828,7 b' class unbundlepart(unpackermixin):'
828 828 # split mandatory from advisory
829 829 mansizes = paramsizes[:mancount]
830 830 advsizes = paramsizes[mancount:]
831 # retrive param value
831 # retrieve param value
832 832 manparams = []
833 833 for key, value in mansizes:
834 834 manparams.append((self._fromheader(key), self._fromheader(value)))
@@ -920,7 +920,7 b' def handlechangegroup(op, inpart):'
920 920 ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
921 921 op.records.add('changegroup', {'return': ret})
922 922 if op.reply is not None:
923 # This is definitly not the final form of this
923 # This is definitely not the final form of this
924 924 # return. But one need to start somewhere.
925 925 part = op.reply.newpart('b2x:reply:changegroup')
926 926 part.addparam('in-reply-to', str(inpart.id), mandatory=False)
@@ -989,7 +989,7 b' def handleremotechangegroup(op, inpart):'
989 989 ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
990 990 op.records.add('changegroup', {'return': ret})
991 991 if op.reply is not None:
992 # This is definitly not the final form of this
992 # This is definitely not the final form of this
993 993 # return. But one need to start somewhere.
994 994 part = op.reply.newpart('b2x:reply:changegroup')
995 995 part.addparam('in-reply-to', str(inpart.id), mandatory=False)
@@ -113,7 +113,7 b' def logmessage(ui, opts):'
113 113 def mergeeditform(ctxorbool, baseform):
114 114 """build appropriate editform from ctxorbool and baseform
115 115
116 'cxtorbool' is one of a ctx to be committed, or a bool whether
116 'ctxorbool' is one of a ctx to be committed, or a bool whether
117 117 merging is committed.
118 118
119 119 This returns editform 'baseform' with '.merge' if merging is
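
Read as code, the behaviour this docstring describes is roughly the following sketch; the `.normal` fallback suffix is an assumption, since that part of the docstring lies outside the diff context:

```python
def mergeeditform(ctxorbool, baseform):
    if isinstance(ctxorbool, bool):
        merging = ctxorbool
    else:
        merging = len(ctxorbool.parents()) > 1  # a ctx with two parents
    return baseform + ('.merge' if merging else '.normal')
```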
@@ -1783,8 +1783,8 b' def _makelogrevset(repo, pats, opts, rev'
1783 1783 # If we're forced to take the slowpath it means we're following
1784 1784 # at least one pattern/directory, so don't bother with rename tracking.
1785 1785 if follow and not match.always() and not slowpath:
1786 # _makelogfilematcher expects its files argument to be relative to
1787 # the repo root, so use match.files(), not pats.
1786 # _makefollowlogfilematcher expects its files argument to be
1787 # relative to the repo root, so use match.files(), not pats.
1788 1788 filematcher = _makefollowlogfilematcher(repo, match.files(),
1789 1789 followfirst)
1790 1790 else:
@@ -2522,11 +2522,11 b' def revert(ui, repo, ctx, parents, *pats'
2522 2522 deladded = _deleted - smf
2523 2523 deleted = _deleted - deladded
2524 2524
2525 # We need to account for the state of file in the dirstate
2525 # We need to account for the state of file in the dirstate.
2526 2526 #
2527 # Even, when we revert agains something else than parent. this will
2527 # Even, when we revert against something else than parent. This will
2528 2528 # slightly alter the behavior of revert (doing back up or not, delete
2529 # or just forget etc)
2529 # or just forget etc).
2530 2530 if parent == node:
2531 2531 dsmodified = modified
2532 2532 dsadded = added
@@ -109,7 +109,7 b' class basectx(object):'
109 109 """provide a hook to allow child objects to postprocess status results
110 110
111 111 For example, this allows other contexts, such as workingctx, to filter
112 suspect symlinks in the case of FAT32 and NTFS filesytems.
112 suspect symlinks in the case of FAT32 and NTFS filesystems.
113 113 """
114 114 return s
115 115
@@ -1415,7 +1415,7 b' class workingctx(committablectx):'
1415 1415 def _prestatus(self, other, s, match, listignored, listclean, listunknown):
1416 1416 """override the parent hook with a dirstate query
1417 1417
1418 We use this prestatus hook to populate the status with information from
1418 We use this _prestatus hook to populate the status with information from
1419 1419 the dirstate.
1420 1420 """
1421 1421 # doesn't need to call super; if that changes, be aware that super
@@ -1426,9 +1426,9 b' class workingctx(committablectx):'
1426 1426 def _poststatus(self, other, s, match, listignored, listclean, listunknown):
1427 1427 """override the parent hook with a filter for suspect symlinks
1428 1428
1429 We use this poststatus hook to filter out symlinks that might have
1429 We use this _poststatus hook to filter out symlinks that might have
1430 1430 accidentally ended up with the entire contents of the file they are
1431 susposed to be linking to.
1431 supposed to be linking to.
1432 1432 """
1433 1433 s[0] = self._filtersuspectsymlink(s[0])
1434 1434 self._status = scmutil.status(*s)
@@ -1693,7 +1693,7 b' class memctx(committablectx):'
1693 1693 class memfilectx(committablefilectx):
1694 1694 """memfilectx represents an in-memory file to commit.
1695 1695
1696 See memctx and commitablefilectx for more details.
1696 See memctx and committablefilectx for more details.
1697 1697 """
1698 1698 def __init__(self, repo, path, data, islink=False,
1699 1699 isexec=False, copied=None, memctx=None):
@@ -97,7 +97,7 b' def _findlimit(repo, a, b):'
97 97 # |/
98 98 # o 0 a0
99 99 #
100 # When findlimit is called, a and b are revs 3 and 0, so limit will be 2,
100 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
101 101 # yet the filelog has the copy information in rev 1 and we will not look
102 102 # back far enough unless we also look at the a and b as candidates.
103 103 # This only occurs when a is a descendent of b or visa-versa.
@@ -298,7 +298,7 b' def _pushdiscoveryphase(pushop):'
298 298 else:
299 299 # adds changeset we are going to push as draft
300 300 #
301 # should not be necessary for pushblishing server, but because of an
301 # should not be necessary for publishing server, but because of an
302 302 # issue fixed in xxxxx we have to do it anyway.
303 303 fdroots = list(unfi.set('roots(%ln + %ln::)',
304 304 outgoing.missing, droots))
@@ -448,7 +448,7 b' def _pushb2ctx(pushop, bundler):'
448 448 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing)
449 449 cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
450 450 def handlereply(op):
451 """extract addchangroup returns from server reply"""
451 """extract addchangegroup returns from server reply"""
452 452 cgreplies = op.records.getreplies(cgpart.id)
453 453 assert len(cgreplies['changegroup']) == 1
454 454 pushop.cgresult = cgreplies['changegroup'][0]['return']
@@ -702,7 +702,7 b' def _pushsyncphase(pushop):'
702 702 pushop.ui.warn(msg)
703 703
704 704 else:
705 # fallback to independant pushkey command
705 # fallback to independent pushkey command
706 706 for newremotehead in outdated:
707 707 r = pushop.remote.pushkey('phases',
708 708 newremotehead.hex(),
@@ -146,7 +146,7 b' def canonpath(root, cwd, myname, auditor'
146 146 def normasprefix(path):
147 147 '''normalize the specified path as path prefix
148 148
149 Returned vaule can be used safely for "p.startswith(prefix)",
149 Returned value can be used safely for "p.startswith(prefix)",
150 150 "p[len(prefix):]", and so on.
151 151
152 152 For efficiency, this expects "path" argument to be already
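
As a sketch of what normalizing a "path as prefix" typically means here: guarantee exactly one trailing separator so the `startswith`/slicing idioms above are safe. Assuming the input is already normalized, as the docstring requires:

```python
import os

def normasprefix(path):
    drive, rest = os.path.splitdrive(path)
    # drive/filesystem roots like "/" or "C:\" already end with os.sep;
    # any other normalized path does not, so append one
    if len(rest) != len(os.sep):
        return path + os.sep
    return path
```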
@@ -2551,7 +2551,7 b' class addset(abstractsmartset):'
2551 2551 return it()
2552 2552
2553 2553 def _trysetasclist(self):
2554 """populate the _asclist attribut if possible and necessary"""
2554 """populate the _asclist attribute if possible and necessary"""
2555 2555 if self._genlist is not None and self._asclist is None:
2556 2556 self._asclist = sorted(self._genlist)
2557 2557
@@ -2744,7 +2744,7 b' class generatorset(abstractsmartset):'
2744 2744
2745 2745 # We have to use this complex iteration strategy to allow multiple
2746 2746 # iterations at the same time. We need to be able to catch revision
2747 # removed from `consumegen` and added to genlist in another instance.
2747 # removed from _consumegen and added to genlist in another instance.
2748 2748 #
2749 2749 # Getting rid of it would provide an about 15% speed up on this
2750 2750 # iteration.
@@ -2939,17 +2939,15 b' class _spanset(abstractsmartset):'
2939 2939 class fullreposet(_spanset):
2940 2940 """a set containing all revisions in the repo
2941 2941
2942 This class exists to host special optimisation.
2942 This class exists to host special optimization.
2943 2943 """
2944 2944
2945 2945 def __init__(self, repo):
2946 2946 super(fullreposet, self).__init__(repo)
2947 2947
2948 2948 def __and__(self, other):
2949 """fullrepo & other -> other
2950
2951 As self contains the whole repo, all of the other set should also be in
2952 self. Therefor `self & other = other`.
2949 """As self contains the whole repo, all of the other set should also be
2950 in self. Therefore `self & other = other`.
2953 2951
2954 2952 This boldly assumes the other contains valid revs only.
2955 2953 """
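
The rewritten docstring states an identity that reads almost directly as code; a toy analogue (not the real smartset class, which also has to preserve an iteration-order contract):

```python
class FullSet:
    """toy analogue: a set that contains every valid element"""
    def __and__(self, other):
        # intersecting with everything is the identity
        return other

assert (FullSet() & {1, 2}) == {1, 2}
```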
@@ -39,7 +39,7 b''
39 39 # and between base and p2, possibly on separate clones
40 40 # 4. for each tag found both on p1 and p2 perform the following merge algorithm:
41 41 # - the tags conflict if their tag "histories" have the same "rank" (i.e.
42 # length) _AND_ the last (current) tag is _NOT_ the same
42 # length) AND the last (current) tag is NOT the same
43 43 # - for non conflicting tags:
44 44 # - choose which are the high and the low ranking nodes
45 45 # - the high ranking list of nodes is the one that is longer.
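
A tiny illustration of the conflict rule in step 4, using hypothetical node histories:

```python
# hypothetical tag "histories" (node lists) as seen from each parent
p1_hist = ['n1', 'n2', 'n3']   # rank 3, current node 'n3'
p2_hist = ['n1', 'n4', 'n5']   # rank 3, current node 'n5'

# same rank AND a different current node -> the tag conflicts
conflict = len(p1_hist) == len(p2_hist) and p1_hist[-1] != p2_hist[-1]
assert conflict
```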
@@ -57,7 +57,7 b''
57 57 # 5. write the merged tags taking into account to their positions in the first
58 58 # parent (i.e. try to keep the relative ordering of the nodes that come
59 59 # from p1). This minimizes the diff between the merged and the p1 tag files
60 # This is donw by using the following algorithm
60 # This is done by using the following algorithm
61 61 # - group the nodes for a given tag that must be written next to each other
62 62 # - A: nodes that come from consecutive lines on p1
63 63 # - B: nodes that come from p2 (i.e. whose associated line number is
@@ -81,9 +81,9 b' hexnullid = hex(nullid)'
81 81 def readtagsformerge(ui, repo, lines, fn='', keeplinenums=False):
82 82 '''read the .hgtags file into a structure that is suitable for merging
83 83
84 Sepending on the keeplinenumbers flag, clear the line numbers associated
85 with each tag. Rhis is done because only the line numbers of the first
86 parent are useful for merging
84 Depending on the keeplinenums flag, clear the line numbers associated
85 with each tag. This is done because only the line numbers of the first
86 parent are useful for merging.
87 87 '''
88 88 filetags = tagsmod._readtaghist(ui, repo, lines, fn=fn, recode=None,
89 89 calcnodelines=True)[1]
@@ -87,7 +87,7 b' def readlocaltags(ui, repo, alltags, tag'
87 87 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
88 88 '''Read tag definitions from a file (or any source of lines).
89 89 This function returns two sortdicts with similar information:
90 - the first dict, bingtaglist, contains the tag information as expected by
90 - the first dict, bintaghist, contains the tag information as expected by
91 91 the _readtags function, i.e. a mapping from tag name to (node, hist):
92 92 - node is the node id from the last line read for that name,
93 93 - hist is the list of node ids previously associated with it (in file
@@ -537,7 +537,7 b' class ui(object):'
537 537 return path or loc
538 538
539 539 def pushbuffer(self, error=False):
540 """install a buffer to capture standar output of the ui object
540 """install a buffer to capture standard output of the ui object
541 541
542 542 If error is True, the error output will be captured too."""
543 543 self._buffers.append([])
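
For orientation, `self._buffers.append([])` is the whole trick: writes land in the innermost buffer until a matching pop drains it. A self-contained toy version (not the real ui class):

```python
import sys

class MiniUI:
    def __init__(self):
        self._buffers = []

    def pushbuffer(self):
        self._buffers.append([])            # open a new capture scope

    def write(self, *args):
        if self._buffers:
            self._buffers[-1].extend(args)  # captured, not printed
        else:
            sys.stdout.write(''.join(args))

    def popbuffer(self):
        return ''.join(self._buffers.pop())

ui = MiniUI()
ui.pushbuffer()
ui.write('captured\n')
assert ui.popbuffer() == 'captured\n'
```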
@@ -1148,7 +1148,7 b' class chunkbuffer(object):'
1148 1148 """Read L bytes of data from the iterator of chunks of data.
1149 1149 Returns less than L bytes if the iterator runs dry.
1150 1150
1151 If size parameter is ommited, read everything"""
1151 If size parameter is omitted, read everything"""
1152 1152 left = l
1153 1153 buf = []
1154 1154 queue = self._queue
@@ -827,7 +827,7 b' def unbundle(repo, proto, heads):'
827 827 r = exchange.unbundle(repo, gen, their_heads, 'serve',
828 828 proto._client())
829 829 if util.safehasattr(r, 'addpart'):
830 # The return looks streameable, we are in the bundle2 case and
830 # The return looks streamable, we are in the bundle2 case and
831 831 # should return a stream.
832 832 return streamres(r.getchunks())
833 833 return pushres(r)
@@ -500,7 +500,7 b' class Test(unittest.TestCase):'
500 500 except self.failureException, e:
501 501 # This differs from unittest in that we don't capture
502 502 # the stack trace. This is for historical reasons and
503 # this decision could be revisted in the future,
503 # this decision could be revisited in the future,
504 504 # especially for PythonTest instances.
505 505 if result.addFailure(self, str(e)):
506 506 success = True
@@ -1263,7 +1263,7 b' class TestResult(unittest._TextTestResul'
1263 1263 iolock.release()
1264 1264
1265 1265 class TestSuite(unittest.TestSuite):
1266 """Custom unitest TestSuite that knows how to execute Mercurial tests."""
1266 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1267 1267
1268 1268 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1269 1269 retest=False, keywords=None, loop=False,
@@ -1895,8 +1895,8 b' class TestRunner(object):'
1895 1895 the one we expect it to be. If not, print a warning to stderr."""
1896 1896 if ((self._bindir == self._pythondir) and
1897 1897 (self._bindir != self._tmpbindir)):
1898 # The pythondir has been infered from --with-hg flag.
1899 # We cannot expect anything sensible here
1898 # The pythondir has been inferred from --with-hg flag.
1899 # We cannot expect anything sensible here.
1900 1900 return
1901 1901 expecthg = os.path.join(self._pythondir, 'mercurial')
1902 1902 actualhg = self._gethgpath()
@@ -1,4 +1,4 b''
1 This test is decicated to test the bundle2 container format
1 This test is dedicated to test the bundle2 container format
2 2
3 3 It test multiple existing parts to test different feature of the container. You
4 4 probably do not need to touch this test unless you change the binary encoding
@@ -261,7 +261,7 b''
261 261 > print _("concatenating " " by " " space %s" % v)
262 262 > print _("concatenating " + " by " + " '+' %s" % v)
263 263 >
264 > print _("maping operation in different line %s"
264 > print _("mapping operation in different line %s"
265 265 > % v)
266 266 >
267 267 > print _(
@@ -278,7 +278,7 b''
278 278 > print _("concatenating " + " by " + " '+' %s" % v)
279 279 don't use % inside _()
280 280 ./map-inside-gettext.py:6:
281 > print _("maping operation in different line %s"
281 > print _("mapping operation in different line %s"
282 282 don't use % inside _()
283 283 ./map-inside-gettext.py:9:
284 284 > print _(
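
The rule these tests exercise ("don't use % inside _()") exists because formatting before the gettext call changes the msgid, so the catalog lookup can never match a translation. A minimal sketch (Python 3 syntax, unlike the Python 2 test file):

```python
from gettext import gettext as _

v = "example"
ok = _("mapping operation in different line %s") % v   # lookup, then format
bad = _("mapping operation in different line %s" % v)  # formats the msgid first
```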
@@ -889,9 +889,9 b' in the file revlog topology and the chan'
889 889
890 890 The way mercurial does amends is to create a temporary commit (rev 3) and then
891 891 fold the new and old commits together into another commit (rev 4). During this
892 process, findlimit is called to check how far back to look for the transitive
892 process, _findlimit is called to check how far back to look for the transitive
893 893 closure of file copy information, but due to the divergence of the filelog
894 and changelog graph topologies, before findlimit was fixed, it returned a rev
894 and changelog graph topologies, before _findlimit was fixed, it returned a rev
895 895 which was not far enough back in this case.
896 896 $ hg mv a1 a2
897 897 $ hg status --copies --rev 0
@@ -272,7 +272,7 b' Test extension help:'
272 272 schemes extend schemes with shortcuts to repository swarms
273 273 share share a common history between several working directories
274 274 shelve save and restore changes to the working directory
275 strip strip changesets and their descendents from history
275 strip strip changesets and their descendants from history
276 276 transplant command to transplant changesets from another branch
277 277 win32mbcs allow the use of MBCS paths with problematic encodings
278 278 zeroconf discover and advertise repositories on the local network
@@ -589,7 +589,7 b' check messages when there are files to u'
589 589 89e6c98d92887913cadf06b2adb97f26cde4849b
590 590
591 591
592 Pusing revision #1 causes uploading entity 89e6c98d9288, which is
592 Pushing revision #1 causes uploading entity 89e6c98d9288, which is
593 593 shared also by largefiles b1, b2 in revision #2 and b in revision #5.
594 594
595 595 Then, entity 89e6c98d9288 is not treated as "outgoing entity" at "hg
@@ -1,5 +1,5 b''
1 This file contains testcases that tend to be related to the wireprotocol part of
2 largefile.
1 This file contains testcases that tend to be related to the wire protocol part
2 of largefiles.
3 3
4 4 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
5 5 $ mkdir "${USERCACHE}"
@@ -568,7 +568,7 b' guarded (= not yet applied) one.'
568 568 3 G b.patch
569 569
570 570 test that "qselect --reapply" checks applied patches correctly when no
571 applied patche becomes guarded but some of unapplied ones become
571 applied patches becomes guarded but some of unapplied ones become
572 572 unguarded.
573 573
574 574 $ hg qpop -q -a
@@ -1582,7 +1582,7 b' Test that secret mq patch does not break'
1582 1582
1583 1583 $ cd ..
1584 1584
1585 Test interraction with revset (issue4426)
1585 Test interaction with revset (issue4426)
1586 1586
1587 1587 $ hg init issue4426
1588 1588 $ cd issue4426
@@ -755,7 +755,7 b' Pushing to Publish=True (common changese'
755 755
756 756 Bare push with next changeset and common changeset needing sync (issue3575)
757 757
758 (reset some stat on remot repo to not confused other test)
758 (reset some stat on remote repo to avoid confusing other tests)
759 759
760 760 $ hg -R ../alpha --config extensions.strip= strip --no-backup 967b449fbc94
761 761 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
@@ -296,7 +296,7 b' are different from each other.'
296 296 \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88 [=====> ]\r (no-eol) (esc)
297 297 \r (no-eol) (esc)
298 298
299 test triming progress items, when they contain multi-byte characters,
299 test trimming progress items, when they contain multi-byte characters,
300 300 of which length of byte sequence and columns in display are different
301 301 from each other.
302 302
@@ -400,7 +400,7 b' Systematic behavior validation of most p'
400 400
401 401 This section tests most of the possible combinations of working directory
402 402 changes and inter-revision changes. The number of possible cases is significant
403 but they all have a slighly different handling. So this section commits to
403 but they all have a slightly different handling. So this section commits to
404 404 generating and testing all of them to allow safe refactoring of the revert code.
405 405
406 406 A python script is used to generate a file history for each combination of
@@ -1065,7 +1065,7 b' revert all files individually and check '
1065 1065 ### revert for: removed_wc
1066 1066
1067 1067
1068 check resulting directory againt the --all run
1068 check resulting directory against the --all run
1069 1069 (There should be no difference)
1070 1070
1071 1071 $ python ../dircontent.py > ../content-parent-explicit.txt
@@ -1185,7 +1185,7 b' revert all files individually and check '
1185 1185 ### revert for: removed_wc
1186 1186
1187 1187
1188 check resulting directory againt the --all run
1188 check resulting directory against the --all run
1189 1189 (There should be no difference)
1190 1190
1191 1191 $ python ../dircontent.py > ../content-base-explicit.txt