@@ -1,304 +1,314 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 
 from .i18n import _
 from .node import short
 from . import (
     bundle2,
     changegroup,
     error,
     exchange,
     util,
 )
 
 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cgversion = changegroup.safeversion(repo)
 
     cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                        version=cgversion)
     backupdir = "strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)
 
     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set('%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = util.sha1(''.join(allhashes)).hexdigest()
     name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
 
     comp = None
     if cgversion != '01':
         bundletype = "HG20"
         if compress:
             comp = 'BZ'
     elif compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs,
                                    compression=comp)
 
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()
 
     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         _, brokenset = revlog.getstrippoint(striprev)
         s.update([revlog.linkrev(r) for r in brokenset])
 
     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))
 
     return s
 
 def strip(ui, repo, nodelist, backup=True, topic='backup'):
     # This function operates within a transaction of its own, but does
     # not take any lock on the repo.
     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in ['none', 'strip']:
         backup = False
 
     repo = repo.unfiltered()
     repo.destroying()
 
     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants([rev]):
             tostrip.add(desc)
 
     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)
 
     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]
 
     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
 
     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
     # is much faster
     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget.first()].node()
     else:
         newbmtarget = '.'
 
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
 
     # create a changegroup for all the branches we need to keep
     backupfile = None
     vfs = repo.vfs
     node = nodelist[-1]
     if backup:
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") %
                        vfs.join(backupfile))
         repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                     vfs.join(backupfile))
     if saveheads or savebases:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=False)
 
     mfst = repo.manifest
 
     curtr = repo.currenttransaction()
     if curtr is not None:
         del curtr # avoid carrying reference to transaction for nothing
         msg = _('programming error: cannot strip from inside a transaction')
         raise error.Abort(msg, hint=_('contact your extension maintainer'))
 
     try:
         with repo.transaction("strip") as tr:
             offset = len(tr.entries)
 
             tr.startgroup()
             cl.strip(striprev, tr)
             mfst.strip(striprev, tr)
             for fn in files:
                 repo.file(fn).strip(striprev, tr)
             tr.endgroup()
 
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.svfs(file, 'a').truncate(troffset)
                 if troffset == 0:
                     repo.store.markremoved(file)
 
         if saveheads or savebases:
             ui.note(_("adding branch\n"))
             f = vfs.open(chgrpfile, "rb")
             gen = exchange.readbundle(ui, f, chgrpfile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             if isinstance(gen, bundle2.unbundle20):
                 with repo.transaction('strip') as tr:
                     tr.hookargs = {'source': 'strip',
                                    'url': 'bundle:' + vfs.join(chgrpfile)}
                     bundle2.applybundle(repo, gen, tr, source='strip',
                                         url='bundle:' + vfs.join(chgrpfile))
             else:
                 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         lock = tr = None
         try:
             lock = repo.lock()
             tr = repo.transaction('repair')
             bm.recordchange(tr)
             tr.close()
         finally:
             tr.release()
             lock.release()
 
         # remove undo files
         for undovfs, undofile in repo.undofiles():
             try:
                 undovfs.unlink(undofile)
             except OSError as e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') %
                             (undovfs.join(undofile), str(e)))
 
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
                     % vfs.join(backupfile))
         elif saveheads:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                     % vfs.join(chgrpfile))
         raise
     else:
         if saveheads or savebases:
             # Remove partial backup only if there were no exceptions
             vfs.unlink(chgrpfile)
 
     repo.destroyed()
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
 
     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()
 
     if 'fncache' not in repo.requirements:
         ui.warn(_('(not rebuilding fncache because repository does not '
                   'support fncache)\n'))
         return
 
     with repo.lock():
         fnc = repo.store.fncache
         # Trigger load of fncache.
         if 'irrelevant' in fnc:
             pass
 
         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()
 
         repolen = len(repo)
         for rev in repo:
             ui.progress(_('changeset'), rev, total=repolen)
 
             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)
 
                 i = 'data/%s.i' % f
                 d = 'data/%s.d' % f
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         ui.progress(_('changeset'), None)
 
+        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
+            for dir in util.dirs(seenfiles):
+                i = 'meta/%s/00manifest.i' % dir
+                d = 'meta/%s/00manifest.d' % dir
+
+                if repo.store._exists(i):
+                    newentries.add(i)
+                if repo.store._exists(d):
+                    newentries.add(d)
+
         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_('removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_('adding %s\n') % p)
 
         if addcount or removecount:
             ui.write(_('%d items added, %d removed from fncache\n') %
                      (addcount, removecount))
             fnc.entries = newentries
             fnc._dirty = True
 
             with repo.transaction('fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_('fncache already up to date\n'))
 
 def stripbmrevset(repo, mark):
     """
     The revset to strip when strip is called with -B mark
 
     Needs to live here so extensions can use it and wrap it even when strip is
     not enabled or not present on a box.
     """
     return repo.revs("ancestors(bookmark(%s)) - "
                      "ancestors(head() and not bookmark(%s)) - "
                      "ancestors(bookmark() and not bookmark(%s))",
                      mark, mark, mark)
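
Note on the repair.py hunk: with treemanifests enabled, each directory of the
manifest gets its own revlog under meta/<dir>/00manifest.{i,d}, so rebuilding
the fncache from history must also probe those paths for every directory of
every file ever touched. A minimal sketch of that probing, assuming a
plain-Python stand-in for util.dirs (the real helper is C-accelerated) and an
exists() predicate in place of repo.store._exists:

    import posixpath

    def dirs(files):
        """Yield every ancestor directory of every path (hypothetical
        stand-in for mercurial.util.dirs)."""
        seen = set()
        for f in files:
            d = posixpath.dirname(f)
            while d and d not in seen:
                seen.add(d)
                d = posixpath.dirname(d)
        return seen

    def treemanifestentries(seenfiles, exists):
        """Return the meta/ fncache entries for per-directory manifests."""
        entries = set()
        for dir in dirs(seenfiles):
            for suffix in ('.i', '.d'):
                path = 'meta/%s/00manifest%s' % (dir, suffix)
                if exists(path):
                    entries.add(path)
        return entries

    # e.g. treemanifestentries(['a/b/c.txt'], exists) probes
    # meta/a/00manifest.i, meta/a/00manifest.d, meta/a/b/00manifest.i, ...
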
@@ -1,552 +1,553 @@
 # store.py - repository store handling for Mercurial
 #
 # Copyright 2008 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import os
 import stat
 
 from .i18n import _
 from . import (
     error,
     parsers,
     scmutil,
     util,
 )
 
 _sha = util.sha1
 
 # This avoids a collision between a file named foo and a dir named
 # foo.i or foo.d
 def _encodedir(path):
     '''
     >>> _encodedir('data/foo.i')
     'data/foo.i'
     >>> _encodedir('data/foo.i/bla.i')
     'data/foo.i.hg/bla.i'
     >>> _encodedir('data/foo.i.hg/bla.i')
     'data/foo.i.hg.hg/bla.i'
     >>> _encodedir('data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
     'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
     '''
     return (path
             .replace(".hg/", ".hg.hg/")
             .replace(".i/", ".i.hg/")
             .replace(".d/", ".d.hg/"))
 
 encodedir = getattr(parsers, 'encodedir', _encodedir)
 
 def decodedir(path):
     '''
     >>> decodedir('data/foo.i')
     'data/foo.i'
     >>> decodedir('data/foo.i.hg/bla.i')
     'data/foo.i/bla.i'
     >>> decodedir('data/foo.i.hg.hg/bla.i')
     'data/foo.i.hg/bla.i'
     '''
     if ".hg/" not in path:
         return path
     return (path
             .replace(".d.hg/", ".d/")
             .replace(".i.hg/", ".i/")
             .replace(".hg.hg/", ".hg/"))
 
 def _buildencodefun():
     '''
     >>> enc, dec = _buildencodefun()
 
     >>> enc('nothing/special.txt')
     'nothing/special.txt'
     >>> dec('nothing/special.txt')
     'nothing/special.txt'
 
     >>> enc('HELLO')
     '_h_e_l_l_o'
     >>> dec('_h_e_l_l_o')
     'HELLO'
 
     >>> enc('hello:world?')
     'hello~3aworld~3f'
     >>> dec('hello~3aworld~3f')
     'hello:world?'
 
     >>> enc('the\x07quick\xADshot')
     'the~07quick~adshot'
     >>> dec('the~07quick~adshot')
     'the\\x07quick\\xadshot'
     '''
     e = '_'
     winreserved = [ord(x) for x in '\\:*?"<>|']
     cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
     for x in (range(32) + range(126, 256) + winreserved):
         cmap[chr(x)] = "~%02x" % x
     for x in range(ord("A"), ord("Z") + 1) + [ord(e)]:
         cmap[chr(x)] = e + chr(x).lower()
     dmap = {}
     for k, v in cmap.iteritems():
         dmap[v] = k
     def decode(s):
         i = 0
         while i < len(s):
             for l in xrange(1, 4):
                 try:
                     yield dmap[s[i:i + l]]
                     i += l
                     break
                 except KeyError:
                     pass
             else:
                 raise KeyError
     return (lambda s: ''.join([cmap[c] for c in s]),
             lambda s: ''.join(list(decode(s))))
 
 _encodefname, _decodefname = _buildencodefun()
 
 def encodefilename(s):
     '''
     >>> encodefilename('foo.i/bar.d/bla.hg/hi:world?/HELLO')
     'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
     '''
     return _encodefname(encodedir(s))
 
 def decodefilename(s):
     '''
     >>> decodefilename('foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
     'foo.i/bar.d/bla.hg/hi:world?/HELLO'
     '''
     return decodedir(_decodefname(s))
 
 def _buildlowerencodefun():
     '''
     >>> f = _buildlowerencodefun()
     >>> f('nothing/special.txt')
     'nothing/special.txt'
     >>> f('HELLO')
     'hello'
     >>> f('hello:world?')
     'hello~3aworld~3f'
     >>> f('the\x07quick\xADshot')
     'the~07quick~adshot'
     '''
     winreserved = [ord(x) for x in '\\:*?"<>|']
     cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
     for x in (range(32) + range(126, 256) + winreserved):
         cmap[chr(x)] = "~%02x" % x
     for x in range(ord("A"), ord("Z") + 1):
         cmap[chr(x)] = chr(x).lower()
     return lambda s: "".join([cmap[c] for c in s])
 
 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
 
 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
 _winres4 = ('com', 'lpt')               # length 4 (with trailing 1..9)
 def _auxencode(path, dotencode):
     '''
     Encodes filenames containing names reserved by Windows or which end in
     period or space. Does not touch other single reserved characters c.
     Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
     Additionally encodes space or period at the beginning, if dotencode is
     True. Parameter path is assumed to be all lowercase.
     A segment only needs encoding if a reserved name appears as a
     basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
     doesn't need encoding.
 
     >>> s = '.foo/aux.txt/txt.aux/con/prn/nul/foo.'
     >>> _auxencode(s.split('/'), True)
     ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
     >>> s = '.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
     >>> _auxencode(s.split('/'), False)
     ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
     >>> _auxencode(['foo. '], True)
     ['foo.~20']
     >>> _auxencode([' .foo'], True)
     ['~20.foo']
     '''
     for i, n in enumerate(path):
         if not n:
             continue
         if dotencode and n[0] in '. ':
             n = "~%02x" % ord(n[0]) + n[1:]
             path[i] = n
         else:
             l = n.find('.')
             if l == -1:
                 l = len(n)
             if ((l == 3 and n[:3] in _winres3) or
                 (l == 4 and n[3] <= '9' and n[3] >= '1'
                         and n[:3] in _winres4)):
                 # encode third letter ('aux' -> 'au~78')
                 ec = "~%02x" % ord(n[2])
                 n = n[0:2] + ec + n[3:]
                 path[i] = n
         if n[-1] in '. ':
             # encode last period or space ('foo...' -> 'foo..~2e')
             path[i] = n[:-1] + "~%02x" % ord(n[-1])
     return path
 
 _maxstorepathlen = 120
 _dirprefixlen = 8
 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
 
 def _hashencode(path, dotencode):
     digest = _sha(path).hexdigest()
     le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
     parts = _auxencode(le, dotencode)
     basename = parts[-1]
     _root, ext = os.path.splitext(basename)
     sdirs = []
     sdirslen = 0
     for p in parts[:-1]:
         d = p[:_dirprefixlen]
         if d[-1] in '. ':
             # Windows can't access dirs ending in period or space
             d = d[:-1] + '_'
         if sdirslen == 0:
             t = len(d)
         else:
             t = sdirslen + 1 + len(d)
             if t > _maxshortdirslen:
                 break
         sdirs.append(d)
         sdirslen = t
     dirs = '/'.join(sdirs)
     if len(dirs) > 0:
         dirs += '/'
     res = 'dh/' + dirs + digest + ext
     spaceleft = _maxstorepathlen - len(res)
     if spaceleft > 0:
         filler = basename[:spaceleft]
         res = 'dh/' + dirs + filler + digest + ext
     return res
 
 def _hybridencode(path, dotencode):
     '''encodes path with a length limit
 
     Encodes all paths that begin with 'data/', according to the following.
 
     Default encoding (reversible):
 
     Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
     characters are encoded as '~xx', where xx is the two digit hex code
     of the character (see encodefilename).
     Relevant path components consisting of Windows reserved filenames are
     masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
 
     Hashed encoding (not reversible):
 
     If the default-encoded path is longer than _maxstorepathlen, a
     non-reversible hybrid hashing of the path is done instead.
     This encoding uses up to _dirprefixlen characters of all directory
     levels of the lowerencoded path, but not more levels than can fit into
     _maxshortdirslen.
     Then follows the filler followed by the sha digest of the full path.
     The filler is the beginning of the basename of the lowerencoded path
     (the basename is everything after the last path separator). The filler
     is as long as possible, filling in characters from the basename until
     the encoded path has _maxstorepathlen characters (or all chars of the
     basename have been taken).
     The extension (e.g. '.i' or '.d') is preserved.
 
     The string 'data/' at the beginning is replaced with 'dh/', if the hashed
     encoding was used.
     '''
     path = encodedir(path)
     ef = _encodefname(path).split('/')
     res = '/'.join(_auxencode(ef, dotencode))
     if len(res) > _maxstorepathlen:
         res = _hashencode(path, dotencode)
     return res
 
 def _pathencode(path):
     de = encodedir(path)
     if len(path) > _maxstorepathlen:
         return _hashencode(de, True)
     ef = _encodefname(de).split('/')
     res = '/'.join(_auxencode(ef, True))
     if len(res) > _maxstorepathlen:
         return _hashencode(de, True)
     return res
 
 _pathencode = getattr(parsers, 'pathencode', _pathencode)
 
 def _plainhybridencode(f):
     return _hybridencode(f, False)
 
 def _calcmode(vfs):
     try:
         # files in .hg/ will be created using this mode
         mode = vfs.stat().st_mode
         # avoid some useless chmods
         if (0o777 & ~util.umask) == (0o777 & mode):
             mode = None
     except OSError:
         mode = None
     return mode
 
 _data = ('data meta 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
          ' phaseroots obsstore')
 
 class basicstore(object):
     '''base class for local repository stores'''
     def __init__(self, path, vfstype):
         vfs = vfstype(path)
         self.path = vfs.base
         self.createmode = _calcmode(vfs)
         vfs.createmode = self.createmode
         self.rawvfs = vfs
         self.vfs = scmutil.filtervfs(vfs, encodedir)
         self.opener = self.vfs
 
     def join(self, f):
         return self.path + '/' + encodedir(f)
 
     def _walk(self, relpath, recurse):
         '''yields (unencoded, encoded, size)'''
         path = self.path
         if relpath:
             path += '/' + relpath
         striplen = len(self.path) + 1
         l = []
         if self.rawvfs.isdir(path):
             visit = [path]
             readdir = self.rawvfs.readdir
             while visit:
                 p = visit.pop()
                 for f, kind, st in readdir(p, stat=True):
                     fp = p + '/' + f
                     if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
                         n = util.pconvert(fp[striplen:])
                         l.append((decodedir(n), n, st.st_size))
                     elif kind == stat.S_IFDIR and recurse:
                         visit.append(fp)
         l.sort()
         return l
 
     def datafiles(self):
-        return self._walk('data', True)
+        return self._walk('data', True) + self._walk('meta', True)
 
     def topfiles(self):
         # yield manifest before changelog
         return reversed(self._walk('', False))
 
     def walk(self):
         '''yields (unencoded, encoded, size)'''
         # yield data files first
         for x in self.datafiles():
             yield x
         for x in self.topfiles():
             yield x
 
     def copylist(self):
         return ['requires'] + _data.split()
 
     def write(self, tr):
         pass
 
     def invalidatecaches(self):
         pass
 
     def markremoved(self, fn):
         pass
 
     def __contains__(self, path):
         '''Checks if the store contains path'''
         path = "/".join(("data", path))
         # file?
         if self.vfs.exists(path + ".i"):
             return True
         # dir?
         if not path.endswith("/"):
             path = path + "/"
         return self.vfs.exists(path)
 
 class encodedstore(basicstore):
     def __init__(self, path, vfstype):
         vfs = vfstype(path + '/store')
         self.path = vfs.base
         self.createmode = _calcmode(vfs)
         vfs.createmode = self.createmode
         self.rawvfs = vfs
         self.vfs = scmutil.filtervfs(vfs, encodefilename)
         self.opener = self.vfs
 
     def datafiles(self):
-        for a, b, size in self._walk('data', True):
+        for a, b, size in super(encodedstore, self).datafiles():
             try:
                 a = decodefilename(a)
             except KeyError:
                 a = None
             yield a, b, size
 
     def join(self, f):
         return self.path + '/' + encodefilename(f)
 
     def copylist(self):
         return (['requires', '00changelog.i'] +
                 ['store/' + f for f in _data.split()])
 
 class fncache(object):
     # the filename used to be partially encoded
     # hence the encodedir/decodedir dance
     def __init__(self, vfs):
         self.vfs = vfs
         self.entries = None
         self._dirty = False
 
     def _load(self):
         '''fill the entries from the fncache file'''
         self._dirty = False
         try:
             fp = self.vfs('fncache', mode='rb')
         except IOError:
             # skip nonexistent file
             self.entries = set()
             return
         self.entries = set(decodedir(fp.read()).splitlines())
         if '' in self.entries:
             fp.seek(0)
             for n, line in enumerate(fp):
                 if not line.rstrip('\n'):
                     t = _('invalid entry in fncache, line %d') % (n + 1)
                     raise error.Abort(t)
         fp.close()
 
     def write(self, tr):
         if self._dirty:
             tr.addbackup('fncache')
             fp = self.vfs('fncache', mode='wb', atomictemp=True)
             if self.entries:
                 fp.write(encodedir('\n'.join(self.entries) + '\n'))
             fp.close()
             self._dirty = False
 
     def add(self, fn):
         if self.entries is None:
             self._load()
         if fn not in self.entries:
             self._dirty = True
             self.entries.add(fn)
 
     def remove(self, fn):
         if self.entries is None:
             self._load()
         try:
             self.entries.remove(fn)
             self._dirty = True
         except KeyError:
             pass
 
     def __contains__(self, fn):
         if self.entries is None:
             self._load()
         return fn in self.entries
 
     def __iter__(self):
         if self.entries is None:
             self._load()
         return iter(self.entries)
 
 class _fncachevfs(scmutil.abstractvfs, scmutil.auditvfs):
     def __init__(self, vfs, fnc, encode):
         scmutil.auditvfs.__init__(self, vfs)
         self.fncache = fnc
         self.encode = encode
 
     def __call__(self, path, mode='r', *args, **kw):
-        if mode not in ('r', 'rb') and path.startswith('data/'):
+        if mode not in ('r', 'rb') and (path.startswith('data/') or
+                                        path.startswith('meta/')):
             self.fncache.add(path)
         return self.vfs(self.encode(path), mode, *args, **kw)
 
     def join(self, path):
         if path:
             return self.vfs.join(self.encode(path))
         else:
             return self.vfs.join(path)
 
 class fncachestore(basicstore):
     def __init__(self, path, vfstype, dotencode):
         if dotencode:
             encode = _pathencode
         else:
             encode = _plainhybridencode
         self.encode = encode
         vfs = vfstype(path + '/store')
         self.path = vfs.base
         self.pathsep = self.path + '/'
         self.createmode = _calcmode(vfs)
         vfs.createmode = self.createmode
         self.rawvfs = vfs
         fnc = fncache(vfs)
         self.fncache = fnc
         self.vfs = _fncachevfs(vfs, fnc, encode)
         self.opener = self.vfs
 
     def join(self, f):
         return self.pathsep + self.encode(f)
 
     def getsize(self, path):
         return self.rawvfs.stat(path).st_size
 
     def datafiles(self):
         for f in sorted(self.fncache):
             ef = self.encode(f)
             try:
                 yield f, ef, self.getsize(ef)
             except OSError as err:
                 if err.errno != errno.ENOENT:
                     raise
 
     def copylist(self):
         d = ('data meta dh fncache phaseroots obsstore'
              ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
         return (['requires', '00changelog.i'] +
                 ['store/' + f for f in d.split()])
 
     def write(self, tr):
         self.fncache.write(tr)
 
     def invalidatecaches(self):
         self.fncache.entries = None
 
     def markremoved(self, fn):
         self.fncache.remove(fn)
 
     def _exists(self, f):
         ef = self.encode(f)
         try:
             self.getsize(ef)
             return True
         except OSError as err:
             if err.errno != errno.ENOENT:
                 raise
             # nonexistent entry
             return False
 
     def __contains__(self, path):
         '''Checks if the store contains path'''
         path = "/".join(("data", path))
         # check for files (exact match)
         e = path + '.i'
         if e in self.fncache and self._exists(e):
             return True
         # now check for directories (prefix match)
         if not path.endswith('/'):
             path += '/'
         for e in self.fncache:
             if e.startswith(path) and self._exists(e):
                 return True
         return False
 
 def store(requirements, path, vfstype):
     if 'store' in requirements:
         if 'fncache' in requirements:
             return fncachestore(path, vfstype, 'dotencode' in requirements)
         return encodedstore(path, vfstype)
     return basicstore(path, vfstype)
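
Note on the store.py hunk: the fncache is a flat, newline-separated list of
unencoded store paths, kept current by hooking the write path of the vfs. The
change above extends that hook from data/ (filelogs) to meta/ (per-directory
treemanifest revlogs), and makes the walking stores enumerate meta/ as well.
A toy model of the registration logic, using assumed names rather than hg's
real vfs API:

    class toyfncachevfs(object):
        def __init__(self):
            self.entries = set()   # stands in for the fncache contents

        def register(self, path, mode):
            # mirrors _fncachevfs.__call__: any write under data/ or meta/
            # records the unencoded path before the file is created
            if mode not in ('r', 'rb') and (path.startswith('data/') or
                                            path.startswith('meta/')):
                self.entries.add(path)

    vfs = toyfncachevfs()
    vfs.register('data/foo.i', 'a')           # tracked: filelog
    vfs.register('meta/a/00manifest.i', 'w')  # tracked: directory manifest
    vfs.register('00changelog.i', 'a')        # untracked: top-level revlog
    assert vfs.entries == {'data/foo.i', 'meta/a/00manifest.i'}
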
@@ -1,384 +1,384 @@
 # verify.py - repository integrity checking for Mercurial
 #
 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import os
 
 from .i18n import _
 from .node import (
     nullid,
     short,
 )
 
 from . import (
     error,
     revlog,
     util,
 )
 
 def verify(repo):
     with repo.lock():
         return verifier(repo).verify()
 
 def _normpath(f):
     # under hg < 2.4, convert didn't sanitize paths properly, so a
     # converted repo may contain repeated slashes
     while '//' in f:
         f = f.replace('//', '/')
     return f
 
 def _validpath(repo, path):
     """Returns False if a path should NOT be treated as part of a repo.
 
     For all in-core cases, this returns True, as we have no way for a
     path to be mentioned in the history but not actually be
     relevant. For narrow clones, this is important because many
     filelogs will be missing, and changelog entries may mention
     modified files that are outside the narrow scope.
     """
     return True
 
 class verifier(object):
     def __init__(self, repo):
         self.repo = repo.unfiltered()
         self.ui = repo.ui
         self.badrevs = set()
         self.errors = 0
         self.warnings = 0
         self.havecl = len(repo.changelog) > 0
         self.havemf = len(repo.manifest) > 0
         self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
         self.lrugetctx = util.lrucachefunc(repo.changectx)
         self.refersmf = False
         self.fncachewarned = False
 
     def warn(self, msg):
         self.ui.warn(msg + "\n")
         self.warnings += 1
 
     def err(self, linkrev, msg, filename=None):
         if linkrev is not None:
             self.badrevs.add(linkrev)
         else:
             linkrev = '?'
         msg = "%s: %s" % (linkrev, msg)
         if filename:
             msg = "%s@%s" % (filename, msg)
         self.ui.warn(" " + msg + "\n")
         self.errors += 1
 
     def exc(self, linkrev, msg, inst, filename=None):
         if not str(inst):
             inst = repr(inst)
         self.err(linkrev, "%s: %s" % (msg, inst), filename)
 
     def checklog(self, obj, name, linkrev):
         if not len(obj) and (self.havecl or self.havemf):
             self.err(linkrev, _("empty or missing %s") % name)
             return
 
         d = obj.checksize()
         if d[0]:
             self.err(None, _("data length off by %d bytes") % d[0], name)
         if d[1]:
             self.err(None, _("index contains %d extra bytes") % d[1], name)
 
         if obj.version != revlog.REVLOGV0:
             if not self.revlogv1:
                 self.warn(_("warning: `%s' uses revlog format 1") % name)
         elif self.revlogv1:
             self.warn(_("warning: `%s' uses revlog format 0") % name)
 
     def checkentry(self, obj, i, node, seen, linkrevs, f):
         lr = obj.linkrev(obj.rev(node))
         if lr < 0 or (self.havecl and lr not in linkrevs):
             if lr < 0 or lr >= len(self.repo.changelog):
                 msg = _("rev %d points to nonexistent changeset %d")
             else:
                 msg = _("rev %d points to unexpected changeset %d")
             self.err(None, msg % (i, lr), f)
             if linkrevs:
                 if f and len(linkrevs) > 1:
                     try:
                         # attempt to filter down to real linkrevs
                         linkrevs = [l for l in linkrevs
                                     if self.lrugetctx(l)[f].filenode() == node]
                     except Exception:
                         pass
                 self.warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
             lr = None # can't be trusted
 
         try:
             p1, p2 = obj.parents(node)
             if p1 not in seen and p1 != nullid:
                 self.err(lr, _("unknown parent 1 %s of %s") %
                          (short(p1), short(node)), f)
             if p2 not in seen and p2 != nullid:
                 self.err(lr, _("unknown parent 2 %s of %s") %
                          (short(p2), short(node)), f)
         except Exception as inst:
             self.exc(lr, _("checking parents of %s") % short(node), inst, f)
 
         if node in seen:
             self.err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
         seen[node] = i
         return lr
 
     def verify(self):
         repo = self.repo
 
         ui = repo.ui
 
         if not repo.url().startswith('file:'):
             raise error.Abort(_("cannot verify bundle or remote repos"))
 
         if os.path.exists(repo.sjoin("journal")):
             ui.warn(_("abandoned transaction found - run hg recover\n"))
 
         if ui.verbose or not self.revlogv1:
             ui.status(_("repository uses revlog format %d\n") %
                       (self.revlogv1 and 1 or 0))
 
         mflinkrevs, filelinkrevs = self._verifychangelog()
 
         filenodes = self._verifymanifest(mflinkrevs)
 
         self._crosscheckfiles(mflinkrevs, filelinkrevs, filenodes)
         del mflinkrevs
 
         totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)
 
         ui.status(_("%d files, %d changesets, %d total revisions\n") %
                   (totalfiles, len(repo.changelog), filerevisions))
         if self.warnings:
             ui.warn(_("%d warnings encountered!\n") % self.warnings)
         if self.fncachewarned:
             ui.warn(_('hint: run "hg debugrebuildfncache" to recover from '
                       'corrupt fncache\n'))
         if self.errors:
             ui.warn(_("%d integrity errors encountered!\n") % self.errors)
             if self.badrevs:
                 ui.warn(_("(first damaged changeset appears to be %d)\n")
                         % min(self.badrevs))
             return 1
 
     def _verifychangelog(self):
         ui = self.ui
         repo = self.repo
         cl = repo.changelog
 
         ui.status(_("checking changesets\n"))
         mflinkrevs = {}
         filelinkrevs = {}
         seen = {}
         self.checklog(cl, "changelog", 0)
         total = len(repo)
         for i in repo:
             ui.progress(_('checking'), i, total=total, unit=_('changesets'))
             n = cl.node(i)
             self.checkentry(cl, i, n, seen, [i], "changelog")
 
             try:
                 changes = cl.read(n)
                 if changes[0] != nullid:
                     mflinkrevs.setdefault(changes[0], []).append(i)
                     self.refersmf = True
                 for f in changes[3]:
                     if _validpath(repo, f):
                         filelinkrevs.setdefault(_normpath(f), []).append(i)
             except Exception as inst:
                 self.refersmf = True
                 self.exc(i, _("unpacking changeset %s") % short(n), inst)
         ui.progress(_('checking'), None)
         return mflinkrevs, filelinkrevs
 
     def _verifymanifest(self, mflinkrevs):
         repo = self.repo
         ui = self.ui
         mf = self.repo.manifest
 
         ui.status(_("checking manifests\n"))
         filenodes = {}
         seen = {}
         if self.refersmf:
             # Do not check manifest if there are only changelog entries with
             # null manifests.
             self.checklog(mf, "manifest", 0)
         total = len(mf)
         for i in mf:
             ui.progress(_('checking'), i, total=total, unit=_('manifests'))
             n = mf.node(i)
             lr = self.checkentry(mf, i, n, seen, mflinkrevs.get(n, []),
                                  "manifest")
             if n in mflinkrevs:
                 del mflinkrevs[n]
             else:
                 self.err(lr, _("%s not in changesets") % short(n), "manifest")
 
             try:
                 for f, fn in mf.readdelta(n).iteritems():
                     if not f:
                         self.err(lr, _("file without name in manifest"))
                     elif f != "/dev/null": # ignore this in very old repos
                         if _validpath(repo, f):
                             filenodes.setdefault(
                                 _normpath(f), {}).setdefault(fn, lr)
             except Exception as inst:
                 self.exc(lr, _("reading manifest delta %s") % short(n), inst)
         ui.progress(_('checking'), None)
 
         return filenodes
 
     def _crosscheckfiles(self, mflinkrevs, filelinkrevs, filenodes):
         repo = self.repo
         ui = self.ui
         ui.status(_("crosschecking files in changesets and manifests\n"))
 
         total = len(mflinkrevs) + len(filelinkrevs) + len(filenodes)
         count = 0
         if self.havemf:
             for c, m in sorted([(c, m) for m in mflinkrevs
                                 for c in mflinkrevs[m]]):
                 count += 1
                 if m == nullid:
                     continue
                 ui.progress(_('crosschecking'), count, total=total)
                 self.err(c, _("changeset refers to unknown manifest %s") %
                          short(m))
 
         for f in sorted(filelinkrevs):
             count += 1
             ui.progress(_('crosschecking'), count, total=total)
             if f not in filenodes:
                 lr = filelinkrevs[f][0]
                 self.err(lr, _("in changeset but not in manifest"), f)
 
         if self.havecl:
             for f in sorted(filenodes):
                 count += 1
                 ui.progress(_('crosschecking'), count, total=total)
                 if f not in filelinkrevs:
                     try:
                         fl = repo.file(f)
                         lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
                     except Exception:
                         lr = None
                     self.err(lr, _("in manifest but not in changeset"), f)
 
         ui.progress(_('crosschecking'), None)
 
     def _verifyfiles(self, filenodes, filelinkrevs):
         repo = self.repo
         ui = self.ui
         lrugetctx = self.lrugetctx
         revlogv1 = self.revlogv1
         havemf = self.havemf
         ui.status(_("checking files\n"))
 
         storefiles = set()
         for f, f2, size in repo.store.datafiles():
             if not f:
                 self.err(None, _("cannot decode filename '%s'") % f2)
-            elif size > 0 or not revlogv1:
+            elif (size > 0 or not revlogv1) and f.startswith('data/'):
                 storefiles.add(_normpath(f))
 
         files = sorted(set(filenodes) | set(filelinkrevs))
         total = len(files)
         revisions = 0
         for i, f in enumerate(files):
             ui.progress(_('checking'), i, item=f, total=total)
             try:
                 linkrevs = filelinkrevs[f]
             except KeyError:
                 # in manifest but not in changelog
                 linkrevs = []
 
             if linkrevs:
                 lr = linkrevs[0]
             else:
                 lr = None
 
             try:
                 fl = repo.file(f)
             except error.RevlogError as e:
                 self.err(lr, _("broken revlog! (%s)") % e, f)
                 continue
 
             for ff in fl.files():
                 try:
                     storefiles.remove(ff)
                 except KeyError:
                     self.warn(_(" warning: revlog '%s' not in fncache!") % ff)
                     self.fncachewarned = True
 
             self.checklog(fl, f, lr)
             seen = {}
             rp = None
             for i in fl:
                 revisions += 1
                 n = fl.node(i)
                 lr = self.checkentry(fl, i, n, seen, linkrevs, f)
                 if f in filenodes:
                     if havemf and n not in filenodes[f]:
                         self.err(lr, _("%s not in manifests") % (short(n)), f)
                     else:
                         del filenodes[f][n]
 
                 # verify contents
                 try:
                     l = len(fl.read(n))
                     rp = fl.renamed(n)
                     if l != fl.size(i):
                         if len(fl.revision(n)) != fl.size(i):
                             self.err(lr, _("unpacked size is %s, %s expected") %
                                      (l, fl.size(i)), f)
                 except error.CensoredNodeError:
                     # experimental config: censor.policy
                     if ui.config("censor", "policy", "abort") == "abort":
                         self.err(lr, _("censored file data"), f)
                 except Exception as inst:
                     self.exc(lr, _("unpacking %s") % short(n), inst, f)
 
                 # check renames
                 try:
                     if rp:
                         if lr is not None and ui.verbose:
                             ctx = lrugetctx(lr)
                             found = False
                             for pctx in ctx.parents():
                                 if rp[0] in pctx:
                                     found = True
                                     break
                             if not found:
                                 self.warn(_("warning: copy source of '%s' not"
                                             " in parents of %s") % (f, ctx))
                         fl2 = repo.file(rp[0])
                         if not len(fl2):
                             self.err(lr, _("empty or missing copy source "
                                      "revlog %s:%s") % (rp[0], short(rp[1])), f)
                         elif rp[1] == nullid:
                             ui.note(_("warning: %s@%s: copy source"
                                       " revision is nullid %s:%s\n")
                                     % (f, lr, rp[0], short(rp[1])))
                         else:
                             fl2.rev(rp[1])
                 except Exception as inst:
                     self.exc(lr, _("checking rename of %s") % short(n), inst, f)
 
             # cross-check
             if f in filenodes:
                 fns = [(lr, n) for n, lr in filenodes[f].iteritems()]
                 for lr, node in sorted(fns):
                     self.err(lr, _("%s in manifests not found") % short(node),
                              f)
         ui.progress(_('checking'), None)
 
         for f in storefiles:
             self.warn(_("warning: orphan revlog '%s'") % f)
 
         return len(files), revisions
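
Note on the verify.py hunk: store.datafiles() now also yields the meta/
revlogs, but the orphan cross-check in _verifyfiles only ever removes entries
for filelogs, which live under data/, so storefiles is restricted to data/ to
keep every directory manifest from being reported as an orphan revlog. A
sketch with hypothetical values (revlogv1 assumed true, so only the size and
prefix checks matter):

    datafiles = [
        ('data/foo.i', 'data/foo.i', 64),                    # cross-checked
        ('meta/a/00manifest.i', 'meta/a/00manifest.i', 64),  # skipped here
    ]
    storefiles = set(f for f, f2, size in datafiles
                     if size > 0 and f.startswith('data/'))
    assert storefiles == {'data/foo.i'}
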
@@ -1,2384 +1,2388 @@
 #!/usr/bin/env python
 #
 # run-tests.py - Run a set of tests on Mercurial
 #
 # Copyright 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 # Modifying this script is tricky because it has many modes:
 #   - serial (default) vs parallel (-jN, N > 1)
 #   - no coverage (default) vs coverage (-c, -C, -s)
 #   - temp install (default) vs specific hg script (--with-hg, --local)
 #   - tests are a mix of shell scripts and Python scripts
 #
 # If you change this script, it is recommended that you ensure you
 # haven't broken it by running it in various modes with a representative
 # sample of test scripts. For example:
 #
 #  1) serial, no coverage, temp install:
 #      ./run-tests.py test-s*
 #  2) serial, no coverage, local hg:
 #      ./run-tests.py --local test-s*
 #  3) serial, coverage, temp install:
 #      ./run-tests.py -c test-s*
 #  4) serial, coverage, local hg:
 #      ./run-tests.py -c --local test-s* # unsupported
 #  5) parallel, no coverage, temp install:
 #      ./run-tests.py -j2 test-s*
 #  6) parallel, no coverage, local hg:
 #      ./run-tests.py -j2 --local test-s*
 #  7) parallel, coverage, temp install:
 #      ./run-tests.py -j2 -c test-s* # currently broken
 #  8) parallel, coverage, local install:
 #      ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
 #  9) parallel, custom tmp dir:
 #      ./run-tests.py -j2 --tmpdir /tmp/myhgtests
 #  10) parallel, pure, tests that call run-tests:
 #      ./run-tests.py --pure `grep -l run-tests.py *.t`
 #
 # (You could use any subset of the tests: test-s* happens to match
 # enough that it's worth doing parallel runs, few enough that it
 # completes fairly quickly, includes both shell and Python scripts, and
 # includes some scripts that run daemon processes.)
 
 from __future__ import print_function
 
 from distutils import version
 import difflib
 import errno
 import optparse
 import os
 import shutil
 import subprocess
 import signal
 import socket
 import sys
 import tempfile
 import time
 import random
 import re
 import threading
 import killdaemons as killmod
 try:
     import Queue as queue
 except ImportError:
     import queue
 from xml.dom import minidom
 import unittest
 
 osenvironb = getattr(os, 'environb', os.environ)
 
 try:
     import json
 except ImportError:
     try:
         import simplejson as json
     except ImportError:
         json = None
 
 processlock = threading.Lock()
 
 if sys.version_info > (3, 5, 0):
     PYTHON3 = True
     xrange = range # we use xrange in one place, and we'd rather not use range
     def _bytespath(p):
         return p.encode('utf-8')
 
     def _strpath(p):
         return p.decode('utf-8')
 
 elif sys.version_info >= (3, 0, 0):
     print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
           (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
     sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
 else:
     PYTHON3 = False
 
     # In python 2.x, path operations are generally done using
     # bytestrings by default, so we don't have to do any extra
     # fiddling there. We define the wrapper functions anyway just to
     # help keep code consistent between platforms.
     def _bytespath(p):
         return p
 
     _strpath = _bytespath
 
 # For Windows support
 wifexited = getattr(os, "WIFEXITED", lambda x: False)
 
 def checkportisavailable(port):
     """return true if a port seems free to bind on localhost"""
     try:
         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         s.bind(('localhost', port))
         s.close()
         return True
     except socket.error as exc:
         if not exc.errno == errno.EADDRINUSE:
             raise
         return False
 
 closefds = os.name == 'posix'
 def Popen4(cmd, wd, timeout, env=None):
     processlock.acquire()
     p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                          close_fds=closefds,
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
     processlock.release()
 
     p.fromchild = p.stdout
     p.tochild = p.stdin
     p.childerr = p.stderr
 
     p.timeout = False
     if timeout:
         def t():
             start = time.time()
             while time.time() - start < timeout and p.returncode is None:
                 time.sleep(.1)
             p.timeout = True
             if p.returncode is None:
                 terminate(p)
         threading.Thread(target=t).start()
 
     return p
 
 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
 IMPL_PATH = b'PYTHONPATH'
 if 'java' in sys.platform:
     IMPL_PATH = b'JYTHONPATH'
 
 defaults = {
     'jobs': ('HGTEST_JOBS', 1),
     'timeout': ('HGTEST_TIMEOUT', 180),
     'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
     'port': ('HGTEST_PORT', 20059),
     'shell': ('HGTEST_SHELL', 'sh'),
 }
 
 def parselistfiles(files, listtype, warn=True):
     entries = dict()
     for filename in files:
         try:
             path = os.path.expanduser(os.path.expandvars(filename))
             f = open(path, "rb")
         except IOError as err:
             if err.errno != errno.ENOENT:
                 raise
             if warn:
                 print("warning: no such %s file: %s" % (listtype, filename))
             continue
 
         for line in f.readlines():
             line = line.split(b'#', 1)[0].strip()
             if line:
                 entries[line] = filename
 
         f.close()
     return entries
 
 def getparser():
     """Obtain the OptionParser used by the CLI."""
     parser = optparse.OptionParser("%prog [options] [tests]")
 
     # keep these sorted
     parser.add_option("--blacklist", action="append",
         help="skip tests listed in the specified blacklist file")
     parser.add_option("--whitelist", action="append",
         help="always run tests listed in the specified whitelist file")
     parser.add_option("--changed", type="string",
         help="run tests that are changed in parent rev or working directory")
     parser.add_option("-C", "--annotate", action="store_true",
         help="output files annotated with coverage")
     parser.add_option("-c", "--cover", action="store_true",
         help="print a test coverage report")
     parser.add_option("-d", "--debug", action="store_true",
         help="debug mode: write output of test scripts to console"
              " rather than capturing and diffing it (disables timeout)")
     parser.add_option("-f", "--first", action="store_true",
         help="exit on the first test failure")
     parser.add_option("-H", "--htmlcov", action="store_true",
         help="create an HTML report of the coverage of the files")
     parser.add_option("-i", "--interactive", action="store_true",
         help="prompt to accept changed output")
     parser.add_option("-j", "--jobs", type="int",
         help="number of jobs to run in parallel"
              " (default: $%s or %d)" % defaults['jobs'])
|
210 | 210 | parser.add_option("--keep-tmpdir", action="store_true", |
|
211 | 211 | help="keep temporary directory after running tests") |
|
212 | 212 | parser.add_option("-k", "--keywords", |
|
213 | 213 | help="run tests matching keywords") |
|
214 | 214 | parser.add_option("-l", "--local", action="store_true", |
|
215 | 215 | help="shortcut for --with-hg=<testdir>/../hg") |
|
216 | 216 | parser.add_option("--loop", action="store_true", |
|
217 | 217 | help="loop tests repeatedly") |
|
218 | 218 | parser.add_option("--runs-per-test", type="int", dest="runs_per_test", |
|
219 | 219 | help="run each test N times (default=1)", default=1) |
|
220 | 220 | parser.add_option("-n", "--nodiff", action="store_true", |
|
221 | 221 | help="skip showing test changes") |
|
222 | 222 | parser.add_option("-p", "--port", type="int", |
|
223 | 223 | help="port on which servers should listen" |
|
224 | 224 | " (default: $%s or %d)" % defaults['port']) |
|
225 | 225 | parser.add_option("--compiler", type="string", |
|
226 | 226 | help="compiler to build with") |
|
227 | 227 | parser.add_option("--pure", action="store_true", |
|
228 | 228 | help="use pure Python code instead of C extensions") |
|
229 | 229 | parser.add_option("-R", "--restart", action="store_true", |
|
230 | 230 | help="restart at last error") |
|
231 | 231 | parser.add_option("-r", "--retest", action="store_true", |
|
232 | 232 | help="retest failed tests") |
|
233 | 233 | parser.add_option("-S", "--noskips", action="store_true", |
|
234 | 234 | help="don't report skip tests verbosely") |
|
235 | 235 | parser.add_option("--shell", type="string", |
|
236 | 236 | help="shell to use (default: $%s or %s)" % defaults['shell']) |
|
237 | 237 | parser.add_option("-t", "--timeout", type="int", |
|
238 | 238 | help="kill errant tests after TIMEOUT seconds" |
|
239 | 239 | " (default: $%s or %d)" % defaults['timeout']) |
|
240 | 240 | parser.add_option("--slowtimeout", type="int", |
|
241 | 241 | help="kill errant slow tests after SLOWTIMEOUT seconds" |
|
242 | 242 | " (default: $%s or %d)" % defaults['slowtimeout']) |
|
243 | 243 | parser.add_option("--time", action="store_true", |
|
244 | 244 | help="time how long each test takes") |
|
245 | 245 | parser.add_option("--json", action="store_true", |
|
246 | 246 | help="store test result data in 'report.json' file") |
|
247 | 247 | parser.add_option("--tmpdir", type="string", |
|
248 | 248 | help="run tests in the given temporary directory" |
|
249 | 249 | " (implies --keep-tmpdir)") |
|
250 | 250 | parser.add_option("-v", "--verbose", action="store_true", |
|
251 | 251 | help="output verbose messages") |
|
252 | 252 | parser.add_option("--xunit", type="string", |
|
253 | 253 | help="record xunit results at specified path") |
|
254 | 254 | parser.add_option("--view", type="string", |
|
255 | 255 | help="external diff viewer") |
|
256 | 256 | parser.add_option("--with-hg", type="string", |
|
257 | 257 | metavar="HG", |
|
258 | 258 | help="test using specified hg script rather than a " |
|
259 | 259 | "temporary installation") |
|
260 | 260 | parser.add_option("-3", "--py3k-warnings", action="store_true", |
|
261 | 261 | help="enable Py3k warnings on Python 2.6+") |
|
262 | 262 | parser.add_option('--extra-config-opt', action="append", |
|
263 | 263 | help='set the given config opt in the test hgrc') |
|
264 | 264 | parser.add_option('--random', action="store_true", |
|
265 | 265 | help='run tests in random order') |
|
266 | 266 | parser.add_option('--profile-runner', action='store_true', |
|
267 | 267 | help='run statprof on run-tests') |
|
268 | 268 | parser.add_option('--allow-slow-tests', action='store_true', |
|
269 | 269 | help='allow extremely slow tests') |
|
270 | 270 | parser.add_option('--showchannels', action='store_true', |
|
271 | 271 | help='show scheduling channels') |
|
272 | 272 | |
|
273 | 273 | for option, (envvar, default) in defaults.items(): |
|
274 | 274 | defaults[option] = type(default)(os.environ.get(envvar, default)) |
|
275 | 275 | parser.set_defaults(**defaults) |
|
276 | 276 | |
|
277 | 277 | return parser |
|
278 | 278 | |
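
An aside on the defaults table: the loop just before getparser()'s return coerces any HGTEST_* environment override to the type of the built-in default, so numeric options arrive as ints even though the environment only supplies strings. A minimal standalone sketch of that coercion pattern (trimmed table, not the full one):

import os

defaults = {
    'jobs': ('HGTEST_JOBS', 1),       # int default -> int coercion
    'shell': ('HGTEST_SHELL', 'sh'),  # str default -> str coercion
}
for option, (envvar, default) in defaults.items():
    defaults[option] = type(default)(os.environ.get(envvar, default))

# With HGTEST_JOBS=4 exported, defaults['jobs'] == 4 (an int, not '4').
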
|
279 | 279 | def parseargs(args, parser): |
|
280 | 280 | """Parse arguments with our OptionParser and validate results.""" |
|
281 | 281 | (options, args) = parser.parse_args(args) |
|
282 | 282 | |
|
283 | 283 | # jython is always pure |
|
284 | 284 | if 'java' in sys.platform or '__pypy__' in sys.modules: |
|
285 | 285 | options.pure = True |
|
286 | 286 | |
|
287 | 287 | if options.with_hg: |
|
288 | 288 | options.with_hg = os.path.expanduser(options.with_hg) |
|
289 | 289 | if not (os.path.isfile(options.with_hg) and |
|
290 | 290 | os.access(options.with_hg, os.X_OK)): |
|
291 | 291 | parser.error('--with-hg must specify an executable hg script') |
|
292 | 292 | if os.path.basename(options.with_hg) != 'hg':
|
293 | 293 | sys.stderr.write('warning: --with-hg should specify an hg script\n') |
|
294 | 294 | if options.local: |
|
295 | 295 | testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0]))) |
|
296 | 296 | hgbin = os.path.join(os.path.dirname(testdir), b'hg') |
|
297 | 297 | if os.name != 'nt' and not os.access(hgbin, os.X_OK): |
|
298 | 298 | parser.error('--local specified, but %r not found or not executable' |
|
299 | 299 | % hgbin) |
|
300 | 300 | options.with_hg = hgbin |
|
301 | 301 | |
|
302 | 302 | options.anycoverage = options.cover or options.annotate or options.htmlcov |
|
303 | 303 | if options.anycoverage: |
|
304 | 304 | try: |
|
305 | 305 | import coverage |
|
306 | 306 | covver = version.StrictVersion(coverage.__version__).version |
|
307 | 307 | if covver < (3, 3): |
|
308 | 308 | parser.error('coverage options require coverage 3.3 or later') |
|
309 | 309 | except ImportError: |
|
310 | 310 | parser.error('coverage options now require the coverage package') |
|
311 | 311 | |
|
312 | 312 | if options.anycoverage and options.local: |
|
313 | 313 | # this needs some path mangling somewhere, I guess |
|
314 | 314 | parser.error("sorry, coverage options do not work when --local " |
|
315 | 315 | "is specified") |
|
316 | 316 | |
|
317 | 317 | if options.anycoverage and options.with_hg: |
|
318 | 318 | parser.error("sorry, coverage options do not work when --with-hg " |
|
319 | 319 | "is specified") |
|
320 | 320 | |
|
321 | 321 | global verbose |
|
322 | 322 | if options.verbose: |
|
323 | 323 | verbose = '' |
|
324 | 324 | |
|
325 | 325 | if options.tmpdir: |
|
326 | 326 | options.tmpdir = os.path.expanduser(options.tmpdir) |
|
327 | 327 | |
|
328 | 328 | if options.jobs < 1: |
|
329 | 329 | parser.error('--jobs must be positive') |
|
330 | 330 | if options.interactive and options.debug: |
|
331 | 331 | parser.error("-i/--interactive and -d/--debug are incompatible") |
|
332 | 332 | if options.debug: |
|
333 | 333 | if options.timeout != defaults['timeout']: |
|
334 | 334 | sys.stderr.write( |
|
335 | 335 | 'warning: --timeout option ignored with --debug\n') |
|
336 | 336 | if options.slowtimeout != defaults['slowtimeout']: |
|
337 | 337 | sys.stderr.write( |
|
338 | 338 | 'warning: --slowtimeout option ignored with --debug\n') |
|
339 | 339 | options.timeout = 0 |
|
340 | 340 | options.slowtimeout = 0 |
|
341 | 341 | if options.py3k_warnings: |
|
342 | 342 | if PYTHON3: |
|
343 | 343 | parser.error( |
|
344 | 344 | '--py3k-warnings can only be used on Python 2.6 and 2.7') |
|
345 | 345 | if options.blacklist: |
|
346 | 346 | options.blacklist = parselistfiles(options.blacklist, 'blacklist') |
|
347 | 347 | if options.whitelist: |
|
348 | 348 | options.whitelisted = parselistfiles(options.whitelist, 'whitelist') |
|
349 | 349 | else: |
|
350 | 350 | options.whitelisted = {} |
|
351 | 351 | |
|
352 | 352 | if options.showchannels: |
|
353 | 353 | options.nodiff = True |
|
354 | 354 | |
|
355 | 355 | return (options, args) |
|
356 | 356 | |
|
357 | 357 | def rename(src, dst): |
|
358 | 358 | """Like os.rename(), trade atomicity and opened files friendliness |
|
359 | 359 | for existing destination support. |
|
360 | 360 | """ |
|
361 | 361 | shutil.copy(src, dst) |
|
362 | 362 | os.remove(src) |
|
363 | 363 | |
|
364 | 364 | _unified_diff = difflib.unified_diff |
|
365 | 365 | if PYTHON3: |
|
366 | 366 | import functools |
|
367 | 367 | _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff) |
|
368 | 368 | |
|
369 | 369 | def getdiff(expected, output, ref, err): |
|
370 | 370 | servefail = False |
|
371 | 371 | lines = [] |
|
372 | 372 | for line in _unified_diff(expected, output, ref, err): |
|
373 | 373 | if line.startswith(b'+++') or line.startswith(b'---'): |
|
374 | 374 | line = line.replace(b'\\', b'/') |
|
375 | 375 | if line.endswith(b' \n'): |
|
376 | 376 | line = line[:-2] + b'\n' |
|
377 | 377 | lines.append(line) |
|
378 | 378 | if not servefail and line.startswith( |
|
379 | 379 | b'+ abort: child process failed to start'): |
|
380 | 380 | servefail = True |
|
381 | 381 | |
|
382 | 382 | return servefail, lines |
|
383 | 383 | |
|
384 | 384 | verbose = False |
|
385 | 385 | def vlog(*msg): |
|
386 | 386 | """Log only when in verbose mode.""" |
|
387 | 387 | if verbose is False: |
|
388 | 388 | return |
|
389 | 389 | |
|
390 | 390 | return log(*msg) |
|
391 | 391 | |
|
392 | 392 | # Bytes that break XML even in a CDATA block: control characters 0-31 |
|
393 | 393 | # sans \t, \n and \r |
|
394 | 394 | CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]") |
|
395 | 395 | |
|
396 | 396 | def cdatasafe(data): |
|
397 | 397 | """Make a string safe to include in a CDATA block. |
|
398 | 398 | |
|
399 | 399 | Certain control characters are illegal in a CDATA block, and |
|
400 | 400 | there's no way to include a ]]> in a CDATA either. This function |
|
401 | 401 | replaces illegal bytes with ? and adds a space between the ]] so |
|
402 | 402 | that it won't break the CDATA block. |
|
403 | 403 | """ |
|
404 | 404 | return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>') |
|
405 | 405 | |
|
406 | 406 | def log(*msg): |
|
407 | 407 | """Log something to stdout. |
|
408 | 408 | |
|
409 | 409 | Arguments are strings to print. |
|
410 | 410 | """ |
|
411 | 411 | with iolock: |
|
412 | 412 | if verbose: |
|
413 | 413 | print(verbose, end=' ') |
|
414 | 414 | for m in msg: |
|
415 | 415 | print(m, end=' ') |
|
416 | 416 | print() |
|
417 | 417 | sys.stdout.flush() |
|
418 | 418 | |
|
419 | 419 | def terminate(proc): |
|
420 | 420 | """Terminate subprocess (with fallback for Python versions < 2.6)""" |
|
421 | 421 | vlog('# Terminating process %d' % proc.pid) |
|
422 | 422 | try: |
|
423 | 423 | getattr(proc, 'terminate', lambda : os.kill(proc.pid, signal.SIGTERM))() |
|
424 | 424 | except OSError: |
|
425 | 425 | pass |
|
426 | 426 | |
|
427 | 427 | def killdaemons(pidfile): |
|
428 | 428 | return killmod.killdaemons(pidfile, tryhard=False, remove=True, |
|
429 | 429 | logfn=vlog) |
|
430 | 430 | |
|
431 | 431 | class Test(unittest.TestCase): |
|
432 | 432 | """Encapsulates a single, runnable test. |
|
433 | 433 | |
|
434 | 434 | While this class conforms to the unittest.TestCase API, it differs in that |
|
435 | 435 | instances need to be instantiated manually. (Typically, unittest.TestCase |
|
436 | 436 | classes are instantiated automatically by scanning modules.) |
|
437 | 437 | """ |
|
438 | 438 | |
|
439 | 439 | # Status code reserved for skipped tests (used by hghave). |
|
440 | 440 | SKIPPED_STATUS = 80 |
|
441 | 441 | |
|
442 | 442 | def __init__(self, path, tmpdir, keeptmpdir=False, |
|
443 | 443 | debug=False, |
|
444 | 444 | timeout=defaults['timeout'], |
|
445 | 445 | startport=defaults['port'], extraconfigopts=None, |
|
446 | 446 | py3kwarnings=False, shell=None, |
|
447 | 447 | slowtimeout=defaults['slowtimeout']): |
|
448 | 448 | """Create a test from parameters. |
|
449 | 449 | |
|
450 | 450 | path is the full path to the file defining the test. |
|
451 | 451 | |
|
452 | 452 | tmpdir is the main temporary directory to use for this test. |
|
453 | 453 | |
|
454 | 454 | keeptmpdir determines whether to keep the test's temporary directory |
|
455 | 455 | after execution. It defaults to removal (False). |
|
456 | 456 | |
|
457 | 457 | debug mode will make the test execute verbosely, with unfiltered |
|
458 | 458 | output. |
|
459 | 459 | |
|
460 | 460 | timeout controls the maximum run time of the test. It is ignored when |
|
461 | 461 | debug is True. See slowtimeout for tests with #require slow. |
|
462 | 462 | |
|
463 | 463 | slowtimeout overrides timeout if the test has #require slow. |
|
464 | 464 | |
|
465 | 465 | startport controls the starting port number to use for this test. Each |
|
466 | 466 | test will reserve 5 port numbers for execution. It is the caller's
|
467 | 467 | responsibility to allocate a non-overlapping port range to Test |
|
468 | 468 | instances. |
|
469 | 469 | |
|
470 | 470 | extraconfigopts is an iterable of extra hgrc config options. Values |
|
471 | 471 | must have the form "key=value" (something understood by hgrc). Values |
|
472 | 472 | of the form "foo.key=value" will result in "[foo] key=value". |
|
473 | 473 | |
|
474 | 474 | py3kwarnings enables Py3k warnings. |
|
475 | 475 | |
|
476 | 476 | shell is the shell to execute tests in. |
|
477 | 477 | """ |
|
478 | 478 | self.path = path |
|
479 | 479 | self.bname = os.path.basename(path) |
|
480 | 480 | self.name = _strpath(self.bname) |
|
481 | 481 | self._testdir = os.path.dirname(path) |
|
482 | 482 | self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname) |
|
483 | 483 | |
|
484 | 484 | self._threadtmp = tmpdir |
|
485 | 485 | self._keeptmpdir = keeptmpdir |
|
486 | 486 | self._debug = debug |
|
487 | 487 | self._timeout = timeout |
|
488 | 488 | self._slowtimeout = slowtimeout |
|
489 | 489 | self._startport = startport |
|
490 | 490 | self._extraconfigopts = extraconfigopts or [] |
|
491 | 491 | self._py3kwarnings = py3kwarnings |
|
492 | 492 | self._shell = _bytespath(shell) |
|
493 | 493 | |
|
494 | 494 | self._aborted = False |
|
495 | 495 | self._daemonpids = [] |
|
496 | 496 | self._finished = None |
|
497 | 497 | self._ret = None |
|
498 | 498 | self._out = None |
|
499 | 499 | self._skipped = None |
|
500 | 500 | self._testtmp = None |
|
501 | 501 | |
|
502 | 502 | # If we're not in --debug mode and reference output file exists, |
|
503 | 503 | # check test output against it. |
|
504 | 504 | if debug: |
|
505 | 505 | self._refout = None # to match "out is None" |
|
506 | 506 | elif os.path.exists(self.refpath): |
|
507 | 507 | f = open(self.refpath, 'rb') |
|
508 | 508 | self._refout = f.read().splitlines(True) |
|
509 | 509 | f.close() |
|
510 | 510 | else: |
|
511 | 511 | self._refout = [] |
|
512 | 512 | |
|
513 | 513 | # needed to get base class __repr__ running |
|
514 | 514 | @property |
|
515 | 515 | def _testMethodName(self): |
|
516 | 516 | return self.name |
|
517 | 517 | |
|
518 | 518 | def __str__(self): |
|
519 | 519 | return self.name |
|
520 | 520 | |
|
521 | 521 | def shortDescription(self): |
|
522 | 522 | return self.name |
|
523 | 523 | |
|
524 | 524 | def setUp(self): |
|
525 | 525 | """Tasks to perform before run().""" |
|
526 | 526 | self._finished = False |
|
527 | 527 | self._ret = None |
|
528 | 528 | self._out = None |
|
529 | 529 | self._skipped = None |
|
530 | 530 | |
|
531 | 531 | try: |
|
532 | 532 | os.mkdir(self._threadtmp) |
|
533 | 533 | except OSError as e: |
|
534 | 534 | if e.errno != errno.EEXIST: |
|
535 | 535 | raise |
|
536 | 536 | |
|
537 | 537 | self._testtmp = os.path.join(self._threadtmp, |
|
538 | 538 | os.path.basename(self.path)) |
|
539 | 539 | os.mkdir(self._testtmp) |
|
540 | 540 | |
|
541 | 541 | # Remove any previous output files. |
|
542 | 542 | if os.path.exists(self.errpath): |
|
543 | 543 | try: |
|
544 | 544 | os.remove(self.errpath) |
|
545 | 545 | except OSError as e: |
|
546 | 546 | # We might have raced another test to clean up a .err |
|
547 | 547 | # file, so ignore ENOENT when removing a previous .err |
|
548 | 548 | # file. |
|
549 | 549 | if e.errno != errno.ENOENT: |
|
550 | 550 | raise |
|
551 | 551 | |
|
552 | 552 | def run(self, result): |
|
553 | 553 | """Run this test and report results against a TestResult instance.""" |
|
554 | 554 | # This function is extremely similar to unittest.TestCase.run(). Once |
|
555 | 555 | # we require Python 2.7 (or at least its version of unittest), this |
|
556 | 556 | # function can largely go away. |
|
557 | 557 | self._result = result |
|
558 | 558 | result.startTest(self) |
|
559 | 559 | try: |
|
560 | 560 | try: |
|
561 | 561 | self.setUp() |
|
562 | 562 | except (KeyboardInterrupt, SystemExit): |
|
563 | 563 | self._aborted = True |
|
564 | 564 | raise |
|
565 | 565 | except Exception: |
|
566 | 566 | result.addError(self, sys.exc_info()) |
|
567 | 567 | return |
|
568 | 568 | |
|
569 | 569 | success = False |
|
570 | 570 | try: |
|
571 | 571 | self.runTest() |
|
572 | 572 | except KeyboardInterrupt: |
|
573 | 573 | self._aborted = True |
|
574 | 574 | raise |
|
575 | 575 | except SkipTest as e: |
|
576 | 576 | result.addSkip(self, str(e)) |
|
577 | 577 | # The base class will have already counted this as a |
|
578 | 578 | # test we "ran", but we want to exclude skipped tests |
|
579 | 579 | # from those we count towards those run. |
|
580 | 580 | result.testsRun -= 1 |
|
581 | 581 | except IgnoreTest as e: |
|
582 | 582 | result.addIgnore(self, str(e)) |
|
583 | 583 | # As with skips, ignores also should be excluded from |
|
584 | 584 | # the number of tests executed. |
|
585 | 585 | result.testsRun -= 1 |
|
586 | 586 | except WarnTest as e: |
|
587 | 587 | result.addWarn(self, str(e)) |
|
588 | 588 | except ReportedTest as e: |
|
589 | 589 | pass |
|
590 | 590 | except self.failureException as e: |
|
591 | 591 | # This differs from unittest in that we don't capture |
|
592 | 592 | # the stack trace. This is for historical reasons and |
|
593 | 593 | # this decision could be revisited in the future, |
|
594 | 594 | # especially for PythonTest instances. |
|
595 | 595 | if result.addFailure(self, str(e)): |
|
596 | 596 | success = True |
|
597 | 597 | except Exception: |
|
598 | 598 | result.addError(self, sys.exc_info()) |
|
599 | 599 | else: |
|
600 | 600 | success = True |
|
601 | 601 | |
|
602 | 602 | try: |
|
603 | 603 | self.tearDown() |
|
604 | 604 | except (KeyboardInterrupt, SystemExit): |
|
605 | 605 | self._aborted = True |
|
606 | 606 | raise |
|
607 | 607 | except Exception: |
|
608 | 608 | result.addError(self, sys.exc_info()) |
|
609 | 609 | success = False |
|
610 | 610 | |
|
611 | 611 | if success: |
|
612 | 612 | result.addSuccess(self) |
|
613 | 613 | finally: |
|
614 | 614 | result.stopTest(self, interrupted=self._aborted) |
|
615 | 615 | |
|
616 | 616 | def runTest(self): |
|
617 | 617 | """Run this test instance. |
|
618 | 618 | |
|
619 | 619 | This will return a tuple describing the result of the test. |
|
620 | 620 | """ |
|
621 | 621 | env = self._getenv() |
|
622 | 622 | self._daemonpids.append(env['DAEMON_PIDS']) |
|
623 | 623 | self._createhgrc(env['HGRCPATH']) |
|
624 | 624 | |
|
625 | 625 | vlog('# Test', self.name) |
|
626 | 626 | |
|
627 | 627 | ret, out = self._run(env) |
|
628 | 628 | self._finished = True |
|
629 | 629 | self._ret = ret |
|
630 | 630 | self._out = out |
|
631 | 631 | |
|
632 | 632 | def describe(ret): |
|
633 | 633 | if ret < 0: |
|
634 | 634 | return 'killed by signal: %d' % -ret |
|
635 | 635 | return 'returned error code %d' % ret |
|
636 | 636 | |
|
637 | 637 | self._skipped = False |
|
638 | 638 | |
|
639 | 639 | if ret == self.SKIPPED_STATUS: |
|
640 | 640 | if out is None: # Debug mode, nothing to parse. |
|
641 | 641 | missing = ['unknown'] |
|
642 | 642 | failed = None |
|
643 | 643 | else: |
|
644 | 644 | missing, failed = TTest.parsehghaveoutput(out) |
|
645 | 645 | |
|
646 | 646 | if not missing: |
|
647 | 647 | missing = ['skipped'] |
|
648 | 648 | |
|
649 | 649 | if failed: |
|
650 | 650 | self.fail('hghave failed checking for %s' % failed[-1])
|
651 | 651 | else: |
|
652 | 652 | self._skipped = True |
|
653 | 653 | raise SkipTest(missing[-1]) |
|
654 | 654 | elif ret == 'timeout': |
|
655 | 655 | self.fail('timed out') |
|
656 | 656 | elif ret is False: |
|
657 | 657 | raise WarnTest('no result code from test') |
|
658 | 658 | elif out != self._refout: |
|
659 | 659 | # Diff generation may rely on the written .err file.
|
660 | 660 | if (ret != 0 or out != self._refout) and not self._skipped \ |
|
661 | 661 | and not self._debug: |
|
662 | 662 | f = open(self.errpath, 'wb') |
|
663 | 663 | for line in out: |
|
664 | 664 | f.write(line) |
|
665 | 665 | f.close() |
|
666 | 666 | |
|
667 | 667 | # The result object handles diff calculation for us. |
|
668 | 668 | if self._result.addOutputMismatch(self, ret, out, self._refout): |
|
669 | 669 | # change was accepted, skip failing |
|
670 | 670 | return |
|
671 | 671 | |
|
672 | 672 | if ret: |
|
673 | 673 | msg = 'output changed and ' + describe(ret) |
|
674 | 674 | else: |
|
675 | 675 | msg = 'output changed' |
|
676 | 676 | |
|
677 | 677 | self.fail(msg) |
|
678 | 678 | elif ret: |
|
679 | 679 | self.fail(describe(ret)) |
|
680 | 680 | |
|
681 | 681 | def tearDown(self): |
|
682 | 682 | """Tasks to perform after run().""" |
|
683 | 683 | for entry in self._daemonpids: |
|
684 | 684 | killdaemons(entry) |
|
685 | 685 | self._daemonpids = [] |
|
686 | 686 | |
|
687 | 687 | if self._keeptmpdir: |
|
688 | 688 | log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' % |
|
689 | 689 | (self._testtmp, self._threadtmp)) |
|
690 | 690 | else: |
|
691 | 691 | shutil.rmtree(self._testtmp, True) |
|
692 | 692 | shutil.rmtree(self._threadtmp, True) |
|
693 | 693 | |
|
694 | 694 | if (self._ret != 0 or self._out != self._refout) and not self._skipped \ |
|
695 | 695 | and not self._debug and self._out: |
|
696 | 696 | f = open(self.errpath, 'wb') |
|
697 | 697 | for line in self._out: |
|
698 | 698 | f.write(line) |
|
699 | 699 | f.close() |
|
700 | 700 | |
|
701 | 701 | vlog("# Ret was:", self._ret, '(%s)' % self.name) |
|
702 | 702 | |
|
703 | 703 | def _run(self, env): |
|
704 | 704 | # This should be implemented in child classes to run tests. |
|
705 | 705 | raise SkipTest('unknown test type') |
|
706 | 706 | |
|
707 | 707 | def abort(self): |
|
708 | 708 | """Terminate execution of this test.""" |
|
709 | 709 | self._aborted = True |
|
710 | 710 | |
|
711 | 711 | def _getreplacements(self): |
|
712 | 712 | """Obtain a mapping of text replacements to apply to test output. |
|
713 | 713 | |
|
714 | 714 | Test output needs to be normalized so it can be compared to expected |
|
715 | 715 | output. This function defines how some of that normalization will |
|
716 | 716 | occur. |
|
717 | 717 | """ |
|
718 | 718 | r = [ |
|
719 | 719 | (br':%d\b' % self._startport, b':$HGPORT'), |
|
720 | 720 | (br':%d\b' % (self._startport + 1), b':$HGPORT1'), |
|
721 | 721 | (br':%d\b' % (self._startport + 2), b':$HGPORT2'), |
|
722 | (br':%d\b' % (self._startport + 3), b':$HGPORT3'), | 

723 | (br':%d\b' % (self._startport + 4), b':$HGPORT4'), | 
|
722 | 724 | (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$', |
|
723 | 725 | br'\1 (glob)'), |
|
724 | 726 | ] |
|
725 | 727 | |
|
726 | 728 | if os.name == 'nt': |
|
727 | 729 | r.append( |
|
728 | 730 | (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or |
|
729 | 731 | c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c |
|
730 | 732 | for c in self._testtmp), b'$TESTTMP')) |
|
731 | 733 | else: |
|
732 | 734 | r.append((re.escape(self._testtmp), b'$TESTTMP')) |
|
733 | 735 | |
|
734 | 736 | return r |
|
735 | 737 | |
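
To illustrate these replacements (sample output line invented): with the default start port, a server banner mentioning the second reserved port is rewritten to the stable $HGPORT1 placeholder before the output is diffed:

import re

startport = 20059  # the HGTEST_PORT default
replacements = [
    (br':%d\b' % startport, b':$HGPORT'),
    (br':%d\b' % (startport + 1), b':$HGPORT1'),
    (br':%d\b' % (startport + 2), b':$HGPORT2'),
]
line = b'listening at http://localhost:20060/ (bound to *:20060)\n'
for pattern, repl in replacements:
    line = re.sub(pattern, repl, line)
assert line == b'listening at http://localhost:$HGPORT1/ (bound to *:$HGPORT1)\n'
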
|
736 | 738 | def _getenv(self): |
|
737 | 739 | """Obtain environment variables to use during test execution.""" |
|
738 | 740 | env = os.environ.copy() |
|
739 | 741 | env['TESTTMP'] = self._testtmp |
|
740 | 742 | env['HOME'] = self._testtmp |
|
741 | 743 | env["HGPORT"] = str(self._startport) |
|
742 | 744 | env["HGPORT1"] = str(self._startport + 1) |
|
743 | 745 | env["HGPORT2"] = str(self._startport + 2) |
|
746 | env["HGPORT3"] = str(self._startport + 3) | |
|
747 | env["HGPORT4"] = str(self._startport + 4) | |
|
744 | 748 | env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc') |
|
745 | 749 | env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids') |
|
746 | 750 | env["HGEDITOR"] = ('"' + sys.executable + '"' |
|
747 | 751 | + ' -c "import sys; sys.exit(0)"') |
|
748 | 752 | env["HGMERGE"] = "internal:merge" |
|
749 | 753 | env["HGUSER"] = "test" |
|
750 | 754 | env["HGENCODING"] = "ascii" |
|
751 | 755 | env["HGENCODINGMODE"] = "strict" |
|
752 | 756 | |
|
753 | 757 | # Reset some environment variables to well-known values so that |
|
754 | 758 | # the tests produce repeatable output. |
|
755 | 759 | env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C' |
|
756 | 760 | env['TZ'] = 'GMT' |
|
757 | 761 | env["EMAIL"] = "Foo Bar <foo.bar@example.com>" |
|
758 | 762 | env['COLUMNS'] = '80' |
|
759 | 763 | env['TERM'] = 'xterm' |
|
760 | 764 | |
|
761 | 765 | for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' + |
|
762 | 766 | 'NO_PROXY').split(): |
|
763 | 767 | if k in env: |
|
764 | 768 | del env[k] |
|
765 | 769 | |
|
766 | 770 | # unset env related to hooks |
|
767 | 771 | for k in env.keys(): |
|
768 | 772 | if k.startswith('HG_'): |
|
769 | 773 | del env[k] |
|
770 | 774 | |
|
771 | 775 | return env |
|
772 | 776 | |
|
773 | 777 | def _createhgrc(self, path): |
|
774 | 778 | """Create an hgrc file for this test.""" |
|
775 | 779 | hgrc = open(path, 'wb') |
|
776 | 780 | hgrc.write(b'[ui]\n') |
|
777 | 781 | hgrc.write(b'slash = True\n') |
|
778 | 782 | hgrc.write(b'interactive = False\n') |
|
779 | 783 | hgrc.write(b'mergemarkers = detailed\n') |
|
780 | 784 | hgrc.write(b'promptecho = True\n') |
|
781 | 785 | hgrc.write(b'[defaults]\n') |
|
782 | 786 | hgrc.write(b'backout = -d "0 0"\n') |
|
783 | 787 | hgrc.write(b'commit = -d "0 0"\n') |
|
784 | 788 | hgrc.write(b'shelve = --date "0 0"\n') |
|
785 | 789 | hgrc.write(b'tag = -d "0 0"\n') |
|
786 | 790 | hgrc.write(b'[devel]\n') |
|
787 | 791 | hgrc.write(b'all-warnings = true\n') |
|
788 | 792 | hgrc.write(b'[largefiles]\n') |
|
789 | 793 | hgrc.write(b'usercache = %s\n' % |
|
790 | 794 | (os.path.join(self._testtmp, b'.cache/largefiles'))) |
|
791 | 795 | |
|
792 | 796 | for opt in self._extraconfigopts: |
|
793 | 797 | section, key = opt.split('.', 1) |
|
794 | 798 | assert '=' in key, ('extra config opt %s must ' |
|
795 | 799 | 'have an = for assignment' % opt) |
|
796 | 800 | hgrc.write(b'[%s]\n%s\n' % (section, key)) |
|
797 | 801 | hgrc.close() |
|
798 | 802 | |
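
For instance (value invented), an --extra-config-opt of "extensions.rebase=" splits into the section "extensions" and the assignment "rebase=", which the loop above writes out as its own section:

opt = 'extensions.rebase='        # hypothetical command-line value
section, key = opt.split('.', 1)  # -> 'extensions', 'rebase='
assert '=' in key
# written to the generated hgrc as:
#   [extensions]
#   rebase=
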
|
799 | 803 | def fail(self, msg): |
|
800 | 804 | # unittest differentiates between errored and failed. |
|
801 | 805 | # Failed is denoted by AssertionError (by default at least). |
|
802 | 806 | raise AssertionError(msg) |
|
803 | 807 | |
|
804 | 808 | def _runcommand(self, cmd, env, normalizenewlines=False): |
|
805 | 809 | """Run command in a sub-process, capturing the output (stdout and |
|
806 | 810 | stderr). |
|
807 | 811 | |
|
808 | 812 | Return a tuple (exitcode, output). output is None in debug mode. |
|
809 | 813 | """ |
|
810 | 814 | if self._debug: |
|
811 | 815 | proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp, |
|
812 | 816 | env=env) |
|
813 | 817 | ret = proc.wait() |
|
814 | 818 | return (ret, None) |
|
815 | 819 | |
|
816 | 820 | proc = Popen4(cmd, self._testtmp, self._timeout, env) |
|
817 | 821 | def cleanup(): |
|
818 | 822 | terminate(proc) |
|
819 | 823 | ret = proc.wait() |
|
820 | 824 | if ret == 0: |
|
821 | 825 | ret = signal.SIGTERM << 8 |
|
822 | 826 | killdaemons(env['DAEMON_PIDS']) |
|
823 | 827 | return ret |
|
824 | 828 | |
|
825 | 829 | output = '' |
|
826 | 830 | proc.tochild.close() |
|
827 | 831 | |
|
828 | 832 | try: |
|
829 | 833 | output = proc.fromchild.read() |
|
830 | 834 | except KeyboardInterrupt: |
|
831 | 835 | vlog('# Handling keyboard interrupt') |
|
832 | 836 | cleanup() |
|
833 | 837 | raise |
|
834 | 838 | |
|
835 | 839 | ret = proc.wait() |
|
836 | 840 | if wifexited(ret): |
|
837 | 841 | ret = os.WEXITSTATUS(ret) |
|
838 | 842 | |
|
839 | 843 | if proc.timeout: |
|
840 | 844 | ret = 'timeout' |
|
841 | 845 | |
|
842 | 846 | if ret: |
|
843 | 847 | killdaemons(env['DAEMON_PIDS']) |
|
844 | 848 | |
|
845 | 849 | for s, r in self._getreplacements(): |
|
846 | 850 | output = re.sub(s, r, output) |
|
847 | 851 | |
|
848 | 852 | if normalizenewlines: |
|
849 | 853 | output = output.replace('\r\n', '\n') |
|
850 | 854 | |
|
851 | 855 | return ret, output.splitlines(True) |
|
852 | 856 | |
|
853 | 857 | class PythonTest(Test): |
|
854 | 858 | """A Python-based test.""" |
|
855 | 859 | |
|
856 | 860 | @property |
|
857 | 861 | def refpath(self): |
|
858 | 862 | return os.path.join(self._testdir, b'%s.out' % self.bname) |
|
859 | 863 | |
|
860 | 864 | def _run(self, env): |
|
861 | 865 | py3kswitch = self._py3kwarnings and b' -3' or b'' |
|
862 | 866 | cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path) |
|
863 | 867 | vlog("# Running", cmd) |
|
864 | 868 | normalizenewlines = os.name == 'nt' |
|
865 | 869 | result = self._runcommand(cmd, env, |
|
866 | 870 | normalizenewlines=normalizenewlines) |
|
867 | 871 | if self._aborted: |
|
868 | 872 | raise KeyboardInterrupt() |
|
869 | 873 | |
|
870 | 874 | return result |
|
871 | 875 | |
|
872 | 876 | # This script may want to drop globs from lines matching these patterns on |
|
873 | 877 | # Windows, but check-code.py wants a glob on these lines unconditionally. Don't |
|
874 | 878 | # warn if that is the case for anything matching these lines. |
|
875 | 879 | checkcodeglobpats = [ |
|
876 | 880 | re.compile(br'^pushing to \$TESTTMP/.*[^)]$'), |
|
877 | 881 | re.compile(br'^moving \S+/.*[^)]$'), |
|
878 | 882 | re.compile(br'^pulling from \$TESTTMP/.*[^)]$') |
|
879 | 883 | ] |
|
880 | 884 | |
|
881 | 885 | bchr = chr |
|
882 | 886 | if PYTHON3: |
|
883 | 887 | bchr = lambda x: bytes([x]) |
|
884 | 888 | |
|
885 | 889 | class TTest(Test): |
|
886 | 890 | """A "t test" is a test backed by a .t file.""" |
|
887 | 891 | |
|
888 | 892 | SKIPPED_PREFIX = b'skipped: '

889 | 893 | FAILED_PREFIX = b'hghave check failed: '
|
890 | 894 | NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search |
|
891 | 895 | |
|
892 | 896 | ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub |
|
893 | 897 | ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256)) |
|
894 | 898 | ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'}) |
|
895 | 899 | |
|
896 | 900 | @property |
|
897 | 901 | def refpath(self): |
|
898 | 902 | return os.path.join(self._testdir, self.bname) |
|
899 | 903 | |
|
900 | 904 | def _run(self, env): |
|
901 | 905 | f = open(self.path, 'rb') |
|
902 | 906 | lines = f.readlines() |
|
903 | 907 | f.close() |
|
904 | 908 | |
|
905 | 909 | salt, script, after, expected = self._parsetest(lines) |
|
906 | 910 | |
|
907 | 911 | # Write out the generated script. |
|
908 | 912 | fname = b'%s.sh' % self._testtmp |
|
909 | 913 | f = open(fname, 'wb') |
|
910 | 914 | for l in script: |
|
911 | 915 | f.write(l) |
|
912 | 916 | f.close() |
|
913 | 917 | |
|
914 | 918 | cmd = b'%s "%s"' % (self._shell, fname) |
|
915 | 919 | vlog("# Running", cmd) |
|
916 | 920 | |
|
917 | 921 | exitcode, output = self._runcommand(cmd, env) |
|
918 | 922 | |
|
919 | 923 | if self._aborted: |
|
920 | 924 | raise KeyboardInterrupt() |
|
921 | 925 | |
|
922 | 926 | # Do not merge output if skipped. Return hghave message instead. |
|
923 | 927 | # Similarly, with --debug, output is None. |
|
924 | 928 | if exitcode == self.SKIPPED_STATUS or output is None: |
|
925 | 929 | return exitcode, output |
|
926 | 930 | |
|
927 | 931 | return self._processoutput(exitcode, output, salt, after, expected) |
|
928 | 932 | |
|
929 | 933 | def _hghave(self, reqs): |
|
930 | 934 | # TODO do something smarter when all other uses of hghave are gone. |
|
931 | 935 | runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__))) |
|
932 | 936 | tdir = runtestdir.replace(b'\\', b'/') |
|
933 | 937 | proc = Popen4(b'%s -c "%s/hghave %s"' % |
|
934 | 938 | (self._shell, tdir, b' '.join(reqs)), |
|
935 | 939 | self._testtmp, 0, self._getenv()) |
|
936 | 940 | stdout, stderr = proc.communicate() |
|
937 | 941 | ret = proc.wait() |
|
938 | 942 | if wifexited(ret): |
|
939 | 943 | ret = os.WEXITSTATUS(ret) |
|
940 | 944 | if ret == 2: |
|
941 | 945 | print(stdout) |
|
942 | 946 | sys.exit(1) |
|
943 | 947 | |
|
944 | 948 | if ret != 0: |
|
945 | 949 | return False, stdout |
|
946 | 950 | |
|
947 | 951 | if b'slow' in reqs:
|
948 | 952 | self._timeout = self._slowtimeout |
|
949 | 953 | return True, None |
|
950 | 954 | |
|
951 | 955 | def _parsetest(self, lines): |
|
952 | 956 | # We generate a shell script which outputs unique markers to line |
|
953 | 957 | # up script results with our source. These markers include input |
|
954 | 958 | # line number and the last return code. |
|
955 | 959 | salt = b"SALT%d" % time.time() |
|
956 | 960 | def addsalt(line, inpython): |
|
957 | 961 | if inpython: |
|
958 | 962 | script.append(b'%s %d 0\n' % (salt, line)) |
|
959 | 963 | else: |
|
960 | 964 | script.append(b'echo %s %d $?\n' % (salt, line)) |
|
961 | 965 | |
|
962 | 966 | script = [] |
|
963 | 967 | |
|
964 | 968 | # After we run the shell script, we re-unify the script output |
|
965 | 969 | # with non-active parts of the source, with synchronization by our |
|
966 | 970 | # SALT line number markers. The after table contains the non-active |
|
967 | 971 | # components, ordered by line number. |
|
968 | 972 | after = {} |
|
969 | 973 | |
|
970 | 974 | # Expected shell script output. |
|
971 | 975 | expected = {} |
|
972 | 976 | |
|
973 | 977 | pos = prepos = -1 |
|
974 | 978 | |
|
975 | 979 | # True or False when in a true or false conditional section |
|
976 | 980 | skipping = None |
|
977 | 981 | |
|
978 | 982 | # We keep track of whether or not we're in a Python block so we |
|
979 | 983 | # can generate the surrounding doctest magic. |
|
980 | 984 | inpython = False |
|
981 | 985 | |
|
982 | 986 | if self._debug: |
|
983 | 987 | script.append(b'set -x\n') |
|
984 | 988 | if os.getenv('MSYSTEM'): |
|
985 | 989 | script.append(b'alias pwd="pwd -W"\n') |
|
986 | 990 | |
|
987 | 991 | for n, l in enumerate(lines): |
|
988 | 992 | if not l.endswith(b'\n'): |
|
989 | 993 | l += b'\n' |
|
990 | 994 | if l.startswith(b'#require'): |
|
991 | 995 | lsplit = l.split() |
|
992 | 996 | if len(lsplit) < 2 or lsplit[0] != b'#require': |
|
993 | 997 | after.setdefault(pos, []).append(' !!! invalid #require\n') |
|
994 | 998 | haveresult, message = self._hghave(lsplit[1:]) |
|
995 | 999 | if not haveresult: |
|
996 | 1000 | script = [b'echo "%s"\nexit 80\n' % message] |
|
997 | 1001 | break |
|
998 | 1002 | after.setdefault(pos, []).append(l) |
|
999 | 1003 | elif l.startswith(b'#if'): |
|
1000 | 1004 | lsplit = l.split() |
|
1001 | 1005 | if len(lsplit) < 2 or lsplit[0] != b'#if': |
|
1002 | 1006 | after.setdefault(pos, []).append(' !!! invalid #if\n') |
|
1003 | 1007 | if skipping is not None: |
|
1004 | 1008 | after.setdefault(pos, []).append(' !!! nested #if\n') |
|
1005 | 1009 | skipping = not self._hghave(lsplit[1:])[0] |
|
1006 | 1010 | after.setdefault(pos, []).append(l) |
|
1007 | 1011 | elif l.startswith(b'#else'): |
|
1008 | 1012 | if skipping is None: |
|
1009 | 1013 | after.setdefault(pos, []).append(' !!! missing #if\n') |
|
1010 | 1014 | skipping = not skipping |
|
1011 | 1015 | after.setdefault(pos, []).append(l) |
|
1012 | 1016 | elif l.startswith(b'#endif'): |
|
1013 | 1017 | if skipping is None: |
|
1014 | 1018 | after.setdefault(pos, []).append(' !!! missing #if\n') |
|
1015 | 1019 | skipping = None |
|
1016 | 1020 | after.setdefault(pos, []).append(l) |
|
1017 | 1021 | elif skipping: |
|
1018 | 1022 | after.setdefault(pos, []).append(l) |
|
1019 | 1023 | elif l.startswith(b' >>> '): # python inlines |
|
1020 | 1024 | after.setdefault(pos, []).append(l) |
|
1021 | 1025 | prepos = pos |
|
1022 | 1026 | pos = n |
|
1023 | 1027 | if not inpython: |
|
1024 | 1028 | # We've just entered a Python block. Add the header. |
|
1025 | 1029 | inpython = True |
|
1026 | 1030 | addsalt(prepos, False) # Make sure we report the exit code. |
|
1027 | 1031 | script.append(b'%s -m heredoctest <<EOF\n' % PYTHON) |
|
1028 | 1032 | addsalt(n, True) |
|
1029 | 1033 | script.append(l[2:]) |
|
1030 | 1034 | elif l.startswith(b' ... '): # python inlines |
|
1031 | 1035 | after.setdefault(prepos, []).append(l) |
|
1032 | 1036 | script.append(l[2:]) |
|
1033 | 1037 | elif l.startswith(b' $ '): # commands |
|
1034 | 1038 | if inpython: |
|
1035 | 1039 | script.append(b'EOF\n') |
|
1036 | 1040 | inpython = False |
|
1037 | 1041 | after.setdefault(pos, []).append(l) |
|
1038 | 1042 | prepos = pos |
|
1039 | 1043 | pos = n |
|
1040 | 1044 | addsalt(n, False) |
|
1041 | 1045 | cmd = l[4:].split() |
|
1042 | 1046 | if len(cmd) == 2 and cmd[0] == b'cd': |
|
1043 | 1047 | l = b' $ cd %s || exit 1\n' % cmd[1] |
|
1044 | 1048 | script.append(l[4:]) |
|
1045 | 1049 | elif l.startswith(b' > '): # continuations |
|
1046 | 1050 | after.setdefault(prepos, []).append(l) |
|
1047 | 1051 | script.append(l[4:]) |
|
1048 | 1052 | elif l.startswith(b' '): # results |
|
1049 | 1053 | # Queue up a list of expected results. |
|
1050 | 1054 | expected.setdefault(pos, []).append(l[2:]) |
|
1051 | 1055 | else: |
|
1052 | 1056 | if inpython: |
|
1053 | 1057 | script.append(b'EOF\n') |
|
1054 | 1058 | inpython = False |
|
1055 | 1059 | # Non-command/result. Queue up for merged output. |
|
1056 | 1060 | after.setdefault(pos, []).append(l) |
|
1057 | 1061 | |
|
1058 | 1062 | if inpython: |
|
1059 | 1063 | script.append(b'EOF\n') |
|
1060 | 1064 | if skipping is not None: |
|
1061 | 1065 | after.setdefault(pos, []).append(' !!! missing #endif\n') |
|
1062 | 1066 | addsalt(n + 1, False) |
|
1063 | 1067 | |
|
1064 | 1068 | return salt, script, after, expected |
|
1065 | 1069 | |
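
To visualize the salting scheme: every ' $ ' command is preceded by an 'echo SALT <lineno> $?' marker, and one final marker is appended, so each marker carries the exit status of whatever ran before it. A toy rendering for a three-line test (salt value invented; the real one embeds time.time()):

# For a .t file reading:  ' $ echo hi' / ' hi' / ' $ false'
salt = b'SALT1400000000'
script = [
    b'echo %s 0 $?\n' % salt,  # marker for the command on source line 0
    b'echo hi\n',
    b'echo %s 2 $?\n' % salt,  # reports the exit status of 'echo hi'
    b'false\n',
    b'echo %s 3 $?\n' % salt,  # final addsalt(n + 1) marker, after 'false'
]
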
|
1066 | 1070 | def _processoutput(self, exitcode, output, salt, after, expected): |
|
1067 | 1071 | # Merge the script output back into a unified test. |
|
1068 | 1072 | warnonly = 1 # 1: not yet; 2: yes; 3: for sure not |
|
1069 | 1073 | if exitcode != 0: |
|
1070 | 1074 | warnonly = 3 |
|
1071 | 1075 | |
|
1072 | 1076 | pos = -1 |
|
1073 | 1077 | postout = [] |
|
1074 | 1078 | for l in output: |
|
1075 | 1079 | lout, lcmd = l, None |
|
1076 | 1080 | if salt in l: |
|
1077 | 1081 | lout, lcmd = l.split(salt, 1) |
|
1078 | 1082 | |
|
1079 | 1083 | while lout: |
|
1080 | 1084 | if not lout.endswith(b'\n'): |
|
1081 | 1085 | lout += b' (no-eol)\n' |
|
1082 | 1086 | |
|
1083 | 1087 | # Find the expected output at the current position. |
|
1084 | 1088 | el = None |
|
1085 | 1089 | if expected.get(pos, None): |
|
1086 | 1090 | el = expected[pos].pop(0) |
|
1087 | 1091 | |
|
1088 | 1092 | r = TTest.linematch(el, lout) |
|
1089 | 1093 | if isinstance(r, str): |
|
1090 | 1094 | if r == '+glob': |
|
1091 | 1095 | lout = el[:-1] + ' (glob)\n' |
|
1092 | 1096 | r = '' # Warn only this line. |
|
1093 | 1097 | elif r == '-glob': |
|
1094 | 1098 | lout = ''.join(el.rsplit(' (glob)', 1)) |
|
1095 | 1099 | r = '' # Warn only this line. |
|
1096 | 1100 | elif r == "retry": |
|
1097 | 1101 | postout.append(b' ' + el) |
|
1098 | 1102 | continue |
|
1099 | 1103 | else: |
|
1100 | 1104 | log('\ninfo, unknown linematch result: %r\n' % r) |
|
1101 | 1105 | r = False |
|
1102 | 1106 | if r: |
|
1103 | 1107 | postout.append(b' ' + el) |
|
1104 | 1108 | else: |
|
1105 | 1109 | if self.NEEDESCAPE(lout): |
|
1106 | 1110 | lout = TTest._stringescape(b'%s (esc)\n' % |
|
1107 | 1111 | lout.rstrip(b'\n')) |
|
1108 | 1112 | postout.append(b' ' + lout) # Let diff deal with it. |
|
1109 | 1113 | if r != '': # If line failed. |
|
1110 | 1114 | warnonly = 3 # for sure not |
|
1111 | 1115 | elif warnonly == 1: # Is "not yet" and line is warn only. |
|
1112 | 1116 | warnonly = 2 # Yes do warn. |
|
1113 | 1117 | break |
|
1114 | 1118 | |
|
1115 | 1119 | # clean up any optional leftovers |
|
1116 | 1120 | while expected.get(pos, None): |
|
1117 | 1121 | el = expected[pos].pop(0) |
|
1118 | 1122 | if not el.endswith(b" (?)\n"): |
|
1119 | 1123 | expected[pos].insert(0, el) |
|
1120 | 1124 | break |
|
1121 | 1125 | postout.append(b' ' + el) |
|
1122 | 1126 | |
|
1123 | 1127 | if lcmd: |
|
1124 | 1128 | # Add on last return code. |
|
1125 | 1129 | ret = int(lcmd.split()[1]) |
|
1126 | 1130 | if ret != 0: |
|
1127 | 1131 | postout.append(b' [%d]\n' % ret) |
|
1128 | 1132 | if pos in after: |
|
1129 | 1133 | # Merge in non-active test bits. |
|
1130 | 1134 | postout += after.pop(pos) |
|
1131 | 1135 | pos = int(lcmd.split()[0]) |
|
1132 | 1136 | |
|
1133 | 1137 | if pos in after: |
|
1134 | 1138 | postout += after.pop(pos) |
|
1135 | 1139 | |
|
1136 | 1140 | if warnonly == 2: |
|
1137 | 1141 | exitcode = False # Set exitcode to warned. |
|
1138 | 1142 | |
|
1139 | 1143 | return exitcode, postout |
|
1140 | 1144 | |
|
1141 | 1145 | @staticmethod |
|
1142 | 1146 | def rematch(el, l): |
|
1143 | 1147 | try: |
|
1144 | 1148 | # use \Z to ensure that the regex matches to the end of the string |
|
1145 | 1149 | if os.name == 'nt': |
|
1146 | 1150 | return re.match(el + br'\r?\n\Z', l) |
|
1147 | 1151 | return re.match(el + br'\n\Z', l) |
|
1148 | 1152 | except re.error: |
|
1149 | 1153 | # el is an invalid regex |
|
1150 | 1154 | return False |
|
1151 | 1155 | |
|
1152 | 1156 | @staticmethod |
|
1153 | 1157 | def globmatch(el, l): |
|
1154 | 1158 | # The only supported special characters are * and ?, plus /, which also

1155 | 1159 | # matches \ on Windows. These characters can be backslash-escaped.
|
1156 | 1160 | if el + b'\n' == l: |
|
1157 | 1161 | if os.altsep: |
|
1158 | 1162 | # matching on "/" is not needed for this line |
|
1159 | 1163 | for pat in checkcodeglobpats: |
|
1160 | 1164 | if pat.match(el): |
|
1161 | 1165 | return True |
|
1162 | 1166 | return b'-glob' |
|
1163 | 1167 | return True |
|
1164 | 1168 | i, n = 0, len(el) |
|
1165 | 1169 | res = b'' |
|
1166 | 1170 | while i < n: |
|
1167 | 1171 | c = el[i:i + 1] |
|
1168 | 1172 | i += 1 |
|
1169 | 1173 | if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/': |
|
1170 | 1174 | res += el[i - 1:i + 1] |
|
1171 | 1175 | i += 1 |
|
1172 | 1176 | elif c == b'*': |
|
1173 | 1177 | res += b'.*' |
|
1174 | 1178 | elif c == b'?': |
|
1175 | 1179 | res += b'.' |
|
1176 | 1180 | elif c == b'/' and os.altsep: |
|
1177 | 1181 | res += b'[/\\\\]' |
|
1178 | 1182 | else: |
|
1179 | 1183 | res += re.escape(c) |
|
1180 | 1184 | return TTest.rematch(res, l) |
|
1181 | 1185 | |
|
1182 | 1186 | @staticmethod |
|
1183 | 1187 | def linematch(el, l): |
|
1184 | 1188 | retry = False |
|
1185 | 1189 | if el == l: # perfect match (fast) |
|
1186 | 1190 | return True |
|
1187 | 1191 | if el: |
|
1188 | 1192 | if el.endswith(b" (?)\n"): |
|
1189 | 1193 | retry = "retry" |
|
1190 | 1194 | el = el[:-5] + b"\n"
|
1191 | 1195 | if el.endswith(b" (esc)\n"): |
|
1192 | 1196 | if PYTHON3: |
|
1193 | 1197 | el = el[:-7].decode('unicode_escape') + '\n' |
|
1194 | 1198 | el = el.encode('utf-8') |
|
1195 | 1199 | else: |
|
1196 | 1200 | el = el[:-7].decode('string-escape') + '\n' |
|
1197 | 1201 | if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l: |
|
1198 | 1202 | return True |
|
1199 | 1203 | if el.endswith(b" (re)\n"): |
|
1200 | 1204 | return TTest.rematch(el[:-6], l) or retry |
|
1201 | 1205 | if el.endswith(b" (glob)\n"): |
|
1202 | 1206 | # ignore '(glob)' added to l by 'replacements' |
|
1203 | 1207 | if l.endswith(b" (glob)\n"): |
|
1204 | 1208 | l = l[:-8] + b"\n" |
|
1205 | 1209 | return TTest.globmatch(el[:-8], l) |
|
1206 | 1210 | if os.altsep and l.replace(b'\\', b'/') == el: |
|
1207 | 1211 | return b'+glob' |
|
1208 | 1212 | return retry |
|
1209 | 1213 | |
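
Putting the matching conventions together (all data invented): a ' (re)' suffix requests a regex match, ' (glob)' enables * and ? wildcards, and ' (?)' marks the expected line as optional, which linematch() signals with its 'retry' return value:

assert TTest.linematch(b'adding foo\n', b'adding foo\n')  # exact, fast path
assert TTest.linematch(b'saved bundle .*\\.hg (re)\n',
                       b'saved bundle backup.hg\n')
assert TTest.linematch(b'moving * to */bar (glob)\n',
                       b'moving a to b/bar\n')
assert TTest.linematch(b'maybe printed (?)\n',
                       b'something else\n') == 'retry'
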
|
1210 | 1214 | @staticmethod |
|
1211 | 1215 | def parsehghaveoutput(lines): |
|
1212 | 1216 | '''Parse hghave log lines. |
|
1213 | 1217 | |
|
1214 | 1218 | Return tuple of lists (missing, failed): |
|
1215 | 1219 | * the missing/unknown features |
|
1216 | 1220 | * the features for which existence check failed''' |
|
1217 | 1221 | missing = [] |
|
1218 | 1222 | failed = [] |
|
1219 | 1223 | for line in lines: |
|
1220 | 1224 | if line.startswith(TTest.SKIPPED_PREFIX): |
|
1221 | 1225 | line = line.splitlines()[0] |
|
1222 | 1226 | missing.append(line[len(TTest.SKIPPED_PREFIX):]) |
|
1223 | 1227 | elif line.startswith(TTest.FAILED_PREFIX): |
|
1224 | 1228 | line = line.splitlines()[0] |
|
1225 | 1229 | failed.append(line[len(TTest.FAILED_PREFIX):]) |
|
1226 | 1230 | |
|
1227 | 1231 | return missing, failed |
|
1228 | 1232 | |
|
1229 | 1233 | @staticmethod |
|
1230 | 1234 | def _escapef(m): |
|
1231 | 1235 | return TTest.ESCAPEMAP[m.group(0)] |
|
1232 | 1236 | |
|
1233 | 1237 | @staticmethod |
|
1234 | 1238 | def _stringescape(s): |
|
1235 | 1239 | return TTest.ESCAPESUB(TTest._escapef, s) |
|
1236 | 1240 | |
|
1237 | 1241 | iolock = threading.RLock() |
|
1238 | 1242 | |
|
1239 | 1243 | class SkipTest(Exception): |
|
1240 | 1244 | """Raised to indicate that a test is to be skipped.""" |
|
1241 | 1245 | |
|
1242 | 1246 | class IgnoreTest(Exception): |
|
1243 | 1247 | """Raised to indicate that a test is to be ignored.""" |
|
1244 | 1248 | |
|
1245 | 1249 | class WarnTest(Exception): |
|
1246 | 1250 | """Raised to indicate that a test warned.""" |
|
1247 | 1251 | |
|
1248 | 1252 | class ReportedTest(Exception): |
|
1249 | 1253 | """Raised to indicate that a test already reported.""" |
|
1250 | 1254 | |
|
1251 | 1255 | class TestResult(unittest._TextTestResult): |
|
1252 | 1256 | """Holds results when executing via unittest.""" |
|
1253 | 1257 | # Don't worry too much about accessing the non-public _TextTestResult. |
|
1254 | 1258 | # It is relatively common in Python testing tools. |
|
1255 | 1259 | def __init__(self, options, *args, **kwargs): |
|
1256 | 1260 | super(TestResult, self).__init__(*args, **kwargs) |
|
1257 | 1261 | |
|
1258 | 1262 | self._options = options |
|
1259 | 1263 | |
|
1260 | 1264 | # unittest.TestResult didn't have skipped until 2.7. We need to |
|
1261 | 1265 | # polyfill it. |
|
1262 | 1266 | self.skipped = [] |
|
1263 | 1267 | |
|
1264 | 1268 | # We have a custom "ignored" result that isn't present in any Python |
|
1265 | 1269 | # unittest implementation. It is very similar to skipped. It may make |
|
1266 | 1270 | # sense to map it into skip some day. |
|
1267 | 1271 | self.ignored = [] |
|
1268 | 1272 | |
|
1269 | 1273 | # We have a custom "warned" result that isn't present in any Python |
|
1270 | 1274 | # unittest implementation. It is very similar to failed. It may make |
|
1271 | 1275 | # sense to map it into fail some day. |
|
1272 | 1276 | self.warned = [] |
|
1273 | 1277 | |
|
1274 | 1278 | self.times = [] |
|
1275 | 1279 | self._firststarttime = None |
|
1276 | 1280 | # Data stored for the benefit of generating xunit reports. |
|
1277 | 1281 | self.successes = [] |
|
1278 | 1282 | self.faildata = {} |
|
1279 | 1283 | |
|
1280 | 1284 | def addFailure(self, test, reason): |
|
1281 | 1285 | self.failures.append((test, reason)) |
|
1282 | 1286 | |
|
1283 | 1287 | if self._options.first: |
|
1284 | 1288 | self.stop() |
|
1285 | 1289 | else: |
|
1286 | 1290 | with iolock: |
|
1287 | 1291 | if reason == "timed out": |
|
1288 | 1292 | self.stream.write('t') |
|
1289 | 1293 | else: |
|
1290 | 1294 | if not self._options.nodiff: |
|
1291 | 1295 | self.stream.write('\nERROR: %s output changed\n' % test) |
|
1292 | 1296 | self.stream.write('!') |
|
1293 | 1297 | |
|
1294 | 1298 | self.stream.flush() |
|
1295 | 1299 | |
|
1296 | 1300 | def addSuccess(self, test): |
|
1297 | 1301 | with iolock: |
|
1298 | 1302 | super(TestResult, self).addSuccess(test) |
|
1299 | 1303 | self.successes.append(test) |
|
1300 | 1304 | |
|
1301 | 1305 | def addError(self, test, err): |
|
1302 | 1306 | super(TestResult, self).addError(test, err) |
|
1303 | 1307 | if self._options.first: |
|
1304 | 1308 | self.stop() |
|
1305 | 1309 | |
|
1306 | 1310 | # Polyfill. |
|
1307 | 1311 | def addSkip(self, test, reason): |
|
1308 | 1312 | self.skipped.append((test, reason)) |
|
1309 | 1313 | with iolock: |
|
1310 | 1314 | if self.showAll: |
|
1311 | 1315 | self.stream.writeln('skipped %s' % reason) |
|
1312 | 1316 | else: |
|
1313 | 1317 | self.stream.write('s') |
|
1314 | 1318 | self.stream.flush() |
|
1315 | 1319 | |
|
1316 | 1320 | def addIgnore(self, test, reason): |
|
1317 | 1321 | self.ignored.append((test, reason)) |
|
1318 | 1322 | with iolock: |
|
1319 | 1323 | if self.showAll: |
|
1320 | 1324 | self.stream.writeln('ignored %s' % reason) |
|
1321 | 1325 | else: |
|
1322 | 1326 | if reason not in ('not retesting', "doesn't match keyword"): |
|
1323 | 1327 | self.stream.write('i') |
|
1324 | 1328 | else: |
|
1325 | 1329 | self.testsRun += 1 |
|
1326 | 1330 | self.stream.flush() |
|
1327 | 1331 | |
|
1328 | 1332 | def addWarn(self, test, reason): |
|
1329 | 1333 | self.warned.append((test, reason)) |
|
1330 | 1334 | |
|
1331 | 1335 | if self._options.first: |
|
1332 | 1336 | self.stop() |
|
1333 | 1337 | |
|
1334 | 1338 | with iolock: |
|
1335 | 1339 | if self.showAll: |
|
1336 | 1340 | self.stream.writeln('warned %s' % reason) |
|
1337 | 1341 | else: |
|
1338 | 1342 | self.stream.write('~') |
|
1339 | 1343 | self.stream.flush() |
|
1340 | 1344 | |
|
1341 | 1345 | def addOutputMismatch(self, test, ret, got, expected): |
|
1342 | 1346 | """Record a mismatch in test output for a particular test.""" |
|
1343 | 1347 | if self.shouldStop: |
|
1344 | 1348 | # Don't print: some other test case already failed and

1345 | 1349 | # printed; we are just stale and probably failed because our

1346 | 1350 | # temp dir got cleaned up.
|
1347 | 1351 | return |
|
1348 | 1352 | |
|
1349 | 1353 | accepted = False |
|
1350 | 1354 | failed = False |
|
1351 | 1355 | lines = [] |
|
1352 | 1356 | |
|
1353 | 1357 | with iolock: |
|
1354 | 1358 | if self._options.nodiff: |
|
1355 | 1359 | pass |
|
1356 | 1360 | elif self._options.view: |
|
1357 | 1361 | v = self._options.view |
|
1358 | 1362 | if PYTHON3: |
|
1359 | 1363 | v = _bytespath(v) |
|
1360 | 1364 | os.system(b"%s %s %s" % |
|
1361 | 1365 | (v, test.refpath, test.errpath)) |
|
1362 | 1366 | else: |
|
1363 | 1367 | servefail, lines = getdiff(expected, got, |
|
1364 | 1368 | test.refpath, test.errpath) |
|
1365 | 1369 | if servefail: |
|
1366 | 1370 | self.addFailure( |
|
1367 | 1371 | test, |
|
1368 | 1372 | 'server failed to start (HGPORT=%s)' % test._startport) |
|
1369 | 1373 | raise ReportedTest('server failed to start') |
|
1370 | 1374 | else: |
|
1371 | 1375 | self.stream.write('\n') |
|
1372 | 1376 | for line in lines: |
|
1373 | 1377 | if PYTHON3: |
|
1374 | 1378 | self.stream.flush() |
|
1375 | 1379 | self.stream.buffer.write(line) |
|
1376 | 1380 | self.stream.buffer.flush() |
|
1377 | 1381 | else: |
|
1378 | 1382 | self.stream.write(line) |
|
1379 | 1383 | self.stream.flush() |
|
1380 | 1384 | |
|
1381 | 1385 | # handle interactive prompt without releasing iolock |
|
1382 | 1386 | if self._options.interactive: |
|
1383 | 1387 | self.stream.write('Accept this change? [n] ') |
|
1384 | 1388 | answer = sys.stdin.readline().strip() |
|
1385 | 1389 | if answer.lower() in ('y', 'yes'): |
|
1386 | 1390 | if test.name.endswith('.t'): |
|
1387 | 1391 | rename(test.errpath, test.path) |
|
1388 | 1392 | else: |
|
1389 | 1393 | rename(test.errpath, '%s.out' % test.path) |
|
1390 | 1394 | accepted = True |
|
1391 | 1395 | if not accepted and not failed: |
|
1392 | 1396 | self.faildata[test.name] = b''.join(lines) |
|
1393 | 1397 | |
|
1394 | 1398 | return accepted |
|
1395 | 1399 | |
|
1396 | 1400 | def startTest(self, test): |
|
1397 | 1401 | super(TestResult, self).startTest(test) |
|
1398 | 1402 | |
|
1399 | 1403 | # os.times() returns the user and system CPU time consumed by this

1400 | 1404 | # process and by its children, along with the elapsed real time. Its

1401 | 1405 | # one limitation: the child CPU times are only reported on Unix-like

1402 | 1406 | # systems, not on Windows.
|
1403 | 1407 | test.started = os.times() |
|
1404 | 1408 | if self._firststarttime is None: # thread racy but irrelevant |
|
1405 | 1409 | self._firststarttime = test.started[4] |
|
1406 | 1410 | |
|
1407 | 1411 | def stopTest(self, test, interrupted=False): |
|
1408 | 1412 | super(TestResult, self).stopTest(test) |
|
1409 | 1413 | |
|
1410 | 1414 | test.stopped = os.times() |
|
1411 | 1415 | |
|
1412 | 1416 | starttime = test.started |
|
1413 | 1417 | endtime = test.stopped |
|
1414 | 1418 | origin = self._firststarttime |
|
1415 | 1419 | self.times.append((test.name, |
|
1416 | 1420 | endtime[2] - starttime[2], # user space CPU time |
|
1417 | 1421 | endtime[3] - starttime[3], # sys space CPU time |
|
1418 | 1422 | endtime[4] - starttime[4], # real time |
|
1419 | 1423 | starttime[4] - origin, # start date in run context |
|
1420 | 1424 | endtime[4] - origin, # end date in run context |
|
1421 | 1425 | )) |
|
1422 | 1426 | |
|
1423 | 1427 | if interrupted: |
|
1424 | 1428 | with iolock: |
|
1425 | 1429 | self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % ( |
|
1426 | 1430 | test.name, self.times[-1][3])) |
|
1427 | 1431 | |
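# A sketch of the timing bookkeeping above, with hypothetical numbers.
# os.times() yields (utime, stime, child-utime, child-stime, elapsed); tests
# run as child processes, so indices 2-4 carry the interesting deltas:
#
#   started = (0.10, 0.02, 0.00, 0.00, 100.0)
#   stopped = (0.10, 0.02, 1.50, 0.40, 103.2)
#   # -> times entry: (name, 1.50, 0.40, 3.2,
#   #                  started[4] - origin, stopped[4] - origin)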
|
1428 | 1432 | class TestSuite(unittest.TestSuite): |
|
1429 | 1433 | """Custom unittest TestSuite that knows how to execute Mercurial tests.""" |
|
1430 | 1434 | |
|
1431 | 1435 | def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None, |
|
1432 | 1436 | retest=False, keywords=None, loop=False, runs_per_test=1, |
|
1433 | 1437 | loadtest=None, showchannels=False, |
|
1434 | 1438 | *args, **kwargs): |
|
1435 | 1439 | """Create a new instance that can run tests with a configuration. |
|
1436 | 1440 | |
|
1437 | 1441 | testdir specifies the directory where tests are executed from. This |
|
1438 | 1442 | is typically the ``tests`` directory from Mercurial's source |
|
1439 | 1443 | repository. |
|
1440 | 1444 | |
|
1441 | 1445 | jobs specifies the number of jobs to run concurrently. Each test |
|
1442 | 1446 | executes on its own thread. Tests actually spawn new processes, so |
|
1443 | 1447 | state mutation should not be an issue. |
|
1444 | 1448 | |
|
1445 | 1449 | If there is only one job, it will use the main thread. |
|
1446 | 1450 | |
|
1447 | 1451 | whitelist and blacklist denote tests that have been whitelisted and |
|
1448 | 1452 | blacklisted, respectively. These arguments don't belong in TestSuite. |
|
1449 | 1453 | Instead, whitelist and blacklist should be handled by the thing that |
|
1450 | 1454 | populates the TestSuite with tests. They are present to preserve |
|
1451 | 1455 | backwards compatible behavior which reports skipped tests as part |
|
1452 | 1456 | of the results. |
|
1453 | 1457 | |
|
1454 | 1458 | retest denotes whether to retest failed tests. This arguably belongs |
|
1455 | 1459 | outside of TestSuite. |
|
1456 | 1460 | |
|
1457 | 1461 | keywords denotes key words that will be used to filter which tests |
|
1458 | 1462 | to execute. This arguably belongs outside of TestSuite. |
|
1459 | 1463 | |
|
1460 | 1464 | loop denotes whether to loop over tests forever. |
|
1461 | 1465 | """ |
|
1462 | 1466 | super(TestSuite, self).__init__(*args, **kwargs) |
|
1463 | 1467 | |
|
1464 | 1468 | self._jobs = jobs |
|
1465 | 1469 | self._whitelist = whitelist |
|
1466 | 1470 | self._blacklist = blacklist |
|
1467 | 1471 | self._retest = retest |
|
1468 | 1472 | self._keywords = keywords |
|
1469 | 1473 | self._loop = loop |
|
1470 | 1474 | self._runs_per_test = runs_per_test |
|
1471 | 1475 | self._loadtest = loadtest |
|
1472 | 1476 | self._showchannels = showchannels |
|
1473 | 1477 | |
|
1474 | 1478 | def run(self, result): |
|
1475 | 1479 | # We have a number of filters that need to be applied. We do this |
|
1476 | 1480 | # here instead of inside Test because it makes the running logic for |
|
1477 | 1481 | # Test simpler. |
|
1478 | 1482 | tests = [] |
|
1479 | 1483 | num_tests = [0] |
|
1480 | 1484 | for test in self._tests: |
|
1481 | 1485 | def get(): |
|
1482 | 1486 | num_tests[0] += 1 |
|
1483 | 1487 | if getattr(test, 'should_reload', False): |
|
1484 | 1488 | return self._loadtest(test.bname, num_tests[0]) |
|
1485 | 1489 | return test |
|
1486 | 1490 | if not os.path.exists(test.path): |
|
1487 | 1491 | result.addSkip(test, "Doesn't exist") |
|
1488 | 1492 | continue |
|
1489 | 1493 | |
|
1490 | 1494 | if not (self._whitelist and test.name in self._whitelist): |
|
1491 | 1495 | if self._blacklist and test.bname in self._blacklist: |
|
1492 | 1496 | result.addSkip(test, 'blacklisted') |
|
1493 | 1497 | continue |
|
1494 | 1498 | |
|
1495 | 1499 | if self._retest and not os.path.exists(test.errpath): |
|
1496 | 1500 | result.addIgnore(test, 'not retesting') |
|
1497 | 1501 | continue |
|
1498 | 1502 | |
|
1499 | 1503 | if self._keywords: |
|
1500 | 1504 | f = open(test.path, 'rb') |
|
1501 | 1505 | t = f.read().lower() + test.bname.lower() |
|
1502 | 1506 | f.close() |
|
1503 | 1507 | ignored = False |
|
1504 | 1508 | for k in self._keywords.lower().split(): |
|
1505 | 1509 | if k not in t: |
|
1506 | 1510 | result.addIgnore(test, "doesn't match keyword") |
|
1507 | 1511 | ignored = True |
|
1508 | 1512 | break |
|
1509 | 1513 | |
|
1510 | 1514 | if ignored: |
|
1511 | 1515 | continue |
|
1512 | 1516 | for _ in xrange(self._runs_per_test): |
|
1513 | 1517 | tests.append(get()) |
|
1514 | 1518 | |
|
1515 | 1519 | runtests = list(tests) |
|
1516 | 1520 | done = queue.Queue() |
|
1517 | 1521 | running = 0 |
|
1518 | 1522 | |
|
1519 | 1523 | channels = [""] * self._jobs |
|
1520 | 1524 | |
|
1521 | 1525 | def job(test, result): |
|
1522 | 1526 | for n, v in enumerate(channels): |
|
1523 | 1527 | if not v: |
|
1524 | 1528 | channel = n |
|
1525 | 1529 | break |
|
1526 | 1530 | channels[channel] = "=" + test.name[5:].split(".")[0] |
|
1527 | 1531 | try: |
|
1528 | 1532 | test(result) |
|
1529 | 1533 | done.put(None) |
|
1530 | 1534 | except KeyboardInterrupt: |
|
1531 | 1535 | pass |
|
1532 | 1536 | except: # re-raises |
|
1533 | 1537 | done.put(('!', test, 'run-test raised an error, see traceback')) |
|
1534 | 1538 | raise |
|
1535 | 1539 | try: |
|
1536 | 1540 | channels[channel] = '' |
|
1537 | 1541 | except IndexError: |
|
1538 | 1542 | pass |
|
1539 | 1543 | |
|
1540 | 1544 | def stat(): |
|
1541 | 1545 | count = 0 |
|
1542 | 1546 | while channels: |
|
1543 | 1547 | d = '\n%03s ' % count |
|
1544 | 1548 | for n, v in enumerate(channels): |
|
1545 | 1549 | if v: |
|
1546 | 1550 | d += v[0] |
|
1547 | 1551 | channels[n] = v[1:] or '.' |
|
1548 | 1552 | else: |
|
1549 | 1553 | d += ' ' |
|
1550 | 1554 | d += ' ' |
|
1551 | 1555 | with iolock: |
|
1552 | 1556 | sys.stdout.write(d + ' ') |
|
1553 | 1557 | sys.stdout.flush() |
|
1554 | 1558 | for x in xrange(10): |
|
1555 | 1559 | if channels: |
|
1556 | 1560 | time.sleep(.1) |
|
1557 | 1561 | count += 1 |
|
1558 | 1562 | |
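# With --showchannels enabled, each worker claims the lowest free slot in
# 'channels' and publishes '=<testname>'; stat() then prints one character of
# that name per tick, so a running test's name is spelled out down its
# column, '.' marks a test that has outlived its name, and a blank column
# means the channel is free again.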
|
1559 | 1563 | stoppedearly = False |
|
1560 | 1564 | |
|
1561 | 1565 | if self._showchannels: |
|
1562 | 1566 | statthread = threading.Thread(target=stat, name="stat") |
|
1563 | 1567 | statthread.start() |
|
1564 | 1568 | |
|
1565 | 1569 | try: |
|
1566 | 1570 | while tests or running: |
|
1567 | 1571 | if not done.empty() or running == self._jobs or not tests: |
|
1568 | 1572 | try: |
|
1569 | 1573 | done.get(True, 1) |
|
1570 | 1574 | running -= 1 |
|
1571 | 1575 | if result and result.shouldStop: |
|
1572 | 1576 | stoppedearly = True |
|
1573 | 1577 | break |
|
1574 | 1578 | except queue.Empty: |
|
1575 | 1579 | continue |
|
1576 | 1580 | if tests and not running == self._jobs: |
|
1577 | 1581 | test = tests.pop(0) |
|
1578 | 1582 | if self._loop: |
|
1579 | 1583 | if getattr(test, 'should_reload', False): |
|
1580 | 1584 | num_tests[0] += 1 |
|
1581 | 1585 | tests.append( |
|
1582 | 1586 | self._loadtest(test.name, num_tests[0])) |
|
1583 | 1587 | else: |
|
1584 | 1588 | tests.append(test) |
|
1585 | 1589 | if self._jobs == 1: |
|
1586 | 1590 | job(test, result) |
|
1587 | 1591 | else: |
|
1588 | 1592 | t = threading.Thread(target=job, name=test.name, |
|
1589 | 1593 | args=(test, result)) |
|
1590 | 1594 | t.start() |
|
1591 | 1595 | running += 1 |
|
1592 | 1596 | |
|
1593 | 1597 | # If we stop early we still need to wait on started tests to |
|
1594 | 1598 | # finish. Otherwise, there is a race between the test completing |
|
1595 | 1599 | # and the test's cleanup code running. This could result in the |
|
1596 | 1600 | # test reporting incorrectly.
|
1597 | 1601 | if stoppedearly: |
|
1598 | 1602 | while running: |
|
1599 | 1603 | try: |
|
1600 | 1604 | done.get(True, 1) |
|
1601 | 1605 | running -= 1 |
|
1602 | 1606 | except queue.Empty: |
|
1603 | 1607 | continue |
|
1604 | 1608 | except KeyboardInterrupt: |
|
1605 | 1609 | for test in runtests: |
|
1606 | 1610 | test.abort() |
|
1607 | 1611 | |
|
1608 | 1612 | channels = [] |
|
1609 | 1613 | |
|
1610 | 1614 | return result |
|
1611 | 1615 | |
|
1612 | 1616 | # Save the most recent 5 wall-clock runtimes of each test to a |
|
1613 | 1617 | # human-readable text file named .testtimes. Tests are sorted |
|
1614 | 1618 | # alphabetically, while times for each test are listed from oldest to |
|
1615 | 1619 | # newest. |
|
1616 | 1620 | |
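# For illustration only (names and values hypothetical), the file written by
# savetimes() below looks like:
#
#   test-commit.t 2.103 1.984 2.240
#   test-merge.t 0.511 0.498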
|
1617 | 1621 | def loadtimes(testdir): |
|
1618 | 1622 | times = [] |
|
1619 | 1623 | try: |
|
1620 | 1624 | with open(os.path.join(testdir, '.testtimes-')) as fp: |
|
1621 | 1625 | for line in fp: |
|
1622 | 1626 | ts = line.split() |
|
1623 | 1627 | times.append((ts[0], [float(t) for t in ts[1:]])) |
|
1624 | 1628 | except IOError as err: |
|
1625 | 1629 | if err.errno != errno.ENOENT: |
|
1626 | 1630 | raise |
|
1627 | 1631 | return times |
|
1628 | 1632 | |
|
1629 | 1633 | def savetimes(testdir, result): |
|
1630 | 1634 | saved = dict(loadtimes(testdir)) |
|
1631 | 1635 | maxruns = 5 |
|
1632 | 1636 | skipped = set([str(t[0]) for t in result.skipped]) |
|
1633 | 1637 | for tdata in result.times: |
|
1634 | 1638 | test, real = tdata[0], tdata[3] |
|
1635 | 1639 | if test not in skipped: |
|
1636 | 1640 | ts = saved.setdefault(test, []) |
|
1637 | 1641 | ts.append(real) |
|
1638 | 1642 | ts[:] = ts[-maxruns:] |
|
1639 | 1643 | |
|
1640 | 1644 | fd, tmpname = tempfile.mkstemp(prefix='.testtimes', |
|
1641 | 1645 | dir=testdir, text=True) |
|
1642 | 1646 | with os.fdopen(fd, 'w') as fp: |
|
1643 | 1647 | for name, ts in sorted(saved.iteritems()): |
|
1644 | 1648 | fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts]))) |
|
1645 | 1649 | timepath = os.path.join(testdir, '.testtimes') |
|
1646 | 1650 | try: |
|
1647 | 1651 | os.unlink(timepath) |
|
1648 | 1652 | except OSError: |
|
1649 | 1653 | pass |
|
1650 | 1654 | try: |
|
1651 | 1655 | os.rename(tmpname, timepath) |
|
1652 | 1656 | except OSError: |
|
1653 | 1657 | pass |
|
1654 | 1658 | |
|
1655 | 1659 | class TextTestRunner(unittest.TextTestRunner): |
|
1656 | 1660 | """Custom unittest test runner that uses appropriate settings.""" |
|
1657 | 1661 | |
|
1658 | 1662 | def __init__(self, runner, *args, **kwargs): |
|
1659 | 1663 | super(TextTestRunner, self).__init__(*args, **kwargs) |
|
1660 | 1664 | |
|
1661 | 1665 | self._runner = runner |
|
1662 | 1666 | |
|
1663 | 1667 | def run(self, test): |
|
1664 | 1668 | result = TestResult(self._runner.options, self.stream, |
|
1665 | 1669 | self.descriptions, self.verbosity) |
|
1666 | 1670 | |
|
1667 | 1671 | test(result) |
|
1668 | 1672 | |
|
1669 | 1673 | failed = len(result.failures) |
|
1670 | 1674 | warned = len(result.warned) |
|
1671 | 1675 | skipped = len(result.skipped) |
|
1672 | 1676 | ignored = len(result.ignored) |
|
1673 | 1677 | |
|
1674 | 1678 | with iolock: |
|
1675 | 1679 | self.stream.writeln('') |
|
1676 | 1680 | |
|
1677 | 1681 | if not self._runner.options.noskips: |
|
1678 | 1682 | for test, msg in result.skipped: |
|
1679 | 1683 | self.stream.writeln('Skipped %s: %s' % (test.name, msg)) |
|
1680 | 1684 | for test, msg in result.warned: |
|
1681 | 1685 | self.stream.writeln('Warned %s: %s' % (test.name, msg)) |
|
1682 | 1686 | for test, msg in result.failures: |
|
1683 | 1687 | self.stream.writeln('Failed %s: %s' % (test.name, msg)) |
|
1684 | 1688 | for test, msg in result.errors: |
|
1685 | 1689 | self.stream.writeln('Errored %s: %s' % (test.name, msg)) |
|
1686 | 1690 | |
|
1687 | 1691 | if self._runner.options.xunit: |
|
1688 | 1692 | with open(self._runner.options.xunit, 'wb') as xuf: |
|
1689 | 1693 | timesd = dict((t[0], t[3]) for t in result.times) |
|
1690 | 1694 | doc = minidom.Document() |
|
1691 | 1695 | s = doc.createElement('testsuite') |
|
1692 | 1696 | s.setAttribute('name', 'run-tests') |
|
1693 | 1697 | s.setAttribute('tests', str(result.testsRun)) |
|
1694 | 1698 | s.setAttribute('errors', "0") # TODO |
|
1695 | 1699 | s.setAttribute('failures', str(failed)) |
|
1696 | 1700 | s.setAttribute('skipped', str(skipped + ignored)) |
|
1697 | 1701 | doc.appendChild(s) |
|
1698 | 1702 | for tc in result.successes: |
|
1699 | 1703 | t = doc.createElement('testcase') |
|
1700 | 1704 | t.setAttribute('name', tc.name) |
|
1701 | 1705 | t.setAttribute('time', '%.3f' % timesd[tc.name]) |
|
1702 | 1706 | s.appendChild(t) |
|
1703 | 1707 | for tc, err in sorted(result.faildata.items()): |
|
1704 | 1708 | t = doc.createElement('testcase') |
|
1705 | 1709 | t.setAttribute('name', tc) |
|
1706 | 1710 | t.setAttribute('time', '%.3f' % timesd[tc]) |
|
1707 | 1711 | # createCDATASection expects a unicode or it will |
|
1708 | 1712 | # convert using default conversion rules, which will |
|
1709 | 1713 | # fail if string isn't ASCII. |
|
1710 | 1714 | err = cdatasafe(err).decode('utf-8', 'replace') |
|
1711 | 1715 | cd = doc.createCDATASection(err) |
|
1712 | 1716 | t.appendChild(cd) |
|
1713 | 1717 | s.appendChild(t) |
|
1714 | 1718 | xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8')) |
|
1715 | 1719 | |
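# For reference, the emitted xunit file has roughly this shape (names and
# values hypothetical; minidom writes attributes alphabetically):
#
#   <?xml version="1.0" encoding="utf-8"?>
#   <testsuite errors="0" failures="1" name="run-tests" skipped="0" tests="2">
#     <testcase name="test-commit.t" time="2.103"/>
#     <testcase name="test-merge.t" time="0.511"><![CDATA[--- expected
#   +++ got]]></testcase>
#   </testsuite>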
|
1716 | 1720 | if self._runner.options.json: |
|
1717 | 1721 | if json is None: |
|
1718 | 1722 | raise ImportError("json module not installed") |
|
1719 | 1723 | jsonpath = os.path.join(self._runner._testdir, 'report.json') |
|
1720 | 1724 | with open(jsonpath, 'w') as fp: |
|
1721 | 1725 | timesd = {} |
|
1722 | 1726 | for tdata in result.times: |
|
1723 | 1727 | test = tdata[0] |
|
1724 | 1728 | timesd[test] = tdata[1:] |
|
1725 | 1729 | |
|
1726 | 1730 | outcome = {} |
|
1727 | 1731 | groups = [('success', ((tc, None) |
|
1728 | 1732 | for tc in result.successes)), |
|
1729 | 1733 | ('failure', result.failures), |
|
1730 | 1734 | ('skip', result.skipped)] |
|
1731 | 1735 | for res, testcases in groups: |
|
1732 | 1736 | for tc, __ in testcases: |
|
1733 | 1737 | if tc.name in timesd: |
|
1734 | 1738 | tres = {'result': res, |
|
1735 | 1739 | 'time': ('%0.3f' % timesd[tc.name][2]), |
|
1736 | 1740 | 'cuser': ('%0.3f' % timesd[tc.name][0]), |
|
1737 | 1741 | 'csys': ('%0.3f' % timesd[tc.name][1]), |
|
1738 | 1742 | 'start': ('%0.3f' % timesd[tc.name][3]), |
|
1739 | 1743 | 'end': ('%0.3f' % timesd[tc.name][4]), |
|
1740 | 1744 | 'diff': result.faildata.get(tc.name, |
|
1741 | 1745 | ''), |
|
1742 | 1746 | } |
|
1743 | 1747 | else: |
|
1744 | 1748 | # blacklisted test |
|
1745 | 1749 | tres = {'result': res} |
|
1746 | 1750 | |
|
1747 | 1751 | outcome[tc.name] = tres |
|
1748 | 1752 | jsonout = json.dumps(outcome, sort_keys=True, indent=4) |
|
1749 | 1753 | fp.writelines(("testreport =", jsonout)) |
|
1750 | 1754 | |
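# The resulting report.json is the literal prefix "testreport =" followed by
# the JSON object, e.g. with one hypothetical entry:
#
#   testreport ={
#       "test-commit.t": {
#           "csys": "0.200",
#           "cuser": "1.900",
#           "diff": "",
#           "end": "2.103",
#           "result": "success",
#           "start": "0.000",
#           "time": "2.103"
#       }
#   }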
|
1751 | 1755 | self._runner._checkhglib('Tested') |
|
1752 | 1756 | |
|
1753 | 1757 | savetimes(self._runner._testdir, result) |
|
1754 | 1758 | self.stream.writeln( |
|
1755 | 1759 | '# Ran %d tests, %d skipped, %d warned, %d failed.' |
|
1756 | 1760 | % (result.testsRun, |
|
1757 | 1761 | skipped + ignored, warned, failed)) |
|
1758 | 1762 | if failed: |
|
1759 | 1763 | self.stream.writeln('python hash seed: %s' % |
|
1760 | 1764 | os.environ['PYTHONHASHSEED']) |
|
1761 | 1765 | if self._runner.options.time: |
|
1762 | 1766 | self.printtimes(result.times) |
|
1763 | 1767 | |
|
1764 | 1768 | return result |
|
1765 | 1769 | |
|
1766 | 1770 | def printtimes(self, times): |
|
1767 | 1771 | # iolock held by run |
|
1768 | 1772 | self.stream.writeln('# Producing time report') |
|
1769 | 1773 | times.sort(key=lambda t: (t[3])) |
|
1770 | 1774 | cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s' |
|
1771 | 1775 | self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' % |
|
1772 | 1776 | ('start', 'end', 'cuser', 'csys', 'real', 'Test')) |
|
1773 | 1777 | for tdata in times: |
|
1774 | 1778 | test = tdata[0] |
|
1775 | 1779 | cuser, csys, real, start, end = tdata[1:6] |
|
1776 | 1780 | self.stream.writeln(cols % (start, end, cuser, csys, real, test)) |
|
1777 | 1781 | |
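# A hypothetical --time report as rendered by printtimes() (column widths
# approximate):
#
#   start   end     cuser   csys    real    Test
#     0.000   2.103   1.900   0.200   2.103 test-commit.t
#     2.104   2.615   0.450   0.061   0.511 test-merge.t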
|
1778 | 1782 | class TestRunner(object): |
|
1779 | 1783 | """Holds context for executing tests. |
|
1780 | 1784 | |
|
1781 | 1785 | Tests rely on a lot of state. This object holds it for them. |
|
1782 | 1786 | """ |
|
1783 | 1787 | |
|
1784 | 1788 | # Programs required to run tests. |
|
1785 | 1789 | REQUIREDTOOLS = [ |
|
1786 | 1790 | os.path.basename(_bytespath(sys.executable)), |
|
1787 | 1791 | b'diff', |
|
1788 | 1792 | b'grep', |
|
1789 | 1793 | b'unzip', |
|
1790 | 1794 | b'gunzip', |
|
1791 | 1795 | b'bunzip2', |
|
1792 | 1796 | b'sed', |
|
1793 | 1797 | ] |
|
1794 | 1798 | |
|
1795 | 1799 | # Maps file extensions to test class. |
|
1796 | 1800 | TESTTYPES = [ |
|
1797 | 1801 | (b'.py', PythonTest), |
|
1798 | 1802 | (b'.t', TTest), |
|
1799 | 1803 | ] |
|
1800 | 1804 | |
|
1801 | 1805 | def __init__(self): |
|
1802 | 1806 | self.options = None |
|
1803 | 1807 | self._hgroot = None |
|
1804 | 1808 | self._testdir = None |
|
1805 | 1809 | self._hgtmp = None |
|
1806 | 1810 | self._installdir = None |
|
1807 | 1811 | self._bindir = None |
|
1808 | 1812 | self._tmpbinddir = None |
|
1809 | 1813 | self._pythondir = None |
|
1810 | 1814 | self._coveragefile = None |
|
1811 | 1815 | self._createdfiles = [] |
|
1812 | 1816 | self._hgpath = None |
|
1813 | 1817 | self._portoffset = 0 |
|
1814 | 1818 | self._ports = {} |
|
1815 | 1819 | |
|
1816 | 1820 | def run(self, args, parser=None): |
|
1817 | 1821 | """Run the test suite.""" |
|
1818 | 1822 | oldmask = os.umask(0o22) |
|
1819 | 1823 | try: |
|
1820 | 1824 | parser = parser or getparser() |
|
1821 | 1825 | options, args = parseargs(args, parser) |
|
1822 | 1826 | # positional arguments are paths to test files to run, so |
|
1823 | 1827 | # we make sure they're all bytestrings |
|
1824 | 1828 | args = [_bytespath(a) for a in args] |
|
1825 | 1829 | self.options = options |
|
1826 | 1830 | |
|
1827 | 1831 | self._checktools() |
|
1828 | 1832 | tests = self.findtests(args) |
|
1829 | 1833 | if options.profile_runner: |
|
1830 | 1834 | import statprof |
|
1831 | 1835 | statprof.start() |
|
1832 | 1836 | result = self._run(tests) |
|
1833 | 1837 | if options.profile_runner: |
|
1834 | 1838 | statprof.stop() |
|
1835 | 1839 | statprof.display() |
|
1836 | 1840 | return result |
|
1837 | 1841 | |
|
1838 | 1842 | finally: |
|
1839 | 1843 | os.umask(oldmask) |
|
1840 | 1844 | |
|
1841 | 1845 | def _run(self, tests): |
|
1842 | 1846 | if self.options.random: |
|
1843 | 1847 | random.shuffle(tests) |
|
1844 | 1848 | else: |
|
1845 | 1849 | # keywords for slow tests |
|
1846 | 1850 | slow = {b'svn': 10, |
|
1847 | 1851 | b'cvs': 10, |
|
1848 | 1852 | b'hghave': 10, |
|
1849 | 1853 | b'largefiles-update': 10, |
|
1850 | 1854 | b'run-tests': 10, |
|
1851 | 1855 | b'corruption': 10, |
|
1852 | 1856 | b'race': 10, |
|
1853 | 1857 | b'i18n': 10, |
|
1854 | 1858 | b'check': 100, |
|
1855 | 1859 | b'gendoc': 100, |
|
1856 | 1860 | b'contrib-perf': 200, |
|
1857 | 1861 | } |
|
1858 | 1862 | perf = {} |
|
1859 | 1863 | def sortkey(f): |
|
1860 | 1864 | # run largest tests first, as they tend to take the longest |
|
1861 | 1865 | try: |
|
1862 | 1866 | return perf[f] |
|
1863 | 1867 | except KeyError: |
|
1864 | 1868 | try: |
|
1865 | 1869 | val = -os.stat(f).st_size |
|
1866 | 1870 | except OSError as e: |
|
1867 | 1871 | if e.errno != errno.ENOENT: |
|
1868 | 1872 | raise |
|
1869 | 1873 | perf[f] = -1e9 # file does not exist, sort it first so the error surfaces early
|
1870 | 1874 | return -1e9 |
|
1871 | 1875 | for kw, mul in slow.items(): |
|
1872 | 1876 | if kw in f: |
|
1873 | 1877 | val *= mul |
|
1874 | 1878 | if f.endswith('.py'): |
|
1875 | 1879 | val /= 10.0 |
|
1876 | 1880 | perf[f] = val / 1000.0 |
|
1877 | 1881 | return perf[f] |
|
1878 | 1882 | tests.sort(key=sortkey) |
|
1879 | 1883 | |
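# Worked example with a hypothetical 50kB test-largefiles-update.t: the base
# key is -50000 (the negated file size), the 'largefiles-update' keyword
# multiplies it by 10, and the final /1000.0 scaling yields -500.0 -- so it
# sorts ahead of an ordinary 5kB test (key -5.0) and is scheduled earlier.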
|
1880 | 1884 | self._testdir = osenvironb[b'TESTDIR'] = getattr( |
|
1881 | 1885 | os, 'getcwdb', os.getcwd)() |
|
1882 | 1886 | |
|
1883 | 1887 | if 'PYTHONHASHSEED' not in os.environ: |
|
1884 | 1888 | # use a random python hash seed all the time |
|
1885 | 1889 | # we do the randomness ourself to know what seed is used |
|
1886 | 1890 | os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32)) |
|
1887 | 1891 | |
|
1888 | 1892 | if self.options.tmpdir: |
|
1889 | 1893 | self.options.keep_tmpdir = True |
|
1890 | 1894 | tmpdir = _bytespath(self.options.tmpdir) |
|
1891 | 1895 | if os.path.exists(tmpdir): |
|
1892 | 1896 | # Meaning of tmpdir has changed since 1.3: we used to create |
|
1893 | 1897 | # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if |
|
1894 | 1898 | # tmpdir already exists. |
|
1895 | 1899 | print("error: temp dir %r already exists" % tmpdir) |
|
1896 | 1900 | return 1 |
|
1897 | 1901 | |
|
1898 | 1902 | # Automatically removing tmpdir sounds convenient, but could |
|
1899 | 1903 | # really annoy anyone in the habit of using "--tmpdir=/tmp" |
|
1900 | 1904 | # or "--tmpdir=$HOME". |
|
1901 | 1905 | #vlog("# Removing temp dir", tmpdir) |
|
1902 | 1906 | #shutil.rmtree(tmpdir) |
|
1903 | 1907 | os.makedirs(tmpdir) |
|
1904 | 1908 | else: |
|
1905 | 1909 | d = None |
|
1906 | 1910 | if os.name == 'nt': |
|
1907 | 1911 | # without this, we get the default temp dir location, but |
|
1908 | 1912 | # in all lowercase, which causes troubles with paths (issue3490) |
|
1909 | 1913 | d = osenvironb.get(b'TMP', None) |
|
1910 | 1914 | tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d) |
|
1911 | 1915 | |
|
1912 | 1916 | self._hgtmp = osenvironb[b'HGTMP'] = ( |
|
1913 | 1917 | os.path.realpath(tmpdir)) |
|
1914 | 1918 | |
|
1915 | 1919 | if self.options.with_hg: |
|
1916 | 1920 | self._installdir = None |
|
1917 | 1921 | whg = self.options.with_hg |
|
1918 | 1922 | # If --with-hg is not specified, we have bytes already, |
|
1919 | 1923 | # but if it was specified in python3 we get a str, so we |
|
1920 | 1924 | # have to encode it back into a bytes. |
|
1921 | 1925 | if PYTHON3: |
|
1922 | 1926 | if not isinstance(whg, bytes): |
|
1923 | 1927 | whg = _bytespath(whg) |
|
1924 | 1928 | self._bindir = os.path.dirname(os.path.realpath(whg)) |
|
1925 | 1929 | assert isinstance(self._bindir, bytes) |
|
1926 | 1930 | self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin') |
|
1927 | 1931 | os.makedirs(self._tmpbindir) |
|
1928 | 1932 | |
|
1929 | 1933 | # This looks redundant with how Python initializes sys.path from |
|
1930 | 1934 | # the location of the script being executed. Needed because the |
|
1931 | 1935 | # "hg" specified by --with-hg is not the only Python script |
|
1932 | 1936 | # executed in the test suite that needs to import 'mercurial' |
|
1933 | 1937 | # ... which means it's not really redundant at all. |
|
1934 | 1938 | self._pythondir = self._bindir |
|
1935 | 1939 | else: |
|
1936 | 1940 | self._installdir = os.path.join(self._hgtmp, b"install") |
|
1937 | 1941 | self._bindir = osenvironb[b"BINDIR"] = \ |
|
1938 | 1942 | os.path.join(self._installdir, b"bin") |
|
1939 | 1943 | self._tmpbindir = self._bindir |
|
1940 | 1944 | self._pythondir = os.path.join(self._installdir, b"lib", b"python") |
|
1941 | 1945 | |
|
1942 | 1946 | osenvironb[b"BINDIR"] = self._bindir |
|
1943 | 1947 | osenvironb[b"PYTHON"] = PYTHON |
|
1944 | 1948 | |
|
1945 | 1949 | fileb = _bytespath(__file__) |
|
1946 | 1950 | runtestdir = os.path.abspath(os.path.dirname(fileb)) |
|
1947 | 1951 | osenvironb[b'RUNTESTDIR'] = runtestdir |
|
1948 | 1952 | if PYTHON3: |
|
1949 | 1953 | sepb = _bytespath(os.pathsep) |
|
1950 | 1954 | else: |
|
1951 | 1955 | sepb = os.pathsep |
|
1952 | 1956 | path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb) |
|
1953 | 1957 | if os.path.islink(__file__): |
|
1954 | 1958 | # test helper will likely be at the end of the symlink |
|
1955 | 1959 | realfile = os.path.realpath(fileb) |
|
1956 | 1960 | realdir = os.path.abspath(os.path.dirname(realfile)) |
|
1957 | 1961 | path.insert(2, realdir) |
|
1958 | 1962 | if self._testdir != runtestdir: |
|
1959 | 1963 | path = [self._testdir] + path |
|
1960 | 1964 | if self._tmpbindir != self._bindir: |
|
1961 | 1965 | path = [self._tmpbindir] + path |
|
1962 | 1966 | osenvironb[b"PATH"] = sepb.join(path) |
|
1963 | 1967 | |
|
1964 | 1968 | # Include TESTDIR in PYTHONPATH so that out-of-tree extensions |
|
1965 | 1969 | # can run .../tests/run-tests.py test-foo where test-foo |
|
1966 | 1970 | # adds an extension to HGRC. Also include the run-tests.py directory to
|
1967 | 1971 | # import modules like heredoctest. |
|
1968 | 1972 | pypath = [self._pythondir, self._testdir, runtestdir] |
|
1969 | 1973 | # We have to augment PYTHONPATH, rather than simply replacing |
|
1970 | 1974 | # it, in case external libraries are only available via current |
|
1971 | 1975 | # PYTHONPATH. (In particular, the Subversion bindings on OS X |
|
1972 | 1976 | # are in /opt/subversion.) |
|
1973 | 1977 | oldpypath = osenvironb.get(IMPL_PATH) |
|
1974 | 1978 | if oldpypath: |
|
1975 | 1979 | pypath.append(oldpypath) |
|
1976 | 1980 | osenvironb[IMPL_PATH] = sepb.join(pypath) |
|
1977 | 1981 | |
|
1978 | 1982 | if self.options.pure: |
|
1979 | 1983 | os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure" |
|
1980 | 1984 | |
|
1981 | 1985 | if self.options.allow_slow_tests: |
|
1982 | 1986 | os.environ["HGTEST_SLOW"] = "slow" |
|
1983 | 1987 | elif 'HGTEST_SLOW' in os.environ: |
|
1984 | 1988 | del os.environ['HGTEST_SLOW'] |
|
1985 | 1989 | |
|
1986 | 1990 | self._coveragefile = os.path.join(self._testdir, b'.coverage') |
|
1987 | 1991 | |
|
1988 | 1992 | vlog("# Using TESTDIR", self._testdir) |
|
1989 | 1993 | vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR']) |
|
1990 | 1994 | vlog("# Using HGTMP", self._hgtmp) |
|
1991 | 1995 | vlog("# Using PATH", os.environ["PATH"]) |
|
1992 | 1996 | vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH]) |
|
1993 | 1997 | |
|
1994 | 1998 | try: |
|
1995 | 1999 | return self._runtests(tests) or 0 |
|
1996 | 2000 | finally: |
|
1997 | 2001 | time.sleep(.1) |
|
1998 | 2002 | self._cleanup() |
|
1999 | 2003 | |
|
2000 | 2004 | def findtests(self, args): |
|
2001 | 2005 | """Finds possible test files from arguments. |
|
2002 | 2006 | |
|
2003 | 2007 | If you wish to inject custom tests into the test harness, this would |
|
2004 | 2008 | be a good function to monkeypatch or override in a derived class. |
|
2005 | 2009 | """ |
|
2006 | 2010 | if not args: |
|
2007 | 2011 | if self.options.changed: |
|
2008 | 2012 | proc = Popen4('hg st --rev "%s" -man0 .' % |
|
2009 | 2013 | self.options.changed, None, 0) |
|
2010 | 2014 | stdout, stderr = proc.communicate() |
|
2011 | 2015 | args = stdout.strip(b'\0').split(b'\0') |
|
2012 | 2016 | else: |
|
2013 | 2017 | args = os.listdir(b'.') |
|
2014 | 2018 | |
|
2015 | 2019 | return [t for t in args |
|
2016 | 2020 | if os.path.basename(t).startswith(b'test-') |
|
2017 | 2021 | and (t.endswith(b'.py') or t.endswith(b'.t'))] |
|
2018 | 2022 | |
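# For example, 'run-tests.py --changed .' asks hg status for the test files
# touched by that revision; NUL-separated output such as
# b'test-commit.t\x00test-merge.t\x00' becomes [b'test-commit.t',
# b'test-merge.t'] after the strip/split above, and then passes through the
# same test-*/.py/.t name filter.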
|
2019 | 2023 | def _runtests(self, tests): |
|
2020 | 2024 | try: |
|
2021 | 2025 | if self._installdir: |
|
2022 | 2026 | self._installhg() |
|
2023 | 2027 | self._checkhglib("Testing") |
|
2024 | 2028 | else: |
|
2025 | 2029 | self._usecorrectpython() |
|
2026 | 2030 | |
|
2027 | 2031 | if self.options.restart: |
|
2028 | 2032 | orig = list(tests) |
|
2029 | 2033 | while tests: |
|
2030 | 2034 | if os.path.exists(tests[0] + ".err"): |
|
2031 | 2035 | break |
|
2032 | 2036 | tests.pop(0) |
|
2033 | 2037 | if not tests: |
|
2034 | 2038 | print("running all tests") |
|
2035 | 2039 | tests = orig |
|
2036 | 2040 | |
|
2037 | 2041 | tests = [self._gettest(t, i) for i, t in enumerate(tests)] |
|
2038 | 2042 | |
|
2039 | 2043 | failed = False |
|
2040 | 2044 | warned = False |
|
2041 | 2045 | kws = self.options.keywords |
|
2042 | 2046 | if kws is not None and PYTHON3: |
|
2043 | 2047 | kws = kws.encode('utf-8') |
|
2044 | 2048 | |
|
2045 | 2049 | suite = TestSuite(self._testdir, |
|
2046 | 2050 | jobs=self.options.jobs, |
|
2047 | 2051 | whitelist=self.options.whitelisted, |
|
2048 | 2052 | blacklist=self.options.blacklist, |
|
2049 | 2053 | retest=self.options.retest, |
|
2050 | 2054 | keywords=kws, |
|
2051 | 2055 | loop=self.options.loop, |
|
2052 | 2056 | runs_per_test=self.options.runs_per_test, |
|
2053 | 2057 | showchannels=self.options.showchannels, |
|
2054 | 2058 | tests=tests, loadtest=self._gettest) |
|
2055 | 2059 | verbosity = 1 |
|
2056 | 2060 | if self.options.verbose: |
|
2057 | 2061 | verbosity = 2 |
|
2058 | 2062 | runner = TextTestRunner(self, verbosity=verbosity) |
|
2059 | 2063 | result = runner.run(suite) |
|
2060 | 2064 | |
|
2061 | 2065 | if result.failures: |
|
2062 | 2066 | failed = True |
|
2063 | 2067 | if result.warned: |
|
2064 | 2068 | warned = True |
|
2065 | 2069 | |
|
2066 | 2070 | if self.options.anycoverage: |
|
2067 | 2071 | self._outputcoverage() |
|
2068 | 2072 | except KeyboardInterrupt: |
|
2069 | 2073 | failed = True |
|
2070 | 2074 | print("\ninterrupted!") |
|
2071 | 2075 | |
|
2072 | 2076 | if failed: |
|
2073 | 2077 | return 1 |
|
2074 | 2078 | if warned: |
|
2075 | 2079 | return 80 |
|
2076 | 2080 | |
|
2077 | 2081 | def _getport(self, count): |
|
2078 | 2082 | port = self._ports.get(count) # do we have a cached entry? |
|
2079 | 2083 | if port is None: |
|
2080 | 2084 | portneeded = 3 |
|
2081 | 2085 | # after 100 tries we just give up and let the test report the failure
|
2082 | 2086 | for tries in xrange(100): |
|
2083 | 2087 | allfree = True |
|
2084 | 2088 | port = self.options.port + self._portoffset |
|
2085 | 2089 | for idx in xrange(portneeded): |
|
2086 | 2090 | if not checkportisavailable(port + idx): |
|
2087 | 2091 | allfree = False |
|
2088 | 2092 | break |
|
2089 | 2093 | self._portoffset += portneeded |
|
2090 | 2094 | if allfree: |
|
2091 | 2095 | break |
|
2092 | 2096 | self._ports[count] = port |
|
2093 | 2097 | return port |
|
2094 | 2098 | |
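# For example, with --port 20000 the first test receives startport 20000 and
# may also use 20001-20002; each allocation advances the offset by three, so
# the next test gets 20003, and an occupied triple is simply skipped over.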
|
2095 | 2099 | def _gettest(self, test, count): |
|
2096 | 2100 | """Obtain a Test by looking at its filename. |
|
2097 | 2101 | |
|
2098 | 2102 | Returns a Test instance. The Test may not be runnable if it doesn't |
|
2099 | 2103 | map to a known type. |
|
2100 | 2104 | """ |
|
2101 | 2105 | lctest = test.lower() |
|
2102 | 2106 | testcls = Test |
|
2103 | 2107 | |
|
2104 | 2108 | for ext, cls in self.TESTTYPES: |
|
2105 | 2109 | if lctest.endswith(ext): |
|
2106 | 2110 | testcls = cls |
|
2107 | 2111 | break |
|
2108 | 2112 | |
|
2109 | 2113 | refpath = os.path.join(self._testdir, test) |
|
2110 | 2114 | tmpdir = os.path.join(self._hgtmp, b'child%d' % count) |
|
2111 | 2115 | |
|
2112 | 2116 | t = testcls(refpath, tmpdir, |
|
2113 | 2117 | keeptmpdir=self.options.keep_tmpdir, |
|
2114 | 2118 | debug=self.options.debug, |
|
2115 | 2119 | timeout=self.options.timeout, |
|
2116 | 2120 | startport=self._getport(count), |
|
2117 | 2121 | extraconfigopts=self.options.extra_config_opt, |
|
2118 | 2122 | py3kwarnings=self.options.py3k_warnings, |
|
2119 | 2123 | shell=self.options.shell) |
|
2120 | 2124 | t.should_reload = True |
|
2121 | 2125 | return t |
|
2122 | 2126 | |
|
2123 | 2127 | def _cleanup(self): |
|
2124 | 2128 | """Clean up state from this test invocation.""" |
|
2125 | 2129 | |
|
2126 | 2130 | if self.options.keep_tmpdir: |
|
2127 | 2131 | return |
|
2128 | 2132 | |
|
2129 | 2133 | vlog("# Cleaning up HGTMP", self._hgtmp) |
|
2130 | 2134 | shutil.rmtree(self._hgtmp, True) |
|
2131 | 2135 | for f in self._createdfiles: |
|
2132 | 2136 | try: |
|
2133 | 2137 | os.remove(f) |
|
2134 | 2138 | except OSError: |
|
2135 | 2139 | pass |
|
2136 | 2140 | |
|
2137 | 2141 | def _usecorrectpython(self): |
|
2138 | 2142 | """Configure the environment to use the appropriate Python in tests.""" |
|
2139 | 2143 | # Tests must use the same interpreter as us or bad things will happen. |
|
2140 | 2144 | pyexename = sys.platform == 'win32' and b'python.exe' or b'python' |
|
2141 | 2145 | if getattr(os, 'symlink', None): |
|
2142 | 2146 | vlog("# Making python executable in test path a symlink to '%s'" % |
|
2143 | 2147 | sys.executable) |
|
2144 | 2148 | mypython = os.path.join(self._tmpbindir, pyexename) |
|
2145 | 2149 | try: |
|
2146 | 2150 | if os.readlink(mypython) == sys.executable: |
|
2147 | 2151 | return |
|
2148 | 2152 | os.unlink(mypython) |
|
2149 | 2153 | except OSError as err: |
|
2150 | 2154 | if err.errno != errno.ENOENT: |
|
2151 | 2155 | raise |
|
2152 | 2156 | if self._findprogram(pyexename) != sys.executable: |
|
2153 | 2157 | try: |
|
2154 | 2158 | os.symlink(sys.executable, mypython) |
|
2155 | 2159 | self._createdfiles.append(mypython) |
|
2156 | 2160 | except OSError as err: |
|
2157 | 2161 | # child processes may race, which is harmless |
|
2158 | 2162 | if err.errno != errno.EEXIST: |
|
2159 | 2163 | raise |
|
2160 | 2164 | else: |
|
2161 | 2165 | exedir, exename = os.path.split(sys.executable) |
|
2162 | 2166 | vlog("# Modifying search path to find %s as %s in '%s'" % |
|
2163 | 2167 | (exename, pyexename, exedir)) |
|
2164 | 2168 | path = os.environ['PATH'].split(os.pathsep) |
|
2165 | 2169 | while exedir in path: |
|
2166 | 2170 | path.remove(exedir) |
|
2167 | 2171 | os.environ['PATH'] = os.pathsep.join([exedir] + path) |
|
2168 | 2172 | if not self._findprogram(pyexename): |
|
2169 | 2173 | print("WARNING: Cannot find %s in search path" % pyexename) |
|
2170 | 2174 | |
|
2171 | 2175 | def _installhg(self): |
|
2172 | 2176 | """Install hg into the test environment. |
|
2173 | 2177 | |
|
2174 | 2178 | This will also configure hg with the appropriate testing settings. |
|
2175 | 2179 | """ |
|
2176 | 2180 | vlog("# Performing temporary installation of HG") |
|
2177 | 2181 | installerrs = os.path.join(b"tests", b"install.err") |
|
2178 | 2182 | compiler = '' |
|
2179 | 2183 | if self.options.compiler: |
|
2180 | 2184 | compiler = '--compiler ' + self.options.compiler |
|
2181 | 2185 | if self.options.pure: |
|
2182 | 2186 | pure = b"--pure" |
|
2183 | 2187 | else: |
|
2184 | 2188 | pure = b"" |
|
2185 | 2189 | py3 = '' |
|
2186 | 2190 | |
|
2187 | 2191 | # Run installer in hg root |
|
2188 | 2192 | script = os.path.realpath(sys.argv[0]) |
|
2189 | 2193 | exe = sys.executable |
|
2190 | 2194 | if PYTHON3: |
|
2191 | 2195 | py3 = b'--c2to3' |
|
2192 | 2196 | compiler = _bytespath(compiler) |
|
2193 | 2197 | script = _bytespath(script) |
|
2194 | 2198 | exe = _bytespath(exe) |
|
2195 | 2199 | hgroot = os.path.dirname(os.path.dirname(script)) |
|
2196 | 2200 | self._hgroot = hgroot |
|
2197 | 2201 | os.chdir(hgroot) |
|
2198 | 2202 | nohome = b'--home=""' |
|
2199 | 2203 | if os.name == 'nt': |
|
2200 | 2204 | # The --home="" trick works only on OS where os.sep == '/' |
|
2201 | 2205 | # because of a distutils convert_path() fast-path. Avoid it at |
|
2202 | 2206 | # least on Windows for now, deal with .pydistutils.cfg bugs |
|
2203 | 2207 | # when they happen. |
|
2204 | 2208 | nohome = b'' |
|
2205 | 2209 | cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all' |
|
2206 | 2210 | b' build %(compiler)s --build-base="%(base)s"' |
|
2207 | 2211 | b' install --force --prefix="%(prefix)s"' |
|
2208 | 2212 | b' --install-lib="%(libdir)s"' |
|
2209 | 2213 | b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1' |
|
2210 | 2214 | % {b'exe': exe, b'py3': py3, b'pure': pure, |
|
2211 | 2215 | b'compiler': compiler, |
|
2212 | 2216 | b'base': os.path.join(self._hgtmp, b"build"), |
|
2213 | 2217 | b'prefix': self._installdir, b'libdir': self._pythondir, |
|
2214 | 2218 | b'bindir': self._bindir, |
|
2215 | 2219 | b'nohome': nohome, b'logfile': installerrs}) |
|
2216 | 2220 | |
|
2217 | 2221 | # setuptools requires install directories to exist. |
|
2218 | 2222 | def makedirs(p): |
|
2219 | 2223 | try: |
|
2220 | 2224 | os.makedirs(p) |
|
2221 | 2225 | except OSError as e: |
|
2222 | 2226 | if e.errno != errno.EEXIST: |
|
2223 | 2227 | raise |
|
2224 | 2228 | makedirs(self._pythondir) |
|
2225 | 2229 | makedirs(self._bindir) |
|
2226 | 2230 | |
|
2227 | 2231 | vlog("# Running", cmd) |
|
2228 | 2232 | if os.system(cmd) == 0: |
|
2229 | 2233 | if not self.options.verbose: |
|
2230 | 2234 | try: |
|
2231 | 2235 | os.remove(installerrs) |
|
2232 | 2236 | except OSError as e: |
|
2233 | 2237 | if e.errno != errno.ENOENT: |
|
2234 | 2238 | raise |
|
2235 | 2239 | else: |
|
2236 | 2240 | f = open(installerrs, 'rb') |
|
2237 | 2241 | for line in f: |
|
2238 | 2242 | if PYTHON3: |
|
2239 | 2243 | sys.stdout.buffer.write(line) |
|
2240 | 2244 | else: |
|
2241 | 2245 | sys.stdout.write(line) |
|
2242 | 2246 | f.close() |
|
2243 | 2247 | sys.exit(1) |
|
2244 | 2248 | os.chdir(self._testdir) |
|
2245 | 2249 | |
|
2246 | 2250 | self._usecorrectpython() |
|
2247 | 2251 | |
|
2248 | 2252 | if self.options.py3k_warnings and not self.options.anycoverage: |
|
2249 | 2253 | vlog("# Updating hg command to enable Py3k Warnings switch") |
|
2250 | 2254 | f = open(os.path.join(self._bindir, 'hg'), 'rb') |
|
2251 | 2255 | lines = [line.rstrip() for line in f] |
|
2252 | 2256 | lines[0] += ' -3' |
|
2253 | 2257 | f.close() |
|
2254 | 2258 | f = open(os.path.join(self._bindir, 'hg'), 'wb') |
|
2255 | 2259 | for line in lines: |
|
2256 | 2260 | f.write(line + '\n') |
|
2257 | 2261 | f.close() |
|
2258 | 2262 | |
|
2259 | 2263 | hgbat = os.path.join(self._bindir, b'hg.bat') |
|
2260 | 2264 | if os.path.isfile(hgbat): |
|
2261 | 2265 | # hg.bat expects to be put in bin/scripts while run-tests.py |
|
2262 | 2266 | # installation layout puts it in bin/ directly. Fix it
|
2263 | 2267 | f = open(hgbat, 'rb') |
|
2264 | 2268 | data = f.read() |
|
2265 | 2269 | f.close() |
|
2266 | 2270 | if b'"%~dp0..\python" "%~dp0hg" %*' in data: |
|
2267 | 2271 | data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*', |
|
2268 | 2272 | b'"%~dp0python" "%~dp0hg" %*') |
|
2269 | 2273 | f = open(hgbat, 'wb') |
|
2270 | 2274 | f.write(data) |
|
2271 | 2275 | f.close() |
|
2272 | 2276 | else: |
|
2273 | 2277 | print('WARNING: cannot fix hg.bat reference to python.exe') |
|
2274 | 2278 | |
|
2275 | 2279 | if self.options.anycoverage: |
|
2276 | 2280 | custom = os.path.join(self._testdir, 'sitecustomize.py') |
|
2277 | 2281 | target = os.path.join(self._pythondir, 'sitecustomize.py') |
|
2278 | 2282 | vlog('# Installing coverage trigger to %s' % target) |
|
2279 | 2283 | shutil.copyfile(custom, target) |
|
2280 | 2284 | rc = os.path.join(self._testdir, '.coveragerc') |
|
2281 | 2285 | vlog('# Installing coverage rc to %s' % rc) |
|
2282 | 2286 | os.environ['COVERAGE_PROCESS_START'] = rc |
|
2283 | 2287 | covdir = os.path.join(self._installdir, '..', 'coverage') |
|
2284 | 2288 | try: |
|
2285 | 2289 | os.mkdir(covdir) |
|
2286 | 2290 | except OSError as e: |
|
2287 | 2291 | if e.errno != errno.EEXIST: |
|
2288 | 2292 | raise |
|
2289 | 2293 | |
|
2290 | 2294 | os.environ['COVERAGE_DIR'] = covdir |
|
2291 | 2295 | |
|
2292 | 2296 | def _checkhglib(self, verb): |
|
2293 | 2297 | """Ensure that the 'mercurial' package imported by python is |
|
2294 | 2298 | the one we expect it to be. If not, print a warning to stderr.""" |
|
2295 | 2299 | if ((self._bindir == self._pythondir) and |
|
2296 | 2300 | (self._bindir != self._tmpbindir)): |
|
2297 | 2301 | # The pythondir has been inferred from --with-hg flag. |
|
2298 | 2302 | # We cannot expect anything sensible here. |
|
2299 | 2303 | return |
|
2300 | 2304 | expecthg = os.path.join(self._pythondir, b'mercurial') |
|
2301 | 2305 | actualhg = self._gethgpath() |
|
2302 | 2306 | if os.path.abspath(actualhg) != os.path.abspath(expecthg): |
|
2303 | 2307 | sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n' |
|
2304 | 2308 | ' (expected %s)\n' |
|
2305 | 2309 | % (verb, actualhg, expecthg)) |
|
2306 | 2310 | def _gethgpath(self): |
|
2307 | 2311 | """Return the path to the mercurial package that is actually found by |
|
2308 | 2312 | the current Python interpreter.""" |
|
2309 | 2313 | if self._hgpath is not None: |
|
2310 | 2314 | return self._hgpath |
|
2311 | 2315 | |
|
2312 | 2316 | cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"' |
|
2313 | 2317 | cmd = cmd % PYTHON |
|
2314 | 2318 | if PYTHON3: |
|
2315 | 2319 | cmd = _strpath(cmd) |
|
2316 | 2320 | pipe = os.popen(cmd) |
|
2317 | 2321 | try: |
|
2318 | 2322 | self._hgpath = _bytespath(pipe.read().strip()) |
|
2319 | 2323 | finally: |
|
2320 | 2324 | pipe.close() |
|
2321 | 2325 | |
|
2322 | 2326 | return self._hgpath |
|
2323 | 2327 | |
|
2324 | 2328 | def _outputcoverage(self): |
|
2325 | 2329 | """Produce code coverage output.""" |
|
2326 | 2330 | from coverage import coverage |
|
2327 | 2331 | |
|
2328 | 2332 | vlog('# Producing coverage report') |
|
2329 | 2333 | # chdir is the easiest way to get short, relative paths in the |
|
2330 | 2334 | # output. |
|
2331 | 2335 | os.chdir(self._hgroot) |
|
2332 | 2336 | covdir = os.path.join(self._installdir, '..', 'coverage') |
|
2333 | 2337 | cov = coverage(data_file=os.path.join(covdir, 'cov')) |
|
2334 | 2338 | |
|
2335 | 2339 | # Map install directory paths back to source directory. |
|
2336 | 2340 | cov.config.paths['srcdir'] = ['.', self._pythondir] |
|
2337 | 2341 | |
|
2338 | 2342 | cov.combine() |
|
2339 | 2343 | |
|
2340 | 2344 | omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]] |
|
2341 | 2345 | cov.report(ignore_errors=True, omit=omit) |
|
2342 | 2346 | |
|
2343 | 2347 | if self.options.htmlcov: |
|
2344 | 2348 | htmldir = os.path.join(self._testdir, 'htmlcov') |
|
2345 | 2349 | cov.html_report(directory=htmldir, omit=omit) |
|
2346 | 2350 | if self.options.annotate: |
|
2347 | 2351 | adir = os.path.join(self._testdir, 'annotated') |
|
2348 | 2352 | if not os.path.isdir(adir): |
|
2349 | 2353 | os.mkdir(adir) |
|
2350 | 2354 | cov.annotate(directory=adir, omit=omit) |
|
2351 | 2355 | |
|
2352 | 2356 | def _findprogram(self, program): |
|
2353 | 2357 | """Search PATH for an executable program"""
|
2354 | 2358 | dpb = _bytespath(os.defpath) |
|
2355 | 2359 | sepb = _bytespath(os.pathsep) |
|
2356 | 2360 | for p in osenvironb.get(b'PATH', dpb).split(sepb): |
|
2357 | 2361 | name = os.path.join(p, program) |
|
2358 | 2362 | if os.name == 'nt' or os.access(name, os.X_OK): |
|
2359 | 2363 | return name |
|
2360 | 2364 | return None |
|
2361 | 2365 | |
|
2362 | 2366 | def _checktools(self): |
|
2363 | 2367 | """Ensure tools required to run tests are present.""" |
|
2364 | 2368 | for p in self.REQUIREDTOOLS: |
|
2365 | 2369 | if os.name == 'nt' and not p.endswith('.exe'): |
|
2366 | 2370 | p += '.exe' |
|
2367 | 2371 | found = self._findprogram(p) |
|
2368 | 2372 | if found: |
|
2369 | 2373 | vlog("# Found prerequisite", p, "at", found) |
|
2370 | 2374 | else: |
|
2371 | 2375 | print("WARNING: Did not find prerequisite tool: %s " % p) |
|
2372 | 2376 | |
|
2373 | 2377 | if __name__ == '__main__': |
|
2374 | 2378 | runner = TestRunner() |
|
2375 | 2379 | |
|
2376 | 2380 | try: |
|
2377 | 2381 | import msvcrt |
|
2378 | 2382 | msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) |
|
2379 | 2383 | msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) |
|
2380 | 2384 | msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) |
|
2381 | 2385 | except ImportError: |
|
2382 | 2386 | pass |
|
2383 | 2387 | |
|
2384 | 2388 | sys.exit(runner.run(sys.argv[1:])) |
@@ -1,568 +1,628 b'' | |||
|
1 | 1 | $ cat << EOF >> $HGRCPATH |
|
2 | 2 | > [format] |
|
3 | 3 | > usegeneraldelta=yes |
|
4 | 4 | > [ui] |
|
5 | 5 | > ssh=python "$TESTDIR/dummyssh" |
|
6 | 6 | > EOF |
|
7 | 7 | |
|
8 | 8 | Set up repo |
|
9 | 9 | |
|
10 | 10 | $ hg --config experimental.treemanifest=True init repo |
|
11 | 11 | $ cd repo |
|
12 | 12 | |
|
13 | 13 | Requirements get set on init |
|
14 | 14 | |
|
15 | 15 | $ grep treemanifest .hg/requires |
|
16 | 16 | treemanifest |
|
17 | 17 | |
|
18 | 18 | Without directories, looks like any other repo |
|
19 | 19 | |
|
20 | 20 | $ echo 0 > a |
|
21 | 21 | $ echo 0 > b |
|
22 | 22 | $ hg ci -Aqm initial |
|
23 | 23 | $ hg debugdata -m 0 |
|
24 | 24 | a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc) |
|
25 | 25 | b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc) |
|
26 | 26 | |
|
27 | 27 | Submanifest is stored in separate revlog |
|
28 | 28 | |
|
29 | 29 | $ mkdir dir1 |
|
30 | 30 | $ echo 1 > dir1/a |
|
31 | 31 | $ echo 1 > dir1/b |
|
32 | 32 | $ echo 1 > e |
|
33 | 33 | $ hg ci -Aqm 'add dir1' |
|
34 | 34 | $ hg debugdata -m 1 |
|
35 | 35 | a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc) |
|
36 | 36 | b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc) |
|
37 | 37 | dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc) |
|
38 | 38 | e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc) |
|
39 | 39 | $ hg debugdata --dir dir1 0 |
|
40 | 40 | a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc) |
|
41 | 41 | b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc) |
|
42 | 42 | |
|
43 | 43 | Can add nested directories |
|
44 | 44 | |
|
45 | 45 | $ mkdir dir1/dir1 |
|
46 | 46 | $ echo 2 > dir1/dir1/a |
|
47 | 47 | $ echo 2 > dir1/dir1/b |
|
48 | 48 | $ mkdir dir1/dir2 |
|
49 | 49 | $ echo 2 > dir1/dir2/a |
|
50 | 50 | $ echo 2 > dir1/dir2/b |
|
51 | 51 | $ hg ci -Aqm 'add dir1/dir1' |
|
52 | 52 | $ hg files -r . |
|
53 | 53 | a |
|
54 | 54 | b |
|
55 | 55 | dir1/a (glob) |
|
56 | 56 | dir1/b (glob) |
|
57 | 57 | dir1/dir1/a (glob) |
|
58 | 58 | dir1/dir1/b (glob) |
|
59 | 59 | dir1/dir2/a (glob) |
|
60 | 60 | dir1/dir2/b (glob) |
|
61 | 61 | e |
|
62 | 62 | |
|
63 | 63 | Revision is not created for unchanged directory |
|
64 | 64 | |
|
65 | 65 | $ mkdir dir2 |
|
66 | 66 | $ echo 3 > dir2/a |
|
67 | 67 | $ hg add dir2 |
|
68 | 68 | adding dir2/a (glob) |
|
69 | 69 | $ hg debugindex --dir dir1 > before |
|
70 | 70 | $ hg ci -qm 'add dir2' |
|
71 | 71 | $ hg debugindex --dir dir1 > after |
|
72 | 72 | $ diff before after |
|
73 | 73 | $ rm before after |
|
74 | 74 | |
|
75 | 75 | Removing directory does not create an revlog entry |
|
76 | 76 | |
|
77 | 77 | $ hg rm dir1/dir1 |
|
78 | 78 | removing dir1/dir1/a (glob) |
|
79 | 79 | removing dir1/dir1/b (glob) |
|
80 | 80 | $ hg debugindex --dir dir1/dir1 > before |
|
81 | 81 | $ hg ci -qm 'remove dir1/dir1' |
|
82 | 82 | $ hg debugindex --dir dir1/dir1 > after |
|
83 | 83 | $ diff before after |
|
84 | 84 | $ rm before after |
|
85 | 85 | |
|
86 | 86 | Check that hg files (calls treemanifest.walk()) works |
|
87 | 87 | without loading all directory revlogs |
|
88 | 88 | |
|
89 | 89 | $ hg co 'desc("add dir2")' |
|
90 | 90 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
91 | 91 | $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup |
|
92 | 92 | $ hg files -r . dir1 |
|
93 | 93 | dir1/a (glob) |
|
94 | 94 | dir1/b (glob) |
|
95 | 95 | dir1/dir1/a (glob) |
|
96 | 96 | dir1/dir1/b (glob) |
|
97 | 97 | dir1/dir2/a (glob) |
|
98 | 98 | dir1/dir2/b (glob) |
|
99 | 99 | |
|
100 | 100 | Check that status between revisions works (calls treemanifest.matches()) |
|
101 | 101 | without loading all directory revlogs |
|
102 | 102 | |
|
103 | 103 | $ hg status --rev 'desc("add dir1")' --rev . dir1 |
|
104 | 104 | A dir1/dir1/a |
|
105 | 105 | A dir1/dir1/b |
|
106 | 106 | A dir1/dir2/a |
|
107 | 107 | A dir1/dir2/b |
|
108 | 108 | $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2 |
|
109 | 109 | |
|
110 | 110 | Merge creates 2-parent revision of directory revlog |
|
111 | 111 | |
|
112 | 112 | $ echo 5 > dir1/a |
|
113 | 113 | $ hg ci -Aqm 'modify dir1/a' |
|
114 | 114 | $ hg co '.^' |
|
115 | 115 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
116 | 116 | $ echo 6 > dir1/b |
|
117 | 117 | $ hg ci -Aqm 'modify dir1/b' |
|
118 | 118 | $ hg merge 'desc("modify dir1/a")' |
|
119 | 119 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
120 | 120 | (branch merge, don't forget to commit) |
|
121 | 121 | $ hg ci -m 'conflict-free merge involving dir1/' |
|
122 | 122 | $ cat dir1/a |
|
123 | 123 | 5 |
|
124 | 124 | $ cat dir1/b |
|
125 | 125 | 6 |
|
126 | 126 | $ hg debugindex --dir dir1 |
|
127 | 127 | rev offset length delta linkrev nodeid p1 p2 |
|
128 | 128 | 0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000 |
|
129 | 129 | 1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000 |
|
130 | 130 | 2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000 |
|
131 | 131 | 3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000 |
|
132 | 132 | 4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000 |
|
133 | 133 | 5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce |
|
134 | 134 | |
|
135 | 135 | Merge keeping directory from parent 1 does not create revlog entry. (Note that |
|
136 | 136 | dir1's manifest does change, but only because dir1/a's filelog changes.) |
|
137 | 137 | |
|
138 | 138 | $ hg co 'desc("add dir2")' |
|
139 | 139 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
140 | 140 | $ echo 8 > dir2/a |
|
141 | 141 | $ hg ci -m 'modify dir2/a' |
|
142 | 142 | created new head |
|
143 | 143 | |
|
144 | 144 | $ hg debugindex --dir dir2 > before |
|
145 | 145 | $ hg merge 'desc("modify dir1/a")' |
|
146 | 146 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
147 | 147 | (branch merge, don't forget to commit) |
|
148 | 148 | $ hg revert -r 'desc("modify dir2/a")' . |
|
149 | 149 | reverting dir1/a (glob) |
|
150 | 150 | $ hg ci -m 'merge, keeping parent 1' |
|
151 | 151 | $ hg debugindex --dir dir2 > after |
|
152 | 152 | $ diff before after |
|
153 | 153 | $ rm before after |
|
154 | 154 | |
|
155 | 155 | Merge keeping directory from parent 2 does not create revlog entry. (Note that |
|
156 | 156 | dir2's manifest does change, but only because dir2/a's filelog changes.) |
|
157 | 157 | |
|
158 | 158 | $ hg co 'desc("modify dir2/a")' |
|
159 | 159 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
160 | 160 | $ hg debugindex --dir dir1 > before |
|
161 | 161 | $ hg merge 'desc("modify dir1/a")' |
|
162 | 162 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
163 | 163 | (branch merge, don't forget to commit) |
|
164 | 164 | $ hg revert -r 'desc("modify dir1/a")' . |
|
165 | 165 | reverting dir2/a (glob) |
|
166 | 166 | $ hg ci -m 'merge, keeping parent 2' |
|
167 | 167 | created new head |
|
168 | 168 | $ hg debugindex --dir dir1 > after |
|
169 | 169 | $ diff before after |
|
170 | 170 | $ rm before after |
|
171 | 171 | |
|
172 | 172 | Create flat source repo for tests with mixed flat/tree manifests |
|
173 | 173 | |
|
174 | 174 | $ cd .. |
|
175 | 175 | $ hg init repo-flat |
|
176 | 176 | $ cd repo-flat |
|
177 | 177 | |
|
178 | 178 | Create a few commits with flat manifest |
|
179 | 179 | |
|
180 | 180 | $ echo 0 > a |
|
181 | 181 | $ echo 0 > b |
|
182 | 182 | $ echo 0 > e |
|
183 | 183 | $ for d in dir1 dir1/dir1 dir1/dir2 dir2 |
|
184 | 184 | > do |
|
185 | 185 | > mkdir $d |
|
186 | 186 | > echo 0 > $d/a |
|
187 | 187 | > echo 0 > $d/b |
|
188 | 188 | > done |
|
189 | 189 | $ hg ci -Aqm initial |
|
190 | 190 | |
|
191 | 191 | $ echo 1 > a |
|
192 | 192 | $ echo 1 > dir1/a |
|
193 | 193 | $ echo 1 > dir1/dir1/a |
|
194 | 194 | $ hg ci -Aqm 'modify on branch 1' |
|
195 | 195 | |
|
196 | 196 | $ hg co 0 |
|
197 | 197 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
198 | 198 | $ echo 2 > b |
|
199 | 199 | $ echo 2 > dir1/b |
|
200 | 200 | $ echo 2 > dir1/dir1/b |
|
201 | 201 | $ hg ci -Aqm 'modify on branch 2' |
|
202 | 202 | |
|
203 | 203 | $ hg merge 1 |
|
204 | 204 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
205 | 205 | (branch merge, don't forget to commit) |
|
206 | 206 | $ hg ci -m 'merge of flat manifests to new flat manifest' |
|
207 | 207 | |
|
208 | 208 | $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log |
|
209 | 209 | $ cat hg.pid >> $DAEMON_PIDS |
|
210 | 210 | |
|
211 | 211 | Create clone with tree manifests enabled |
|
212 | 212 | |
|
213 | 213 | $ cd .. |
|
214 | 214 | $ hg clone --config experimental.treemanifest=1 \ |
|
215 | 215 | > http://localhost:$HGPORT repo-mixed -r 1 |
|
216 | 216 | adding changesets |
|
217 | 217 | adding manifests |
|
218 | 218 | adding file changes |
|
219 | 219 | added 2 changesets with 14 changes to 11 files |
|
220 | 220 | updating to branch default |
|
221 | 221 | 11 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
222 | 222 | $ cd repo-mixed |
|
223 | 223 | $ test -d .hg/store/meta |
|
224 | 224 | [1] |
|
225 | 225 | $ grep treemanifest .hg/requires |
|
226 | 226 | treemanifest |
|
227 | 227 | |
|
228 | 228 | Should be possible to push updates from flat to tree manifest repo |
|
229 | 229 | |
|
230 | 230 | $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed |
|
231 | 231 | pushing to ssh://user@dummy/repo-mixed |
|
232 | 232 | searching for changes |
|
233 | 233 | remote: adding changesets |
|
234 | 234 | remote: adding manifests |
|
235 | 235 | remote: adding file changes |
|
236 | 236 | remote: added 2 changesets with 3 changes to 3 files |
|
237 | 237 | |
|
238 | 238 | Commit should store revlog per directory |
|
239 | 239 | |
|
240 | 240 | $ hg co 1 |
|
241 | 241 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
242 | 242 | $ echo 3 > a |
|
243 | 243 | $ echo 3 > dir1/a |
|
244 | 244 | $ echo 3 > dir1/dir1/a |
|
245 | 245 | $ hg ci -m 'first tree' |
|
246 | 246 | created new head |
|
247 | 247 | $ find .hg/store/meta | sort |
|
248 | 248 | .hg/store/meta |
|
249 | 249 | .hg/store/meta/dir1 |
|
250 | 250 | .hg/store/meta/dir1/00manifest.i |
|
251 | 251 | .hg/store/meta/dir1/dir1 |
|
252 | 252 | .hg/store/meta/dir1/dir1/00manifest.i |
|
253 | 253 | .hg/store/meta/dir1/dir2 |
|
254 | 254 | .hg/store/meta/dir1/dir2/00manifest.i |
|
255 | 255 | .hg/store/meta/dir2 |
|
256 | 256 | .hg/store/meta/dir2/00manifest.i |
|
257 | 257 | |
|
258 | 258 | Merge of two trees |
|
259 | 259 | |
|
260 | 260 | $ hg co 2 |
|
261 | 261 | 6 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
262 | 262 | $ hg merge 1 |
|
263 | 263 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
264 | 264 | (branch merge, don't forget to commit) |
|
265 | 265 | $ hg ci -m 'merge of flat manifests to new tree manifest' |
|
266 | 266 | created new head |
|
267 | 267 | $ hg diff -r 3 |
|
268 | 268 | |
|
269 | 269 | Parent of tree root manifest should be flat manifest, and two for merge |
|
270 | 270 | |
|
271 | 271 | $ hg debugindex -m |
|
272 | 272 | rev offset length delta linkrev nodeid p1 p2 |
|
273 | 273 | 0 0 80 -1 0 40536115ed9e 000000000000 000000000000 |
|
274 | 274 | 1 80 83 0 1 f3376063c255 40536115ed9e 000000000000 |
|
275 | 275 | 2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000 |
|
276 | 276 | 3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255 |
|
277 | 277 | 4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000 |
|
278 | 278 | 5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255 |
|
279 | 279 | |
|
280 | 280 | |
|
281 | 281 | Status across flat/tree boundary should work |
|
282 | 282 | |
|
283 | 283 | $ hg status --rev '.^' --rev . |
|
284 | 284 | M a |
|
285 | 285 | M dir1/a |
|
286 | 286 | M dir1/dir1/a |
|
287 | 287 | |
|
288 | 288 | |
|
289 | 289 | Turning off treemanifest config has no effect |
|
290 | 290 | |
|
291 | 291 | $ hg debugindex --dir dir1 |
|
292 | 292 | rev offset length delta linkrev nodeid p1 p2 |
|
293 | 293 | 0 0 127 -1 4 064927a0648a 000000000000 000000000000 |
|
294 | 294 | 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000 |
|
295 | 295 | $ echo 2 > dir1/a |
|
296 | 296 | $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a' |
|
297 | 297 | $ hg debugindex --dir dir1 |
|
298 | 298 | rev offset length delta linkrev nodeid p1 p2 |
|
299 | 299 | 0 0 127 -1 4 064927a0648a 000000000000 000000000000 |
|
300 | 300 | 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000 |
|
301 | 301 | 2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000 |
|
302 | 302 | |
|
303 | 303 | Stripping and recovering changes should work |
|
304 | 304 | |
|
305 | 305 | $ hg st --change tip |
|
306 | 306 | M dir1/a |
|
307 | 307 | $ hg --config extensions.strip= strip tip |
|
308 | 308 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
309 | 309 | saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob) |
|
310 | 310 | $ hg unbundle -q .hg/strip-backup/* |
|
311 | 311 | $ hg st --change tip |
|
312 | 312 | M dir1/a |
|
313 | 313 | |
|
314 | 314 | Shelving and unshelving should work |
|
315 | 315 | |
|
316 | 316 | $ echo foo >> dir1/a |
|
317 | 317 | $ hg --config extensions.shelve= shelve |
|
318 | 318 | shelved as default |
|
319 | 319 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
320 | 320 | $ hg --config extensions.shelve= unshelve |
|
321 | 321 | unshelving change 'default' |
|
322 | 322 | $ hg diff --nodates |
|
323 | 323 | diff -r 708a273da119 dir1/a |
|
324 | 324 | --- a/dir1/a |
|
325 | 325 | +++ b/dir1/a |
|
326 | 326 | @@ -1,1 +1,2 @@ |
|
327 | 327 | 1 |
|
328 | 328 | +foo |
|
329 | 329 | |
|
330 | 330 | Pushing from treemanifest repo to an empty repo makes that a treemanifest repo |
|
331 | 331 | |
|
332 | 332 | $ cd .. |
|
333 | 333 | $ hg init empty-repo |
|
334 | 334 | $ cat << EOF >> empty-repo/.hg/hgrc |
|
335 | 335 | > [experimental] |
|
336 | 336 | > changegroup3=yes |
|
337 | 337 | > EOF |
|
338 | 338 | $ grep treemanifest empty-repo/.hg/requires |
|
339 | 339 | [1] |
|
340 | 340 | $ hg push -R repo -r 0 empty-repo |
|
341 | 341 | pushing to empty-repo |
|
342 | 342 | searching for changes |
|
343 | 343 | adding changesets |
|
344 | 344 | adding manifests |
|
345 | 345 | adding file changes |
|
346 | 346 | added 1 changesets with 2 changes to 2 files |
|
347 | 347 | $ grep treemanifest empty-repo/.hg/requires |
|
348 | 348 | treemanifest |
|
349 | 349 | |
|
350 | 350 | Pushing to an empty repo works |
|
351 | 351 | |
|
352 | 352 | $ hg --config experimental.treemanifest=1 init clone |
|
353 | 353 | $ grep treemanifest clone/.hg/requires |
|
354 | 354 | treemanifest |
|
355 | 355 | $ hg push -R repo clone |
|
356 | 356 | pushing to clone |
|
357 | 357 | searching for changes |
|
358 | 358 | adding changesets |
|
359 | 359 | adding manifests |
|
360 | 360 | adding file changes |
|
361 | 361 | added 11 changesets with 15 changes to 10 files (+3 heads) |
|
362 | 362 | $ grep treemanifest clone/.hg/requires |
|
363 | 363 | treemanifest |
|
364 | 364 | |
|
365 | 365 | Create deeper repo with tree manifests. |
|
366 | 366 | |
|
367 | 367 | $ hg --config experimental.treemanifest=True init deeprepo |
|
368 | 368 | $ cd deeprepo |
|
369 | 369 | |
|
370 | $ mkdir a | |

370 | $ mkdir .A | |
|
371 | 371 | $ mkdir b |
|
372 | 372 | $ mkdir b/bar |
|
373 | 373 | $ mkdir b/bar/orange |
|
374 | 374 | $ mkdir b/bar/orange/fly |
|
375 | 375 | $ mkdir b/foo |
|
376 | 376 | $ mkdir b/foo/apple |
|
377 | 377 | $ mkdir b/foo/apple/bees |
|
378 | 378 | |
|
379 | $ touch a/one.txt | |

380 | $ touch a/two.txt | |

379 | $ touch .A/one.txt | |

380 | $ touch .A/two.txt | |
|
381 | 381 | $ touch b/bar/fruits.txt |
|
382 | 382 | $ touch b/bar/orange/fly/gnat.py |
|
383 | 383 | $ touch b/bar/orange/fly/housefly.txt |
|
384 | 384 | $ touch b/foo/apple/bees/flower.py |
|
385 | 385 | $ touch c.txt |
|
386 | 386 | $ touch d.py |
|
387 | 387 | |
|
388 | 388 | $ hg ci -Aqm 'initial' |
|
389 | 389 | |
|
390 | 390 | We'll see that visitdir works by removing some treemanifest revlogs and running |
|
391 | 391 | the files command with various parameters. |
|
392 | 392 | |
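
The mechanism under test: with tree manifests each directory has its own manifest revlog, and a walk only opens the revlogs that the matcher's visitdir() approves, which is why deleting the unvisited ones below goes unnoticed. A rough sketch of the idea (hypothetical repo root, match API of this era):

    from mercurial import match as matchmod

    m = matchmod.match('/repo', '', [],
                       include=['path:b/bar'],
                       exclude=['path:b/bar/orange'])
    print m.visitdir('b/bar')          # truthy: this directory's revlog is read
    print m.visitdir('b/bar/orange')   # False: revlog never opened, so never missed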
|
393 | 393 | Test files from the root. |
|
394 | 394 | |
|
395 | 395 | $ hg files -r . |
|
396 | a/one.txt (glob) | |

397 | a/two.txt (glob) | |

396 | .A/one.txt (glob) | |

397 | .A/two.txt (glob) | |
|
398 | 398 | b/bar/fruits.txt (glob) |
|
399 | 399 | b/bar/orange/fly/gnat.py (glob) |
|
400 | 400 | b/bar/orange/fly/housefly.txt (glob) |
|
401 | 401 | b/foo/apple/bees/flower.py (glob) |
|
402 | 402 | c.txt |
|
403 | 403 | d.py |
|
404 | 404 | |
|
405 | 405 | Excludes with a glob should not exclude everything from the glob's root |
|
406 | 406 | |
|
407 | 407 | $ hg files -r . -X 'b/fo?' b |
|
408 | 408 | b/bar/fruits.txt (glob) |
|
409 | 409 | b/bar/orange/fly/gnat.py (glob) |
|
410 | 410 | b/bar/orange/fly/housefly.txt (glob) |
|
411 | 411 | $ cp -r .hg/store .hg/store-copy |
|
412 | 412 | |
|
413 | 413 | Test files for a subdirectory. |
|
414 | 414 | |
|
415 | $ rm -r .hg/store/meta/a | |
|
415 | $ rm -r .hg/store/meta/~2e_a | |
|
416 | 416 | $ hg files -r . b |
|
417 | 417 | b/bar/fruits.txt (glob) |
|
418 | 418 | b/bar/orange/fly/gnat.py (glob) |
|
419 | 419 | b/bar/orange/fly/housefly.txt (glob) |
|
420 | 420 | b/foo/apple/bees/flower.py (glob) |
|
421 | 421 | $ cp -rT .hg/store-copy .hg/store |
|
422 | 422 | |
|
423 | 423 | Test files with just includes and excludes. |
|
424 | 424 | |
|
425 | $ rm -r .hg/store/meta/a | |
|
425 | $ rm -r .hg/store/meta/~2e_a | |
|
426 | 426 | $ rm -r .hg/store/meta/b/bar/orange/fly |
|
427 | 427 | $ rm -r .hg/store/meta/b/foo/apple/bees |
|
428 | 428 | $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees |
|
429 | 429 | b/bar/fruits.txt (glob) |
|
430 | 430 | $ cp -rT .hg/store-copy .hg/store |
|
431 | 431 | |
|
432 | 432 | Test files for a subdirectory, excluding a directory within it. |
|
433 | 433 | |
|
434 | $ rm -r .hg/store/meta/a | |
|
434 | $ rm -r .hg/store/meta/~2e_a | |
|
435 | 435 | $ rm -r .hg/store/meta/b/foo |
|
436 | 436 | $ hg files -r . -X path:b/foo b |
|
437 | 437 | b/bar/fruits.txt (glob) |
|
438 | 438 | b/bar/orange/fly/gnat.py (glob) |
|
439 | 439 | b/bar/orange/fly/housefly.txt (glob) |
|
440 | 440 | $ cp -rT .hg/store-copy .hg/store |
|
441 | 441 | |
|
442 | 442 | Test files for a subdirectory, including only a directory within it, and
|
443 | 443 | including an unrelated directory. |
|
444 | 444 | |
|
445 | $ rm -r .hg/store/meta/a | |
|
445 | $ rm -r .hg/store/meta/~2e_a | |
|
446 | 446 | $ rm -r .hg/store/meta/b/foo |
|
447 | 447 | $ hg files -r . -I path:b/bar/orange -I path:a b |
|
448 | 448 | b/bar/orange/fly/gnat.py (glob) |
|
449 | 449 | b/bar/orange/fly/housefly.txt (glob) |
|
450 | 450 | $ cp -rT .hg/store-copy .hg/store |
|
451 | 451 | |
|
452 | 452 | Test files for a pattern, including a directory, and excluding a directory |
|
453 | 453 | within that. |
|
454 | 454 | |
|
455 | $ rm -r .hg/store/meta/a | |
|
455 | $ rm -r .hg/store/meta/~2e_a | |
|
456 | 456 | $ rm -r .hg/store/meta/b/foo |
|
457 | 457 | $ rm -r .hg/store/meta/b/bar/orange |
|
458 | 458 | $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange |
|
459 | 459 | b/bar/fruits.txt (glob) |
|
460 | 460 | $ cp -rT .hg/store-copy .hg/store |
|
461 | 461 | |
|
462 | 462 | Add some more changes to the deep repo |
|
463 | 463 | $ echo narf >> b/bar/fruits.txt |
|
464 | 464 | $ hg ci -m narf |
|
465 | 465 | $ echo troz >> b/bar/orange/fly/gnat.py |
|
466 | 466 | $ hg ci -m troz |
|
467 | 467 | |
|
468 | 468 | Test cloning a treemanifest repo over http. |
|
469 | 469 | $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log |
|
470 | 470 | $ cat hg.pid >> $DAEMON_PIDS |
|
471 | 471 | $ cd .. |
|
472 | 472 | We can clone even with the knob turned off and we'll get a treemanifest repo. |
|
473 | 473 | $ hg clone --config experimental.treemanifest=False \ |
|
474 | 474 | > --config experimental.changegroup3=True \ |
|
475 | 475 | > http://localhost:$HGPORT2 deepclone |
|
476 | 476 | requesting all changes |
|
477 | 477 | adding changesets |
|
478 | 478 | adding manifests |
|
479 | 479 | adding file changes |
|
480 | 480 | added 3 changesets with 10 changes to 8 files |
|
481 | 481 | updating to branch default |
|
482 | 482 | 8 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
483 | 483 | No server errors. |
|
484 | 484 | $ cat deeprepo/errors.log |
|
485 | 485 | requires got updated to include treemanifest |
|
486 | 486 | $ cat deepclone/.hg/requires | grep treemanifest |
|
487 | 487 | treemanifest |
|
488 | 488 | Tree manifest revlogs exist. |
|
489 | 489 | $ find deepclone/.hg/store/meta | sort |
|
490 | 490 | deepclone/.hg/store/meta |
|
491 | deepclone/.hg/store/meta/a | |
|
492 | deepclone/.hg/store/meta/a/00manifest.i | |
|
493 | 491 | deepclone/.hg/store/meta/b |
|
494 | 492 | deepclone/.hg/store/meta/b/00manifest.i |
|
495 | 493 | deepclone/.hg/store/meta/b/bar |
|
496 | 494 | deepclone/.hg/store/meta/b/bar/00manifest.i |
|
497 | 495 | deepclone/.hg/store/meta/b/bar/orange |
|
498 | 496 | deepclone/.hg/store/meta/b/bar/orange/00manifest.i |
|
499 | 497 | deepclone/.hg/store/meta/b/bar/orange/fly |
|
500 | 498 | deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i |
|
501 | 499 | deepclone/.hg/store/meta/b/foo |
|
502 | 500 | deepclone/.hg/store/meta/b/foo/00manifest.i |
|
503 | 501 | deepclone/.hg/store/meta/b/foo/apple |
|
504 | 502 | deepclone/.hg/store/meta/b/foo/apple/00manifest.i |
|
505 | 503 | deepclone/.hg/store/meta/b/foo/apple/bees |
|
506 | 504 | deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i |
|
505 | deepclone/.hg/store/meta/~2e_a | |
|
506 | deepclone/.hg/store/meta/~2e_a/00manifest.i | |
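
The '~2e_a' entries show the store's hybrid filename encoding: with dotencode, the leading '.' of '.A' is escaped as '~2e' and the uppercase 'A' becomes '_a'. A sketch using a private helper, so treat it as illustrative only (internal API, may change between versions):

    from mercurial.store import _hybridencode

    print _hybridencode('meta/.A/00manifest.i', True)   # dotencode=True
    # -> meta/~2e_a/00manifest.i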
|
507 | 507 | Verify passes. |
|
508 | 508 | $ cd deepclone |
|
509 | 509 | $ hg verify |
|
510 | 510 | checking changesets |
|
511 | 511 | checking manifests |
|
512 | 512 | crosschecking files in changesets and manifests |
|
513 | 513 | checking files |
|
514 | 514 | 8 files, 3 changesets, 10 total revisions |
|
515 | 515 | $ cd .. |
|
516 | 516 | |
|
517 | 517 | Create clones using old repo formats to use in later tests |
|
518 | 518 | $ hg clone --config format.usestore=False \ |
|
519 | 519 | > --config experimental.changegroup3=True \ |
|
520 | 520 | > http://localhost:$HGPORT2 deeprepo-basicstore |
|
521 | 521 | requesting all changes |
|
522 | 522 | adding changesets |
|
523 | 523 | adding manifests |
|
524 | 524 | adding file changes |
|
525 | 525 | added 3 changesets with 10 changes to 8 files |
|
526 | 526 | updating to branch default |
|
527 | 527 | 8 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
528 | $ grep store deeprepo-basicstore/.hg/requires | |

528 | $ cd deeprepo-basicstore | |
|
529 | $ grep store .hg/requires | |
|
529 | 530 | [1] |
|
531 | $ hg serve -p $HGPORT3 -d --pid-file=hg.pid --errorlog=errors.log | |
|
532 | $ cat hg.pid >> $DAEMON_PIDS | |
|
533 | $ cd .. | |
|
530 | 534 | $ hg clone --config format.usefncache=False \ |
|
531 | 535 | > --config experimental.changegroup3=True \ |
|
532 | 536 | > http://localhost:$HGPORT2 deeprepo-encodedstore |
|
533 | 537 | requesting all changes |
|
534 | 538 | adding changesets |
|
535 | 539 | adding manifests |
|
536 | 540 | adding file changes |
|
537 | 541 | added 3 changesets with 10 changes to 8 files |
|
538 | 542 | updating to branch default |
|
539 | 543 | 8 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
540 | $ grep fncache deeprepo-encodedstore/.hg/requires | |

544 | $ cd deeprepo-encodedstore | |
|
545 | $ grep fncache .hg/requires | |
|
541 | 546 | [1] |
|
547 | $ hg serve -p $HGPORT4 -d --pid-file=hg.pid --errorlog=errors.log | |
|
548 | $ cat hg.pid >> $DAEMON_PIDS | |
|
549 | $ cd .. | |
|
542 | 550 | |
|
543 | 551 | Local clone with basicstore |
|
544 | 552 | $ hg clone -U deeprepo-basicstore local-clone-basicstore |
|
545 | 553 | $ hg -R local-clone-basicstore verify |
|
546 | 554 | checking changesets |
|
547 | 555 | checking manifests |
|
548 | 556 | crosschecking files in changesets and manifests |
|
549 | 557 | checking files |
|
550 | 558 | 8 files, 3 changesets, 10 total revisions |
|
551 | 559 | |
|
552 | 560 | Local clone with encodedstore |
|
553 | 561 | $ hg clone -U deeprepo-encodedstore local-clone-encodedstore |
|
554 | 562 | $ hg -R local-clone-encodedstore verify |
|
555 | 563 | checking changesets |
|
556 | 564 | checking manifests |
|
557 | 565 | crosschecking files in changesets and manifests |
|
558 | 566 | checking files |
|
559 | 567 | 8 files, 3 changesets, 10 total revisions |
|
560 | 568 | |
|
561 | 569 | Local clone with fncachestore |
|
562 | 570 | $ hg clone -U deeprepo local-clone-fncachestore |
|
563 | 571 | $ hg -R local-clone-fncachestore verify |
|
564 | 572 | checking changesets |
|
565 | 573 | checking manifests |
|
566 | 574 | crosschecking files in changesets and manifests |
|
567 | 575 | checking files |
|
568 | 576 | 8 files, 3 changesets, 10 total revisions |
|
577 | ||
|
578 | Stream clone with basicstore | |
|
579 | $ hg clone --config experimental.changegroup3=True --uncompressed -U \ | |
|
580 | > http://localhost:$HGPORT3 stream-clone-basicstore | |
|
581 | streaming all changes | |
|
582 | 18 files to transfer, * of data (glob) | |
|
583 | transferred * in * seconds (*) (glob) | |
|
584 | searching for changes | |
|
585 | no changes found | |
|
586 | $ hg -R stream-clone-basicstore verify | |
|
587 | checking changesets | |
|
588 | checking manifests | |
|
589 | crosschecking files in changesets and manifests | |
|
590 | checking files | |
|
591 | 8 files, 3 changesets, 10 total revisions | |
|
592 | ||
|
593 | Stream clone with encodedstore | |
|
594 | $ hg clone --config experimental.changegroup3=True --uncompressed -U \ | |
|
595 | > http://localhost:$HGPORT4 stream-clone-encodedstore | |
|
596 | streaming all changes | |
|
597 | 18 files to transfer, * of data (glob) | |
|
598 | transferred * in * seconds (*) (glob) | |
|
599 | searching for changes | |
|
600 | no changes found | |
|
601 | $ hg -R stream-clone-encodedstore verify | |
|
602 | checking changesets | |
|
603 | checking manifests | |
|
604 | crosschecking files in changesets and manifests | |
|
605 | checking files | |
|
606 | 8 files, 3 changesets, 10 total revisions | |
|
607 | ||
|
608 | Stream clone with fncachestore | |
|
609 | $ hg clone --config experimental.changegroup3=True --uncompressed -U \ | |
|
610 | > http://localhost:$HGPORT2 stream-clone-fncachestore | |
|
611 | streaming all changes | |
|
612 | 18 files to transfer, * of data (glob) | |
|
613 | transferred * in * seconds (*) (glob) | |
|
614 | searching for changes | |
|
615 | no changes found | |
|
616 | $ hg -R stream-clone-fncachestore verify | |
|
617 | checking changesets | |
|
618 | checking manifests | |
|
619 | crosschecking files in changesets and manifests | |
|
620 | checking files | |
|
621 | 8 files, 3 changesets, 10 total revisions | |
|
622 | ||
|
623 | Packed bundle | |
|
624 | $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg | |
|
625 | writing 3349 bytes for 18 files | |
|
626 | bundle requirements: generaldelta, revlogv1, treemanifest | |
|
627 | $ hg debugbundle --spec repo-packed.hg | |
|
628 | none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest |
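
The spec printed by debugbundle is the bundle type plus its requirements, percent-encoded. Decoding it with the Python 2 stdlib makes the line readable:

    import urllib

    spec = 'none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest'
    print urllib.unquote(spec)
    # -> none-packed1;requirements=generaldelta,revlogv1,treemanifest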