##// END OF EJS Templates
lfs: introduce a user level cache for lfs files...
Matt Harbison -
r35281:8e72f915 default
parent child Browse files
Show More
@@ -1,184 +1,191 b''
1 1 # lfs - hash-preserving large file support using Git-LFS protocol
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """lfs - large file support (EXPERIMENTAL)
9 9
10 10 Configs::
11 11
12 12 [lfs]
13 13 # Remote endpoint. Multiple protocols are supported:
14 14 # - http(s)://user:pass@example.com/path
15 15 # git-lfs endpoint
16 16 # - file:///tmp/path
17 17 # local filesystem, usually for testing
18 18 # if unset, lfs will prompt setting this when it must use this value.
19 19 # (default: unset)
20 20 url = https://example.com/lfs
21 21
22 22 # size of a file to make it use LFS
23 23 threshold = 10M
24 24
25 25 # how many times to retry before giving up on transferring an object
26 26 retry = 5
27
28 # the local directory to store lfs files for sharing across local clones.
29 # If not set, the cache is located in an OS specific cache location.
30 usercache = /path/to/global/cache
27 31 """
28 32
29 33 from __future__ import absolute_import
30 34
31 35 from mercurial.i18n import _
32 36
33 37 from mercurial import (
34 38 bundle2,
35 39 changegroup,
36 40 context,
37 41 exchange,
38 42 extensions,
39 43 filelog,
40 44 hg,
41 45 localrepo,
42 46 registrar,
43 47 revlog,
44 48 scmutil,
45 49 vfs as vfsmod,
46 50 )
47 51
48 52 from . import (
49 53 blobstore,
50 54 wrapper,
51 55 )
52 56
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Registry of the [lfs] config options this extension understands; the
# registrar validates access and supplies defaults.
configtable = {}
configitem = registrar.configitem(configtable)

# Remote endpoint URL; dynamicdefault because an unset value triggers an
# interactive prompt (see blobstore._promptremote).
configitem('lfs', 'url',
    default=configitem.dynamicdefault,
)
# OS-specific user-level cache directory shared across local clones.
configitem('lfs', 'usercache',
    default=None,
)
# Size threshold above which a file is stored via LFS.
configitem('lfs', 'threshold',
    default=None,
)
# Number of retries before giving up on a transfer.
configitem('lfs', 'retry',
    default=5,
)
# Deprecated
configitem('lfs', 'remotestore',
    default=None,
)
# Deprecated
configitem('lfs', 'dummy',
    default=None,
)
# Deprecated
configitem('lfs', 'git-lfs',
    default=None,
)

# Command table populated by the @command decorator below.
cmdtable = {}
command = registrar.command(cmdtable)

templatekeyword = registrar.templatekeyword()
88 95
def featuresetup(ui, supported):
    """Mark the 'lfs' repository requirement as supported.

    Registered on localrepository so opening a repo that carries the
    'lfs' requirement does not abort.
    """
    supported.update(['lfs'])
92 99
def uisetup(ui):
    # Hook featuresetup into repository construction so the 'lfs'
    # requirement is accepted for every local repository.
    localrepo.localrepository.featuresetupfuncs.add(featuresetup)
95 102
def reposetup(ui, repo):
    """Attach the lfs blob stores and hooks to a local repository."""
    # Nothing to do with a remote repo
    if not repo.local():
        return

    # Files larger than this are stored as LFS blobs (None disables).
    threshold = repo.ui.configbytes('lfs', 'threshold')

    repo.svfs.options['lfsthreshold'] = threshold
    repo.svfs.lfslocalblobstore = blobstore.local(repo)
    repo.svfs.lfsremoteblobstore = blobstore.remote(repo)

    # Push hook
    repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)

    if 'lfs' not in repo.requirements:
        # Add the 'lfs' requirement lazily: only once a commit actually
        # contains an lfs-flagged file does the repo become lfs-only.
        def checkrequireslfs(ui, repo, **kwargs):
            if 'lfs' not in repo.requirements:
                ctx = repo[kwargs['node']]
                # TODO: is there a way to just walk the files in the commit?
                if any(ctx[f].islfs() for f in ctx.files()):
                    repo.requirements.add('lfs')
                    repo._writerequirements()

        ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs')
120 127
def wrapfilelog(filelog):
    """Install the lfs overrides on the given filelog class."""
    overrides = [
        ('addrevision', wrapper.filelogaddrevision),
        ('renamed', wrapper.filelogrenamed),
        ('size', wrapper.filelogsize),
    ]
    for name, override in overrides:
        extensions.wrapfunction(filelog, name, override)
127 134
def extsetup(ui):
    """Wrap the core APIs that need to know about lfs-stored blobs."""
    wrapfilelog(filelog.filelog)

    wrapfunction = extensions.wrapfunction

    # Keep lfs pointers intact when converting repositories.
    wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)

    # lfs revisions need changegroup3's flag support.
    wrapfunction(changegroup,
                 'supportedoutgoingversions',
                 wrapper.supportedoutgoingversions)
    wrapfunction(changegroup,
                 'allsupportedversions',
                 wrapper.allsupportedversions)

    wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp)
    wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
    context.basefilectx.islfs = wrapper.filectxislfs

    # Translate between raw revlog content (lfs pointers) and the real
    # file data stored in the blobstore.
    revlog.addflagprocessor(
        revlog.REVIDX_EXTSTORED,
        (
            wrapper.readfromstore,
            wrapper.writetostore,
            wrapper.bypasscheckhash,
        ),
    )

    wrapfunction(hg, 'clone', wrapper.hgclone)
    wrapfunction(hg, 'postshare', wrapper.hgpostshare)

    # Make bundle choose changegroup3 instead of changegroup2. This affects
    # "hg bundle" command. Note: it does not cover all bundle formats like
    # "packed1". Using "packed1" with lfs will likely cause trouble.
    names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02']
    for k in names:
        exchange._bundlespeccgversions[k] = '03'

    # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
    # options and blob stores are passed from othervfs to the new readonlyvfs.
    wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit)

    # when writing a bundle via "hg bundle" command, upload related LFS blobs
    wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
171 178
@templatekeyword('lfs_files')
def lfsfiles(repo, ctx, **args):
    """List of strings. LFS files added or modified by the changeset."""
    # Mapping of {path: pointer}; only the paths are reported, sorted.
    pointers = wrapper.pointersfromctx(ctx)
    return sorted(pointers)
177 184
@command('debuglfsupload',
         [('r', 'rev', [], _('upload large files introduced by REV'))])
def debuglfsupload(ui, repo, **opts):
    """upload lfs blobs added by the working copy parent or given revisions"""
    revs = opts.get('rev', [])
    # Collect every lfs pointer reachable from the requested revisions and
    # push the corresponding blobs to the configured remote store.
    pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
    wrapper.uploadblobs(repo, pointers)
@@ -1,347 +1,358 b''
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import json
11 11 import os
12 12 import re
13 13
14 14 from mercurial.i18n import _
15 15
16 16 from mercurial import (
17 17 error,
18 18 url as urlmod,
19 19 util,
20 20 vfs as vfsmod,
21 21 )
22 22
23 from ..largefiles import lfutil
24
# 64 hex digits for a SHA256 digest (32 bytes); lfs blob names must match
# exactly.
_lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
class lfsvfs(vfsmod.vfs):
    """vfs keyed by sha256 oid, fanned out as XX/XXXX... on disk."""

    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError('unexpected lfs path: %s' % path)
        # Two-character prefix directory keeps directory sizes bounded.
        prefix, remainder = path[:2], path[2:]
        return super(lfsvfs, self).join(prefix, remainder)
32 34
class filewithprogress(object):
    """a file-like object that supports __len__ and read.

    Useful to provide progress information for how many bytes are read.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # invoked as callback(readsize)
        # Determine the total size up front, then rewind for reading.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        # Once exhausted, the underlying file is closed and dropped;
        # subsequent reads return the empty byte string.
        if self._fp is None:
            return b''
        data = self._fp.read(size)
        if not data:
            self._fp.close()
            self._fp = None
            return data
        if self._callback:
            self._callback(len(data))
        return data
60 62
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        # Repo-private store under .hg/store/lfs/objects, plus a user-level
        # cache directory shared across local clones.
        fullpath = repo.svfs.join('lfs/objects')
        self.vfs = lfsvfs(fullpath)
        usercache = lfutil._usercachedir(repo.ui, 'lfs')
        self.cachevfs = lfsvfs(usercache)

    def write(self, oid, data):
        """Write blob to local blobstore."""
        with self.vfs(oid, 'wb', atomictemp=True) as fp:
            fp.write(data)

        # Populate the user cache (via hardlink) so other clones can reuse
        # the blob without re-downloading.
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid):
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid):
        """Read blob from local blobstore."""
        # Fault the blob in from the user cache if this repo doesn't have
        # it yet; lfutil.link raises if the cache lacks it too.
        if not self.vfs.exists(oid):
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        return self.vfs.read(oid)

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        # Either location satisfies a presence check; read() links the blob
        # into the repo-local store on demand.
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
85 96
class _gitlfsremote(object):
    """Remote blobstore speaking the Git-LFS HTTP protocol.

    Transfers are negotiated through the batch API, then performed one
    object at a time with the basic transfer protocol, retrying failed
    transfers up to lfs.retry times.
    """

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip('/')
        self.urlopener = urlmod.opener(ui, authinfo)
        self.retry = ui.configint('lfs', 'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(pointers, fromstore, 'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blostore."""
        self._batch(pointers, tostore, 'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
        requestdata = json.dumps({
            'objects': objects,
            'operation': action,
        })
        batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
                                       data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            rawjson = self.urlopener.open(batchreq).read()
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
                                 % (ex, action))
        try:
            response = json.loads(rawjson)
        except ValueError:
            raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
                                 % rawjson)
        return response

    def _checkforservererror(self, pointers, responses):
        """Scans errors from objects

        Returns LfsRemoteError if any objects has an error"""
        for response in responses:
            error = response.get('error')
            if error:
                # Map oid back to the pointer so a 404 can name the file.
                ptrmap = {p.oid(): p for p in pointers}
                p = ptrmap.get(response['oid'], None)
                if error['code'] == 404 and p:
                    filename = getattr(p, 'filename', 'unknown')
                    raise LfsRemoteError(
                        _(('LFS server error. Remote object '
                          'for file %s not found: %r')) % (filename, response))
                raise LfsRemoteError(_('LFS server error: %r') % response)

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get('objects', [])
        self._checkforservererror(pointers, objects)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        filteredobjects = [o for o in objects if action in o.get('actions', [])]
        # But for downloading, we want all objects. Therefore missing objects
        # should be considered an error.
        if action == 'download':
            if len(filteredobjects) < len(objects):
                missing = [o.get('oid', '?')
                           for o in objects
                           if action not in o.get('actions', [])]
                raise LfsRemoteError(
                    _('LFS server claims required objects do not exist:\n%s')
                    % '\n'.join(missing))

        return filteredobjects

    def _basictransfer(self, obj, action, localstore, progress=None):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = str(obj['oid'])

        href = str(obj['actions'][action].get('href'))
        headers = obj['actions'][action].get('header', {}).items()

        request = util.urlreq.request(href)
        if action == 'upload':
            # If uploading blobs, read data from local blobstore.
            request.data = filewithprogress(localstore.vfs(oid), progress)
            request.get_method = lambda: 'PUT'

        for k, v in headers:
            request.add_header(k, v)

        response = b''
        try:
            req = self.urlopener.open(request)
            # Read in 1MiB chunks so download progress can be reported.
            while True:
                data = req.read(1048576)
                if not data:
                    break
                if action == 'download' and progress:
                    progress(len(data))
                response += data
        except util.urlerr.httperror as ex:
            raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
                                 % (ex, oid, action))

        if action == 'download':
            # If downloading blobs, store downloaded data to local blobstore
            localstore.write(oid, response)

    def _batch(self, pointers, localstore, action):
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        # Mutable cell so the nested progress() closure can update the
        # running total (no 'nonlocal' on Python 2).
        prunningsize = [0]
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        topic = {'upload': _('lfs uploading'),
                 'download': _('lfs downloading')}[action]
        if self.ui.verbose and len(objects) > 1:
            self.ui.write(_('lfs: need to transfer %d objects (%s)\n')
                          % (len(objects), util.bytecount(total)))
        self.ui.progress(topic, 0, total=total)
        def progress(size):
            # advance progress bar by "size" bytes
            prunningsize[0] += size
            self.ui.progress(topic, prunningsize[0], total=total)
        for obj in sorted(objects, key=lambda o: o.get('oid')):
            objsize = obj.get('size', 0)
            if self.ui.verbose:
                if action == 'download':
                    msg = _('lfs: downloading %s (%s)\n')
                elif action == 'upload':
                    msg = _('lfs: uploading %s (%s)\n')
                self.ui.write(msg % (obj.get('oid'), util.bytecount(objsize)))
            # Remember the progress position so a retried transfer starts
            # the count for this object over.
            origrunningsize = prunningsize[0]
            retry = self.retry
            while True:
                prunningsize[0] = origrunningsize
                try:
                    self._basictransfer(obj, action, localstore,
                                        progress=progress)
                    break
                except Exception as ex:
                    if retry > 0:
                        if self.ui.verbose:
                            self.ui.write(
                                _('lfs: failed: %r (remaining retry %d)\n')
                                % (ex, retry))
                        retry -= 1
                        continue
                    raise

        self.ui.progress(topic, pos=None, total=total)

    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda : None)()
269 280
class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        # Blobs live under .hg/lfs/<url.path> in the repository vfs.
        self.vfs = lfsvfs(repo.vfs.join('lfs', url.path))

    def writebatch(self, pointers, fromstore):
        for pointer in pointers:
            oid = pointer.oid()
            content = fromstore.read(oid)
            with self.vfs(oid, 'wb', atomictemp=True) as fp:
                fp.write(content)

    def readbatch(self, pointers, tostore):
        for pointer in pointers:
            oid = pointer.oid()
            tostore.write(oid, self.vfs.read(oid))
287 298
288 299 class _nullremote(object):
289 300 """Null store storing blobs to /dev/null."""
290 301
291 302 def __init__(self, repo, url):
292 303 pass
293 304
294 305 def writebatch(self, pointers, fromstore):
295 306 pass
296 307
297 308 def readbatch(self, pointers, tostore):
298 309 pass
299 310
class _promptremote(object):
    """Prompt user to set lfs.url when accessed."""

    def __init__(self, repo, url):
        pass

    def _prompt(self):
        # Any transfer attempt without a configured endpoint aborts.
        raise error.Abort(_('lfs.url needs to be configured'))

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()
314 325
# Map url scheme -> remote store implementation.  A scheme of None (no
# lfs.url configured at all) selects the store that prompts the user to
# configure one.
_storemap = {
    'https': _gitlfsremote,
    'http': _gitlfsremote,
    'file': _dummyremote,
    'null': _nullremote,
    None: _promptremote,
}
322 333
def remote(repo):
    """remotestore factory. return a store in _storemap depending on config"""
    ui = repo.ui

    # Translate the deprecated lfs.remotestore / lfs.remotepath /
    # lfs.remoteurl settings into a default for the modern lfs.url config.
    # TODO: remove this if other places are migrated to the new url config.
    deprecatedstore = ui.config('lfs', 'remotestore')
    if deprecatedstore == 'dummy':
        # deprecated config: lfs.remotepath
        defaulturl = 'file://' + ui.config('lfs', 'remotepath')
    elif deprecatedstore == 'git-lfs':
        # deprecated config: lfs.remoteurl
        defaulturl = ui.config('lfs', 'remoteurl')
    elif deprecatedstore == 'null':
        defaulturl = 'null://'
    else:
        defaulturl = ''

    url = util.url(ui.config('lfs', 'url', defaulturl))
    if url.scheme not in _storemap:
        raise error.Abort(_('lfs: unknown url scheme: %s') % url.scheme)
    return _storemap[url.scheme](repo, url)
345 356
class LfsRemoteError(error.RevlogError):
    """Raised when an LFS request fails or the server reports an error."""
    pass
@@ -1,3014 +1,3017 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import os
55 55 import random
56 56 import re
57 57 import shutil
58 58 import signal
59 59 import socket
60 60 import subprocess
61 61 import sys
62 62 import sysconfig
63 63 import tempfile
64 64 import threading
65 65 import time
66 66 import unittest
67 67 import xml.dom.minidom as minidom
68 68
69 69 try:
70 70 import Queue as queue
71 71 except ImportError:
72 72 import queue
73 73
74 74 try:
75 75 import shlex
76 76 shellquote = shlex.quote
77 77 except (ImportError, AttributeError):
78 78 import pipes
79 79 shellquote = pipes.quote
80 80
# RTUNICODEPEDANTRY sets the Python 2 default encoding to "undefined".
# reload()/setdefaultencoding() do not exist on Python 3; the resulting
# NameError is deliberately ignored there.
if os.environ.get('RTUNICODEPEDANTRY', False):
    try:
        reload(sys)
        sys.setdefaultencoding("undefined")
    except NameError:
        pass

# Snapshot of the environment before the harness mutates it.
origenviron = os.environ.copy()
# Bytes-keyed environment where available (py3 os.environb), else os.environ.
osenvironb = getattr(os, 'environb', os.environ)
# Serializes subprocess creation (used by Popen4).
processlock = threading.Lock()
91 91
# Optional colorized output: probe for pygments and pre-build the diff
# lexer/formatter if it is importable.
pygmentspresent = False
# ANSI color is unsupported prior to Windows 10
if os.name != 'nt':
    try: # is pygments installed
        import pygments
        import pygments.lexers as lexers
        import pygments.lexer as lexer
        import pygments.formatters as formatters
        import pygments.token as token
        import pygments.style as style
        pygmentspresent = True
        difflexer = lexers.DiffLexer()
        terminal256formatter = formatters.Terminal256Formatter()
    except ImportError:
        pass
107 107
if pygmentspresent:
    # Custom pygments style/lexer pair used to colorize the harness's own
    # "Skipped"/"Failed"/"ERROR" summary lines.
    class TestRunnerStyle(style.Style):
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        # The 'root' state dispatches to per-category states that color the
        # test file name separately from the rest of the line.
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (r'[\w-]+\.(t|py)', token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (r'[\w-]+\.(t|py)', token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ]
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()
141 141
# Python version shim: define _bytespath/_strpath converters so path
# handling works identically on Python 2 (bytes-native) and 3.5+.
# Python 3.0-3.4 is rejected outright.
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # None passes through so optional paths stay optional.
        if p is None:
            return p
        return p.encode('utf-8')

    def _strpath(p):
        if p is None:
            return p
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
# For Windows support: os.WIFEXITED does not exist there, so fall back to
# a stub that reports "not a normal exit".
wifexited = getattr(os, "WIFEXITED", lambda x: False)
# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.

    The probe socket is always closed, even when bind() raises (it was
    previously leaked on the exception path), and the unreachable
    ``else: return False`` after a try block that always returns has been
    dropped.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        try:
            s.bind(('localhost', port))
        finally:
            s.close()
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # Someone else listening there still proves the family works.
            return True
        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        else:
            raise
198 198
# useipv6 will be set by parseargs
useipv6 = None

def checkportisavailable(port):
    """return true if a port seems free to bind on localhost

    The probe socket is always closed, even when bind() raises (it was
    previously leaked on the exception path).
    """
    # useipv6 is configured globally by parseargs before tests run.
    if useipv6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        try:
            s.bind(('localhost', port))
        finally:
            s.close()
        return True
    except socket.error as exc:
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
                             errno.EPROTONOSUPPORT):
            raise
        return False
218 218
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn *cmd* through the shell and return the Popen object.

    The returned object carries p4-style aliases (fromchild/tochild/
    childerr) and a ``timeout`` flag that a watchdog thread sets before
    terminating a process that outlives *timeout* seconds.

    Fix: the global processlock is now released even if subprocess.Popen
    raises; previously an exception left it held forever, deadlocking
    every later spawn.
    """
    with processlock:
        p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                             close_fds=closefds,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll until the deadline passes or the process ends, then
            # mark the timeout and kill any survivor.
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
244 244
# Interpreter path as bytes, with backslashes normalized for Windows.
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option name -> (environment variable override, built-in fallback).
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
257 257
def canonpath(path):
    """Return *path* with '~' expanded and symlinks/relative parts resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
260 260
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into {entry (bytes): source filename}.

    Each line is stripped of b'#' comments and surrounding whitespace;
    blank results are skipped.  Missing files are ignored, optionally with
    a warning; other IOErrors propagate.

    Fix: the file handle is now closed via a context manager even if
    reading raises (it previously leaked on that path).
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
281 281
def parsettestcases(path):
    """read a .t test file, return a set of test case names

    If path does not exist, return an empty set.
    """
    marker = b'#testcases '
    names = set()
    try:
        with open(path, 'rb') as fp:
            for line in fp:
                if line.startswith(marker):
                    names.update(line[len(marker):].split())
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return names
297 297
298 298 def getparser():
299 299 """Obtain the OptionParser used by the CLI."""
300 300 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
301 301
302 302 selection = parser.add_argument_group('Test Selection')
303 303 selection.add_argument('--allow-slow-tests', action='store_true',
304 304 help='allow extremely slow tests')
305 305 selection.add_argument("--blacklist", action="append",
306 306 help="skip tests listed in the specified blacklist file")
307 307 selection.add_argument("--changed",
308 308 help="run tests that are changed in parent rev or working directory")
309 309 selection.add_argument("-k", "--keywords",
310 310 help="run tests matching keywords")
311 311 selection.add_argument("-r", "--retest", action="store_true",
312 312 help = "retest failed tests")
313 313 selection.add_argument("--test-list", action="append",
314 314 help="read tests to run from the specified file")
315 315 selection.add_argument("--whitelist", action="append",
316 316 help="always run tests listed in the specified whitelist file")
317 317 selection.add_argument('tests', metavar='TESTS', nargs='*',
318 318 help='Tests to run')
319 319
320 320 harness = parser.add_argument_group('Test Harness Behavior')
321 321 harness.add_argument('--bisect-repo',
322 322 metavar='bisect_repo',
323 323 help=("Path of a repo to bisect. Use together with "
324 324 "--known-good-rev"))
325 325 harness.add_argument("-d", "--debug", action="store_true",
326 326 help="debug mode: write output of test scripts to console"
327 327 " rather than capturing and diffing it (disables timeout)")
328 328 harness.add_argument("-f", "--first", action="store_true",
329 329 help="exit on the first test failure")
330 330 harness.add_argument("-i", "--interactive", action="store_true",
331 331 help="prompt to accept changed output")
332 332 harness.add_argument("-j", "--jobs", type=int,
333 333 help="number of jobs to run in parallel"
334 334 " (default: $%s or %d)" % defaults['jobs'])
335 335 harness.add_argument("--keep-tmpdir", action="store_true",
336 336 help="keep temporary directory after running tests")
337 337 harness.add_argument('--known-good-rev',
338 338 metavar="known_good_rev",
339 339 help=("Automatically bisect any failures using this "
340 340 "revision as a known-good revision."))
341 341 harness.add_argument("--list-tests", action="store_true",
342 342 help="list tests instead of running them")
343 343 harness.add_argument("--loop", action="store_true",
344 344 help="loop tests repeatedly")
345 345 harness.add_argument('--random', action="store_true",
346 346 help='run tests in random order')
347 347 harness.add_argument("-p", "--port", type=int,
348 348 help="port on which servers should listen"
349 349 " (default: $%s or %d)" % defaults['port'])
350 350 harness.add_argument('--profile-runner', action='store_true',
351 351 help='run statprof on run-tests')
352 352 harness.add_argument("-R", "--restart", action="store_true",
353 353 help="restart at last error")
354 354 harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
355 355 help="run each test N times (default=1)", default=1)
356 356 harness.add_argument("--shell",
357 357 help="shell to use (default: $%s or %s)" % defaults['shell'])
358 358 harness.add_argument('--showchannels', action='store_true',
359 359 help='show scheduling channels')
360 360 harness.add_argument("--slowtimeout", type=int,
361 361 help="kill errant slow tests after SLOWTIMEOUT seconds"
362 362 " (default: $%s or %d)" % defaults['slowtimeout'])
363 363 harness.add_argument("-t", "--timeout", type=int,
364 364 help="kill errant tests after TIMEOUT seconds"
365 365 " (default: $%s or %d)" % defaults['timeout'])
366 366 harness.add_argument("--tmpdir",
367 367 help="run tests in the given temporary directory"
368 368 " (implies --keep-tmpdir)")
369 369 harness.add_argument("-v", "--verbose", action="store_true",
370 370 help="output verbose messages")
371 371
372 372 hgconf = parser.add_argument_group('Mercurial Configuration')
373 373 hgconf.add_argument("--chg", action="store_true",
374 374 help="install and use chg wrapper in place of hg")
375 375 hgconf.add_argument("--compiler",
376 376 help="compiler to build with")
377 377 hgconf.add_argument('--extra-config-opt', action="append", default=[],
378 378 help='set the given config opt in the test hgrc')
379 379 hgconf.add_argument("-l", "--local", action="store_true",
380 380 help="shortcut for --with-hg=<testdir>/../hg, "
381 381 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
382 382 hgconf.add_argument("--ipv6", action="store_true",
383 383 help="prefer IPv6 to IPv4 for network related tests")
384 384 hgconf.add_argument("--pure", action="store_true",
385 385 help="use pure Python code instead of C extensions")
386 386 hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
387 387 help="enable Py3k warnings on Python 2.7+")
388 388 hgconf.add_argument("--with-chg", metavar="CHG",
389 389 help="use specified chg wrapper in place of hg")
390 390 hgconf.add_argument("--with-hg",
391 391 metavar="HG",
392 392 help="test using specified hg script rather than a "
393 393 "temporary installation")
394 394 # This option should be deleted once test-check-py3-compat.t and other
395 395 # Python 3 tests run with Python 3.
396 396 hgconf.add_argument("--with-python3", metavar="PYTHON3",
397 397 help="Python 3 interpreter (if running under Python 2)"
398 398 " (TEMPORARY)")
399 399
400 400 reporting = parser.add_argument_group('Results Reporting')
401 401 reporting.add_argument("-C", "--annotate", action="store_true",
402 402 help="output files annotated with coverage")
403 403 reporting.add_argument("--color", choices=["always", "auto", "never"],
404 404 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
405 405 help="colorisation: always|auto|never (default: auto)")
406 406 reporting.add_argument("-c", "--cover", action="store_true",
407 407 help="print a test coverage report")
408 408 reporting.add_argument('--exceptions', action='store_true',
409 409 help='log all exceptions and generate an exception report')
410 410 reporting.add_argument("-H", "--htmlcov", action="store_true",
411 411 help="create an HTML report of the coverage of the files")
412 412 reporting.add_argument("--json", action="store_true",
413 413 help="store test result data in 'report.json' file")
414 414 reporting.add_argument("--outputdir",
415 415 help="directory to write error logs to (default=test directory)")
416 416 reporting.add_argument("-n", "--nodiff", action="store_true",
417 417 help="skip showing test changes")
418 418 reporting.add_argument("-S", "--noskips", action="store_true",
419 419 help="don't report skip tests verbosely")
420 420 reporting.add_argument("--time", action="store_true",
421 421 help="time how long each test takes")
422 422 reporting.add_argument("--view",
423 423 help="external diff viewer")
424 424 reporting.add_argument("--xunit",
425 425 help="record xunit results at specified path")
426 426
427 427 for option, (envvar, default) in defaults.items():
428 428 defaults[option] = type(default)(os.environ.get(envvar, default))
429 429 parser.set_defaults(**defaults)
430 430
431 431 return parser
432 432
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Exits via parser.error() on invalid combinations.  Also mutates the
    module-level ``useipv6`` and ``verbose`` globals as side effects, and
    derives ``options.anycoverage`` / ``options.whitelisted`` from the raw
    command-line values.
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = canonpath(_bytespath(options.with_hg))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local resolves hg (and optionally chg) relative to this script's
        # own repository checkout.
        testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error('--local specified, but %r not found or '
                             'not executable' % binpath)
            setattr(options, attr, binpath)

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False  # no installation to temporary location
        options.with_chg = canonpath(_bytespath(options.with_chg))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write('warning: --color=always ignored because '
                         'pygments is not installed\n')

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = ((not checksocketfamily('AF_INET'))
                   and checksocketfamily('AF_INET6'))

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # verbose is a prefix string printed by log(); empty when enabled.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug disables output capture, so timeouts cannot be enforced.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.7')
    if options.with_python3:
        if PYTHON3:
            parser.error('--with-python3 cannot be used when executing with '
                         'Python 3')

        options.with_python3 = canonpath(options.with_python3)
        # Verify Python3 executable is acceptable.
        proc = subprocess.Popen([options.with_python3, b'--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        ret = proc.wait()
        if ret != 0:
            parser.error('could not determine version of python 3')
        if not out.startswith('Python '):
            parser.error('unexpected output from python3 --version: %s' %
                         out)
        vers = version.LooseVersion(out[len('Python '):])
        if vers < version.LooseVersion('3.5.0'):
            parser.error('--with-python3 version must be 3.5.0 or greater; '
                         'got %s' % out)

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        # scheduling-channel display shares the screen, so suppress diffs
        options.nodiff = True

    return options
565 565
def rename(src, dst):
    """Move src to dst by copy-then-delete.

    Unlike os.rename(), this also works when dst already exists, at the
    cost of atomicity and of briefly duplicating the file on disk.
    """
    shutil.copy(src, dst)
    os.remove(src)
572 572
# On Python 3, difflib.unified_diff only accepts str sequences; diff_bytes
# adapts it so the harness can keep diffing bytes line lists unchanged.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
577 577
def getdiff(expected, output, ref, err):
    """Return (servefail, lines) for a unified diff of expected vs. output.

    lines is the normalized diff (backslashes in headers flipped to slashes,
    trailing "<space>\\n" trimmed).  servefail flips to True when the diff
    contains a line showing a child server failed to start.
    """
    servefail = False
    difflines = []
    for raw in _unified_diff(expected, output, ref, err):
        cooked = raw
        if cooked.startswith((b'+++', b'---')):
            # normalize Windows path separators in the file headers
            cooked = cooked.replace(b'\\', b'/')
        if cooked.endswith(b' \n'):
            cooked = cooked[:-2] + b'\n'
        difflines.append(cooked)
        if (not servefail
            and cooked.startswith(b'+ abort: child process failed to start')):
            servefail = True

    return servefail, difflines
592 592
# Module-level verbosity flag: False when quiet; parseargs() sets it to the
# empty string when -v/--verbose is given (it doubles as log()'s prefix).
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)
600 600
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
#   output..output (feature !)\n
#
# Raw bytes literal: the previous non-raw form relied on Python passing the
# invalid escapes \( and \) through verbatim, which raises a
# DeprecationWarning (and is a SyntaxError in future Python versions).
# Inside a raw literal the regex engine still interprets \n as a newline,
# so the matched language is unchanged.
optline = re.compile(br'(.*) \((.+?) !\)\n$')

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
620 620
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock is a module-level lock (defined earlier in the file) serializing
    # output from concurrently running tests.
    with iolock:
        if verbose:
            # verbose doubles as an output prefix when it is a non-empty
            # string; parseargs() sets it to '' so this normally stays quiet.
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
633 633
def highlightdiff(line, color):
    """Return a diff line (bytes) colorized via pygments.

    When color is off the line is returned untouched.  latin1 is used for
    the round trip because it maps every byte to a code point losslessly.
    """
    if not color:
        return line
    assert pygmentspresent
    text = line.decode('latin1')
    colorized = pygments.highlight(text, difflexer, terminal256formatter)
    return colorized.encode('latin1')
640 640
def highlightmsg(msg, color):
    """Return a harness status message colorized via pygments.

    When color is off the message is returned untouched.  Unlike
    highlightdiff() this operates on str, not bytes.
    """
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)
646 646
def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        # the process may already have exited; best-effort only
        pass
654 654
def killdaemons(pidfile):
    """Kill the daemon processes listed in pidfile and remove the file."""
    # imported lazily; killdaemons.py lives next to this script
    import killdaemons as killmod
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
659 659
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)

    Subclasses provide a ``refpath`` property and a ``_run(env)``
    implementation; the base class handles setup, environment construction,
    result reporting and cleanup.
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80
670 670
    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=None,
                 startport=None, extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=None, usechg=False,
                 useipv6=False):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.

        outputdir is where the .err file for a failing test is written.

        hgcommand is the hg executable name to use (default b'hg').

        usechg runs the test through a chg server; useipv6 makes test
        servers prefer IPv6.
        """
        # timeout/port defaults come from the module-level defaults table
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._useipv6 = useipv6

        # per-run state, (re)initialized by setUp()/runTest()
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()
745 745
746 746 def readrefout(self):
747 747 """read reference output"""
748 748 # If we're not in --debug mode and reference output file exists,
749 749 # check test output against it.
750 750 if self._debug:
751 751 return None # to match "out is None"
752 752 elif os.path.exists(self.refpath):
753 753 with open(self.refpath, 'rb') as f:
754 754 return f.read().splitlines(True)
755 755 else:
756 756 return []
757 757
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        # shown by unittest result reporting in place of a docstring
        return self.name
768 768
    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        # the per-thread directory is shared between tests; tolerate it
        # already existing
        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise
Comment
        if self._usechg:
            self._chgsockdir = os.path.join(self._threadtmp,
                                            b'%s.chgsock' % name)
            os.mkdir(self._chgsockdir)
801 801
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown runs regardless of the outcome above; a failure in
            # cleanup converts a passing test into an error.
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
856 856
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.

        Raises unittest.SkipTest for skipped tests and fails (via
        self.fail, i.e. AssertionError) on timeouts, unexpected exit
        codes and output mismatches.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # negative return codes are "killed by signal N" on POSIX
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
922 922
    def tearDown(self):
        """Tasks to perform after run()."""
        # stop any servers the test started
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp.decode('utf-8'),
                 self._threadtmp.decode('utf-8')))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # persist the failing output so a later run can diff against it
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
950 950
    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        # checked by run()/_run() implementations; the next opportunity
        # raises KeyboardInterrupt
        self._aborted = True
958 958
959 959 def _portmap(self, i):
960 960 offset = b'' if i == 0 else b'%d' % i
961 961 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
962 962
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.

        Returns a list of (regex, replacement) byte-string pairs.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # tests may ship extra, repo-wide substitutions in common-pattern.py
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step helps with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            r.extend(data.get('substitutions', ()))
        return r
992 992
    def _escapepath(self, p):
        """Turn path ``p`` into a regex matching it in test output.

        On Windows the match is case-insensitive per character ([aA]), both
        path separators are accepted ([/\\]), digits pass through, and any
        other byte is backslash-escaped.  Elsewhere a plain re.escape()
        suffices.
        """
        if os.name == 'nt':
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)
1002 1002
1003 1003 def _localip(self):
1004 1004 if self._useipv6:
1005 1005 return b'::1'
1006 1006 else:
1007 1007 return b'127.0.0.1'
1008 1008
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = scriptpath

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        # origenviron is the module-level snapshot of os.environ taken at
        # harness startup
        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            # variables the harness added that were not in the original
            # environment get unset
            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
1037 1037
1038 1038 def _getenv(self):
1039 1039 """Obtain environment variables to use during test execution."""
1040 1040 def defineport(i):
1041 1041 offset = '' if i == 0 else '%s' % i
1042 1042 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1043 1043 env = os.environ.copy()
1044 1044 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
1045 1045 env['HGEMITWARNINGS'] = '1'
1046 1046 env['TESTTMP'] = self._testtmp
1047 1047 env['HOME'] = self._testtmp
1048 1048 # This number should match portneeded in _getport
1049 1049 for port in xrange(3):
1050 1050 # This list should be parallel to _portmap in _getreplacements
1051 1051 defineport(port)
1052 1052 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
1053 1053 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
1054 1054 env["HGEDITOR"] = ('"' + sys.executable + '"'
1055 1055 + ' -c "import sys; sys.exit(0)"')
1056 1056 env["HGMERGE"] = "internal:merge"
1057 1057 env["HGUSER"] = "test"
1058 1058 env["HGENCODING"] = "ascii"
1059 1059 env["HGENCODINGMODE"] = "strict"
1060 1060 env['HGIPV6'] = str(int(self._useipv6))
1061 1061
1062 1062 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1063 1063 # IP addresses.
1064 1064 env['LOCALIP'] = self._localip()
1065 1065
1066 1066 # Reset some environment variables to well-known values so that
1067 1067 # the tests produce repeatable output.
1068 1068 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1069 1069 env['TZ'] = 'GMT'
1070 1070 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1071 1071 env['COLUMNS'] = '80'
1072 1072 env['TERM'] = 'xterm'
1073 1073
1074 1074 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
1075 1075 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
1076 1076 'NO_PROXY CHGDEBUG').split():
1077 1077 if k in env:
1078 1078 del env[k]
1079 1079
1080 1080 # unset env related to hooks
1081 1081 for k in env.keys():
1082 1082 if k.startswith('HG_'):
1083 1083 del env[k]
1084 1084
1085 1085 if self._usechg:
1086 1086 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1087 1087
1088 1088 return env
1089 1089
1090 1090 def _createhgrc(self, path):
1091 1091 """Create an hgrc file for this test."""
1092 1092 hgrc = open(path, 'wb')
1093 1093 hgrc.write(b'[ui]\n')
1094 1094 hgrc.write(b'slash = True\n')
1095 1095 hgrc.write(b'interactive = False\n')
1096 1096 hgrc.write(b'mergemarkers = detailed\n')
1097 1097 hgrc.write(b'promptecho = True\n')
1098 1098 hgrc.write(b'[defaults]\n')
1099 1099 hgrc.write(b'[devel]\n')
1100 1100 hgrc.write(b'all-warnings = true\n')
1101 1101 hgrc.write(b'default-date = 0 0\n')
1102 1102 hgrc.write(b'[largefiles]\n')
1103 1103 hgrc.write(b'usercache = %s\n' %
1104 1104 (os.path.join(self._testtmp, b'.cache/largefiles')))
1105 hgrc.write(b'[lfs]\n')
1106 hgrc.write(b'usercache = %s\n' %
1107 (os.path.join(self._testtmp, b'.cache/lfs')))
1105 1108 hgrc.write(b'[web]\n')
1106 1109 hgrc.write(b'address = localhost\n')
1107 1110 hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
1108 1111
1109 1112 for opt in self._extraconfigopts:
1110 1113 section, key = opt.encode('utf-8').split(b'.', 1)
1111 1114 assert b'=' in key, ('extra config opt %s must '
1112 1115 'have an = for assignment' % opt)
1113 1116 hgrc.write(b'[%s]\n%s\n' % (section, key))
1114 1117 hgrc.close()
1115 1118
    def fail(self, msg):
        """Mark this test as failed (not errored) with message ``msg``."""
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)
1120 1123
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        The exitcode may also be the string 'timeout' when the Popen4
        wrapper killed the process for exceeding its time limit.
        """
        if self._debug:
            # debug mode: inherit our stdout/stderr, no capture, no timeout
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        # a failing test may leave servers behind; reap them
        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # normalize output (ports, paths, ...) before the diff
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
1169 1172
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # expected output lives next to the test, with a .out suffix
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        py3kswitch = self._py3kwarnings and b' -3' or b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows output uses \r\n; normalize so diffs are stable
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1188 1191
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

# bchr(i) -> single byte: on Python 2 bytes is str so chr works; on
# Python 3 a one-element bytes object must be built explicitly.
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
1206 1209
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # markers emitted by hghave for skipped / failed feature checks
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # detects bytes that need (esc) escaping in recorded output
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # substitution machinery turning raw bytes into \xNN escapes
    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1217 1220
1218 1221 def __init__(self, path, *args, **kwds):
1219 1222 # accept an extra "case" parameter
1220 1223 case = kwds.pop('case', None)
1221 1224 self._case = case
1222 1225 self._allcases = parsettestcases(path)
1223 1226 super(TTest, self).__init__(path, *args, **kwds)
1224 1227 if case:
1225 1228 self.name = '%s (case %s)' % (self.name, _strpath(case))
1226 1229 self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
1227 1230 self._tmpname += b'-%s' % case
1228 1231
1229 1232 @property
1230 1233 def refpath(self):
1231 1234 return os.path.join(self._testdir, self.bname)
1232 1235
1233 1236 def _run(self, env):
1234 1237 f = open(self.path, 'rb')
1235 1238 lines = f.readlines()
1236 1239 f.close()
1237 1240
1238 1241 # .t file is both reference output and the test input, keep reference
1239 1242 # output updated with the the test input. This avoids some race
1240 1243 # conditions where the reference output does not match the actual test.
1241 1244 if self._refout is not None:
1242 1245 self._refout = lines
1243 1246
1244 1247 salt, script, after, expected = self._parsetest(lines)
1245 1248
1246 1249 # Write out the generated script.
1247 1250 fname = b'%s.sh' % self._testtmp
1248 1251 f = open(fname, 'wb')
1249 1252 for l in script:
1250 1253 f.write(l)
1251 1254 f.close()
1252 1255
1253 1256 cmd = b'%s "%s"' % (self._shell, fname)
1254 1257 vlog("# Running", cmd)
1255 1258
1256 1259 exitcode, output = self._runcommand(cmd, env)
1257 1260
1258 1261 if self._aborted:
1259 1262 raise KeyboardInterrupt()
1260 1263
1261 1264 # Do not merge output if skipped. Return hghave message instead.
1262 1265 # Similarly, with --debug, output is None.
1263 1266 if exitcode == self.SKIPPED_STATUS or output is None:
1264 1267 return exitcode, output
1265 1268
1266 1269 return self._processoutput(exitcode, output, salt, after, expected)
1267 1270
1268 1271 def _hghave(self, reqs):
1269 1272 # TODO do something smarter when all other uses of hghave are gone.
1270 1273 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1271 1274 tdir = runtestdir.replace(b'\\', b'/')
1272 1275 proc = Popen4(b'%s -c "%s/hghave %s"' %
1273 1276 (self._shell, tdir, b' '.join(reqs)),
1274 1277 self._testtmp, 0, self._getenv())
1275 1278 stdout, stderr = proc.communicate()
1276 1279 ret = proc.wait()
1277 1280 if wifexited(ret):
1278 1281 ret = os.WEXITSTATUS(ret)
1279 1282 if ret == 2:
1280 1283 print(stdout.decode('utf-8'))
1281 1284 sys.exit(1)
1282 1285
1283 1286 if ret != 0:
1284 1287 return False, stdout
1285 1288
1286 1289 if b'slow' in reqs:
1287 1290 self._timeout = self._slowtimeout
1288 1291 return True, None
1289 1292
1290 1293 def _iftest(self, args):
1291 1294 # implements "#if"
1292 1295 reqs = []
1293 1296 for arg in args:
1294 1297 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1295 1298 if arg[3:] == self._case:
1296 1299 return False
1297 1300 elif arg in self._allcases:
1298 1301 if arg != self._case:
1299 1302 return False
1300 1303 else:
1301 1304 reqs.append(arg)
1302 1305 return self._hghave(reqs)[0]
1303 1306
1304 1307 def _parsetest(self, lines):
1305 1308 # We generate a shell script which outputs unique markers to line
1306 1309 # up script results with our source. These markers include input
1307 1310 # line number and the last return code.
1308 1311 salt = b"SALT%d" % time.time()
1309 1312 def addsalt(line, inpython):
1310 1313 if inpython:
1311 1314 script.append(b'%s %d 0\n' % (salt, line))
1312 1315 else:
1313 1316 script.append(b'echo %s %d $?\n' % (salt, line))
1314 1317
1315 1318 script = []
1316 1319
1317 1320 # After we run the shell script, we re-unify the script output
1318 1321 # with non-active parts of the source, with synchronization by our
1319 1322 # SALT line number markers. The after table contains the non-active
1320 1323 # components, ordered by line number.
1321 1324 after = {}
1322 1325
1323 1326 # Expected shell script output.
1324 1327 expected = {}
1325 1328
1326 1329 pos = prepos = -1
1327 1330
1328 1331 # True or False when in a true or false conditional section
1329 1332 skipping = None
1330 1333
1331 1334 # We keep track of whether or not we're in a Python block so we
1332 1335 # can generate the surrounding doctest magic.
1333 1336 inpython = False
1334 1337
1335 1338 if self._debug:
1336 1339 script.append(b'set -x\n')
1337 1340 if self._hgcommand != b'hg':
1338 1341 script.append(b'alias hg="%s"\n' % self._hgcommand)
1339 1342 if os.getenv('MSYSTEM'):
1340 1343 script.append(b'alias pwd="pwd -W"\n')
1341 1344
1342 1345 n = 0
1343 1346 for n, l in enumerate(lines):
1344 1347 if not l.endswith(b'\n'):
1345 1348 l += b'\n'
1346 1349 if l.startswith(b'#require'):
1347 1350 lsplit = l.split()
1348 1351 if len(lsplit) < 2 or lsplit[0] != b'#require':
1349 1352 after.setdefault(pos, []).append(' !!! invalid #require\n')
1350 1353 haveresult, message = self._hghave(lsplit[1:])
1351 1354 if not haveresult:
1352 1355 script = [b'echo "%s"\nexit 80\n' % message]
1353 1356 break
1354 1357 after.setdefault(pos, []).append(l)
1355 1358 elif l.startswith(b'#if'):
1356 1359 lsplit = l.split()
1357 1360 if len(lsplit) < 2 or lsplit[0] != b'#if':
1358 1361 after.setdefault(pos, []).append(' !!! invalid #if\n')
1359 1362 if skipping is not None:
1360 1363 after.setdefault(pos, []).append(' !!! nested #if\n')
1361 1364 skipping = not self._iftest(lsplit[1:])
1362 1365 after.setdefault(pos, []).append(l)
1363 1366 elif l.startswith(b'#else'):
1364 1367 if skipping is None:
1365 1368 after.setdefault(pos, []).append(' !!! missing #if\n')
1366 1369 skipping = not skipping
1367 1370 after.setdefault(pos, []).append(l)
1368 1371 elif l.startswith(b'#endif'):
1369 1372 if skipping is None:
1370 1373 after.setdefault(pos, []).append(' !!! missing #if\n')
1371 1374 skipping = None
1372 1375 after.setdefault(pos, []).append(l)
1373 1376 elif skipping:
1374 1377 after.setdefault(pos, []).append(l)
1375 1378 elif l.startswith(b' >>> '): # python inlines
1376 1379 after.setdefault(pos, []).append(l)
1377 1380 prepos = pos
1378 1381 pos = n
1379 1382 if not inpython:
1380 1383 # We've just entered a Python block. Add the header.
1381 1384 inpython = True
1382 1385 addsalt(prepos, False) # Make sure we report the exit code.
1383 1386 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1384 1387 addsalt(n, True)
1385 1388 script.append(l[2:])
1386 1389 elif l.startswith(b' ... '): # python inlines
1387 1390 after.setdefault(prepos, []).append(l)
1388 1391 script.append(l[2:])
1389 1392 elif l.startswith(b' $ '): # commands
1390 1393 if inpython:
1391 1394 script.append(b'EOF\n')
1392 1395 inpython = False
1393 1396 after.setdefault(pos, []).append(l)
1394 1397 prepos = pos
1395 1398 pos = n
1396 1399 addsalt(n, False)
1397 1400 cmd = l[4:].split()
1398 1401 if len(cmd) == 2 and cmd[0] == b'cd':
1399 1402 l = b' $ cd %s || exit 1\n' % cmd[1]
1400 1403 script.append(l[4:])
1401 1404 elif l.startswith(b' > '): # continuations
1402 1405 after.setdefault(prepos, []).append(l)
1403 1406 script.append(l[4:])
1404 1407 elif l.startswith(b' '): # results
1405 1408 # Queue up a list of expected results.
1406 1409 expected.setdefault(pos, []).append(l[2:])
1407 1410 else:
1408 1411 if inpython:
1409 1412 script.append(b'EOF\n')
1410 1413 inpython = False
1411 1414 # Non-command/result. Queue up for merged output.
1412 1415 after.setdefault(pos, []).append(l)
1413 1416
1414 1417 if inpython:
1415 1418 script.append(b'EOF\n')
1416 1419 if skipping is not None:
1417 1420 after.setdefault(pos, []).append(' !!! missing #endif\n')
1418 1421 addsalt(n + 1, False)
1419 1422
1420 1423 return salt, script, after, expected
1421 1424
1422 1425 def _processoutput(self, exitcode, output, salt, after, expected):
1423 1426 # Merge the script output back into a unified test.
1424 1427 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1425 1428 if exitcode != 0:
1426 1429 warnonly = 3
1427 1430
1428 1431 pos = -1
1429 1432 postout = []
1430 1433 for l in output:
1431 1434 lout, lcmd = l, None
1432 1435 if salt in l:
1433 1436 lout, lcmd = l.split(salt, 1)
1434 1437
1435 1438 while lout:
1436 1439 if not lout.endswith(b'\n'):
1437 1440 lout += b' (no-eol)\n'
1438 1441
1439 1442 # Find the expected output at the current position.
1440 1443 els = [None]
1441 1444 if expected.get(pos, None):
1442 1445 els = expected[pos]
1443 1446
1444 1447 i = 0
1445 1448 optional = []
1446 1449 while i < len(els):
1447 1450 el = els[i]
1448 1451
1449 1452 r = self.linematch(el, lout)
1450 1453 if isinstance(r, str):
1451 1454 if r == '+glob':
1452 1455 lout = el[:-1] + ' (glob)\n'
1453 1456 r = '' # Warn only this line.
1454 1457 elif r == '-glob':
1455 1458 lout = ''.join(el.rsplit(' (glob)', 1))
1456 1459 r = '' # Warn only this line.
1457 1460 elif r == "retry":
1458 1461 postout.append(b' ' + el)
1459 1462 els.pop(i)
1460 1463 break
1461 1464 else:
1462 1465 log('\ninfo, unknown linematch result: %r\n' % r)
1463 1466 r = False
1464 1467 if r:
1465 1468 els.pop(i)
1466 1469 break
1467 1470 if el:
1468 1471 if el.endswith(b" (?)\n"):
1469 1472 optional.append(i)
1470 1473 else:
1471 1474 m = optline.match(el)
1472 1475 if m:
1473 1476 conditions = [
1474 1477 c for c in m.group(2).split(b' ')]
1475 1478
1476 1479 if not self._iftest(conditions):
1477 1480 optional.append(i)
1478 1481
1479 1482 i += 1
1480 1483
1481 1484 if r:
1482 1485 if r == "retry":
1483 1486 continue
1484 1487 # clean up any optional leftovers
1485 1488 for i in optional:
1486 1489 postout.append(b' ' + els[i])
1487 1490 for i in reversed(optional):
1488 1491 del els[i]
1489 1492 postout.append(b' ' + el)
1490 1493 else:
1491 1494 if self.NEEDESCAPE(lout):
1492 1495 lout = TTest._stringescape(b'%s (esc)\n' %
1493 1496 lout.rstrip(b'\n'))
1494 1497 postout.append(b' ' + lout) # Let diff deal with it.
1495 1498 if r != '': # If line failed.
1496 1499 warnonly = 3 # for sure not
1497 1500 elif warnonly == 1: # Is "not yet" and line is warn only.
1498 1501 warnonly = 2 # Yes do warn.
1499 1502 break
1500 1503 else:
1501 1504 # clean up any optional leftovers
1502 1505 while expected.get(pos, None):
1503 1506 el = expected[pos].pop(0)
1504 1507 if el:
1505 1508 if not el.endswith(b" (?)\n"):
1506 1509 m = optline.match(el)
1507 1510 if m:
1508 1511 conditions = [c for c in m.group(2).split(b' ')]
1509 1512
1510 1513 if self._iftest(conditions):
1511 1514 # Don't append as optional line
1512 1515 continue
1513 1516 else:
1514 1517 continue
1515 1518 postout.append(b' ' + el)
1516 1519
1517 1520 if lcmd:
1518 1521 # Add on last return code.
1519 1522 ret = int(lcmd.split()[1])
1520 1523 if ret != 0:
1521 1524 postout.append(b' [%d]\n' % ret)
1522 1525 if pos in after:
1523 1526 # Merge in non-active test bits.
1524 1527 postout += after.pop(pos)
1525 1528 pos = int(lcmd.split()[0])
1526 1529
1527 1530 if pos in after:
1528 1531 postout += after.pop(pos)
1529 1532
1530 1533 if warnonly == 2:
1531 1534 exitcode = False # Set exitcode to warned.
1532 1535
1533 1536 return exitcode, postout
1534 1537
1535 1538 @staticmethod
1536 1539 def rematch(el, l):
1537 1540 try:
1538 1541 el = b'(?:' + el + b')'
1539 1542 # use \Z to ensure that the regex matches to the end of the string
1540 1543 if os.name == 'nt':
1541 1544 return re.match(el + br'\r?\n\Z', l)
1542 1545 return re.match(el + br'\n\Z', l)
1543 1546 except re.error:
1544 1547 # el is an invalid regex
1545 1548 return False
1546 1549
1547 1550 @staticmethod
1548 1551 def globmatch(el, l):
1549 1552 # The only supported special characters are * and ? plus / which also
1550 1553 # matches \ on windows. Escaping of these characters is supported.
1551 1554 if el + b'\n' == l:
1552 1555 if os.altsep:
1553 1556 # matching on "/" is not needed for this line
1554 1557 for pat in checkcodeglobpats:
1555 1558 if pat.match(el):
1556 1559 return True
1557 1560 return b'-glob'
1558 1561 return True
1559 1562 el = el.replace(b'$LOCALIP', b'*')
1560 1563 i, n = 0, len(el)
1561 1564 res = b''
1562 1565 while i < n:
1563 1566 c = el[i:i + 1]
1564 1567 i += 1
1565 1568 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1566 1569 res += el[i - 1:i + 1]
1567 1570 i += 1
1568 1571 elif c == b'*':
1569 1572 res += b'.*'
1570 1573 elif c == b'?':
1571 1574 res += b'.'
1572 1575 elif c == b'/' and os.altsep:
1573 1576 res += b'[/\\\\]'
1574 1577 else:
1575 1578 res += re.escape(c)
1576 1579 return TTest.rematch(res, l)
1577 1580
1578 1581 def linematch(self, el, l):
1579 1582 retry = False
1580 1583 if el == l: # perfect match (fast)
1581 1584 return True
1582 1585 if el:
1583 1586 if el.endswith(b" (?)\n"):
1584 1587 retry = "retry"
1585 1588 el = el[:-5] + b"\n"
1586 1589 else:
1587 1590 m = optline.match(el)
1588 1591 if m:
1589 1592 conditions = [c for c in m.group(2).split(b' ')]
1590 1593
1591 1594 el = m.group(1) + b"\n"
1592 1595 if not self._iftest(conditions):
1593 1596 retry = "retry" # Not required by listed features
1594 1597
1595 1598 if el.endswith(b" (esc)\n"):
1596 1599 if PYTHON3:
1597 1600 el = el[:-7].decode('unicode_escape') + '\n'
1598 1601 el = el.encode('utf-8')
1599 1602 else:
1600 1603 el = el[:-7].decode('string-escape') + '\n'
1601 1604 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1602 1605 return True
1603 1606 if el.endswith(b" (re)\n"):
1604 1607 return TTest.rematch(el[:-6], l) or retry
1605 1608 if el.endswith(b" (glob)\n"):
1606 1609 # ignore '(glob)' added to l by 'replacements'
1607 1610 if l.endswith(b" (glob)\n"):
1608 1611 l = l[:-8] + b"\n"
1609 1612 return TTest.globmatch(el[:-8], l) or retry
1610 1613 if os.altsep and l.replace(b'\\', b'/') == el:
1611 1614 return b'+glob'
1612 1615 return retry
1613 1616
1614 1617 @staticmethod
1615 1618 def parsehghaveoutput(lines):
1616 1619 '''Parse hghave log lines.
1617 1620
1618 1621 Return tuple of lists (missing, failed):
1619 1622 * the missing/unknown features
1620 1623 * the features for which existence check failed'''
1621 1624 missing = []
1622 1625 failed = []
1623 1626 for line in lines:
1624 1627 if line.startswith(TTest.SKIPPED_PREFIX):
1625 1628 line = line.splitlines()[0]
1626 1629 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1627 1630 elif line.startswith(TTest.FAILED_PREFIX):
1628 1631 line = line.splitlines()[0]
1629 1632 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1630 1633
1631 1634 return missing, failed
1632 1635
1633 1636 @staticmethod
1634 1637 def _escapef(m):
1635 1638 return TTest.ESCAPEMAP[m.group(0)]
1636 1639
1637 1640 @staticmethod
1638 1641 def _stringescape(s):
1639 1642 return TTest.ESCAPESUB(TTest._escapef, s)
1640 1643
1641 1644 iolock = threading.RLock()
1642 1645
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else: # 'always', for testing purposes
            self.color = pygmentspresent

    def addFailure(self, test, reason):
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport)
                else:
                    self.stream.write('\n')
                    for line in lines:
                        line = highlightdiff(line, self.color)
                        if PYTHON3:
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                            self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)')
                else:
                    self.stream.write('Accept this change? [n] ')
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.bname.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        channels = [""] * self._jobs

        def job(test, result):
            # Claim a free output channel for --showchannels display.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Periodically render channel activity for --showchannels.
            count = 0
            while channels:
                d = '\n%03s  ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + '  ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        channels = []

        return result
2001 2004
# Save the most recent 5 wall-clock runtimes of each test to a
# human-readable text file named .testtimes. Tests are sorted
# alphabetically, while times for each test are listed from oldest to
# newest.

def loadtimes(outputdir):
    """Load previously recorded test runtimes from ``outputdir``.

    Returns a list of ``(testname, [runtime, ...])`` tuples parsed from
    the .testtimes file; a missing file yields an empty list, any other
    I/O error is re-raised.
    """
    times = []
    try:
        # savetimes() renames the data into place as '.testtimes'; the old
        # '.testtimes-' spelling here never matched that file, so saved
        # runtimes were silently ignored.
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                ts = line.split()
                times.append((ts[0], [float(t) for t in ts[1:]]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
2018 2021
def savetimes(outputdir, result):
    """Merge runtimes from ``result`` into outputdir's .testtimes file.

    Keeps at most the 5 most recent wall-clock runtimes per test name;
    skipped tests are not recorded. The file is replaced via a temp file
    plus rename; failures to unlink/rename are deliberately ignored
    (best-effort cache).
    """
    saved = dict(loadtimes(outputdir))
    maxruns = 5
    skipped = set([str(t[0]) for t in result.skipped])
    for tdata in result.times:
        test, real = tdata[0], tdata[3]
        if test not in skipped:
            ts = saved.setdefault(test, [])
            ts.append(real)
            # Drop all but the most recent maxruns entries.
            ts[:] = ts[-maxruns:]

    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
                                   dir=outputdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        for name, ts in sorted(saved.items()):
            fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2044 2047
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The harness object driving this run; supplies options and the
        # output directory used for reports.
        self._runner = runner
2052 2055
2053 2056 def listtests(self, test):
2054 2057 result = TestResult(self._runner.options, self.stream,
2055 2058 self.descriptions, 0)
2056 2059 test = sorted(test, key=lambda t: t.name)
2057 2060 for t in test:
2058 2061 print(t.name)
2059 2062 result.addSuccess(t)
2060 2063
2061 2064 if self._runner.options.xunit:
2062 2065 with open(self._runner.options.xunit, "wb") as xuf:
2063 2066 self._writexunit(result, xuf)
2064 2067
2065 2068 if self._runner.options.json:
2066 2069 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2067 2070 with open(jsonpath, 'w') as fp:
2068 2071 self._writejson(result, fp)
2069 2072
2070 2073 return result
2071 2074
    def run(self, test):
        """Run *test* (a TestSuite), print a human-readable summary of
        skips/failures/errors, emit the optional XUnit/JSON reports, and
        return the populated TestResult."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        # Hold iolock so the summary is not interleaved with output from
        # worker threads that are still flushing.
        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.failures:
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, result.color))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            # Optional machine-readable reports.
            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(result, fp)

            self._runner._checkhglib('Tested')

            # Persist per-test timings for the next run's scheduling.
            savetimes(self._runner._outputdir, result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (result.testsRun, skipped + ignored, failed))
            if failed:
                # Reporting the seed lets a failure be reproduced exactly.
                self.stream.writeln('python hash seed: %s' %
                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions'))
                total = sum(exceptions.values())

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln('%d total from %d frames' %
                                    (total, len(exceptions)))
                for (frame, line, exc), count in exceptions.most_common():
                    self.stream.writeln('%d\t%s: %s' % (count, frame, exc))

            self.stream.flush()

        return result
2133 2136
    def _bisecttests(self, tests):
        """Use `hg bisect` to pinpoint the changeset that broke each
        failing test in *tests*, writing a one-line verdict per test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
        def pread(args):
            # Run *args* with HGPLAIN set so hg output is stable enough
            # to parse; stderr is folded into stdout.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
                                 stdout=subprocess.PIPE, env=env)
            data = p.stdout.read()
            p.wait()
            return data
        for test in tests:
            # NOTE(review): stray trailing comma makes this line a tuple
            # expression; harmless but should probably be removed.
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
            rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts,
                                   test)
            data = pread(bisectcmd + ['--command', rtc])
            # Parse the "The first bad/good revision is:" block emitted
            # by `hg bisect` when it converges.
            m = re.search(
                (br'\nThe first (?P<goodbad>bad|good) revision '
                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                 br'summary: +(?P<summary>[^\n]+)\n'),
                data, (re.MULTILINE | re.DOTALL))
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test)
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)' % (
                    test, verb, dat['node'], dat['summary']))
2174 2177
2175 2178 def printtimes(self, times):
2176 2179 # iolock held by run
2177 2180 self.stream.writeln('# Producing time report')
2178 2181 times.sort(key=lambda t: (t[3]))
2179 2182 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2180 2183 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
2181 2184 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
2182 2185 for tdata in times:
2183 2186 test = tdata[0]
2184 2187 cuser, csys, real, start, end = tdata[1:6]
2185 2188 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2186 2189
    @staticmethod
    def _writexunit(result, outf):
        """Write *result* to *outf* as JUnit/XUnit-style XML.

        See http://llg.cubic.org/docs/junit/ for a reference.
        """
        # Map test name -> wall-clock duration (index 3 of each entry).
        timesd = dict((t[0], t[3]) for t in result.times)
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('name', 'run-tests')
        s.setAttribute('tests', str(result.testsRun))
        s.setAttribute('errors', "0") # TODO: report real errors separately
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('skipped', str(len(result.skipped) +
                                      len(result.ignored)))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        # faildata maps name -> diff bytes; keys here are names (str),
        # unlike the Test objects used in result.failures.
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2239 2242
2240 2243 @staticmethod
2241 2244 def _writejson(result, outf):
2242 2245 timesd = {}
2243 2246 for tdata in result.times:
2244 2247 test = tdata[0]
2245 2248 timesd[test] = tdata[1:]
2246 2249
2247 2250 outcome = {}
2248 2251 groups = [('success', ((tc, None)
2249 2252 for tc in result.successes)),
2250 2253 ('failure', result.failures),
2251 2254 ('skip', result.skipped)]
2252 2255 for res, testcases in groups:
2253 2256 for tc, __ in testcases:
2254 2257 if tc.name in timesd:
2255 2258 diff = result.faildata.get(tc.name, b'')
2256 2259 try:
2257 2260 diff = diff.decode('unicode_escape')
2258 2261 except UnicodeDecodeError as e:
2259 2262 diff = '%r decoding diff, sorry' % e
2260 2263 tres = {'result': res,
2261 2264 'time': ('%0.3f' % timesd[tc.name][2]),
2262 2265 'cuser': ('%0.3f' % timesd[tc.name][0]),
2263 2266 'csys': ('%0.3f' % timesd[tc.name][1]),
2264 2267 'start': ('%0.3f' % timesd[tc.name][3]),
2265 2268 'end': ('%0.3f' % timesd[tc.name][4]),
2266 2269 'diff': diff,
2267 2270 }
2268 2271 else:
2269 2272 # blacklisted test
2270 2273 tres = {'result': res}
2271 2274
2272 2275 outcome[tc.name] = tres
2273 2276 jsonout = json.dumps(outcome, sort_keys=True, indent=4,
2274 2277 separators=(',', ': '))
2275 2278 outf.writelines(("testreport =", jsonout))
2276 2279
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # External programs that must be on PATH for the suite to run;
    # checked up front by _checktools().
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
2298 2301
2299 2302 def __init__(self):
2300 2303 self.options = None
2301 2304 self._hgroot = None
2302 2305 self._testdir = None
2303 2306 self._outputdir = None
2304 2307 self._hgtmp = None
2305 2308 self._installdir = None
2306 2309 self._bindir = None
2307 2310 self._tmpbinddir = None
2308 2311 self._pythondir = None
2309 2312 self._coveragefile = None
2310 2313 self._createdfiles = []
2311 2314 self._hgcommand = None
2312 2315 self._hgpath = None
2313 2316 self._portoffset = 0
2314 2317 self._ports = {}
2315 2318
    def run(self, args, parser=None):
        """Run the test suite.

        Parses *args*, discovers tests, runs them via _run(), and
        returns its exit status.
        """
        # Force a predictable umask so files created by tests have
        # consistent permissions; restored on the way out.
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options = parseargs(args, parser)
            tests = [_bytespath(a) for a in options.tests]
            if options.test_list is not None:
                # --test-list files contribute one test name per line.
                for listfile in options.test_list:
                    with open(listfile, 'rb') as f:
                        tests.extend(t for t in f.read().splitlines() if t)
            self.options = options

            self._checktools()
            testdescs = self.findtests(tests)
            if options.profile_runner:
                import statprof
                statprof.start()
            result = self._run(testdescs)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)
2342 2345
    def _run(self, testdescs):
        """Prepare the execution environment and run *testdescs*.

        Orders the tests, sets up temp/output directories and the many
        environment variables the tests rely on, then delegates to
        _runtests().  Returns the exit status (0 on success).
        """
        if self.options.random:
            random.shuffle(testdescs)
        else:
            # keywords for slow tests
            slow = {b'svn': 10,
                    b'cvs': 10,
                    b'hghave': 10,
                    b'largefiles-update': 10,
                    b'run-tests': 10,
                    b'corruption': 10,
                    b'race': 10,
                    b'i18n': 10,
                    b'check': 100,
                    b'gendoc': 100,
                    b'contrib-perf': 200,
                    }
            perf = {}
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                f = f['path']
                try:
                    return perf[f]
                except KeyError:
                    try:
                        val = -os.stat(f).st_size
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        perf[f] = -1e9 # file does not exist, tell early
                        return -1e9
                    for kw, mul in slow.items():
                        if kw in f:
                            val *= mul
                    if f.endswith(b'.py'):
                        val /= 10.0
                    perf[f] = val / 1000.0
                    return perf[f]
            testdescs.sort(key=sortkey)

        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
                                                      pathname)
        if self.options.outputdir:
            self._outputdir = canonpath(_bytespath(self.options.outputdir))
        else:
            self._outputdir = self._testdir
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            # Automatically removing tmpdir sounds convenient, but could
            # really annoy anyone in the habit of using "--tmpdir=/tmp"
            # or "--tmpdir=$HOME".
            #vlog("# Removing temp dir", tmpdir)
            #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None) # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        if self.options.with_python3:
            osenvironb[b'PYTHON3'] = self.options.with_python3

        # Build the PATH the tests will see: our bin dirs and this
        # script's directory (for test helpers) go first.
        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        if self.options.exceptions:
            exceptionsdir = os.path.join(self._outputdir, b'exceptions')
            try:
                os.makedirs(exceptionsdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Remove all existing exception reports.
            for f in os.listdir(exceptionsdir):
                os.unlink(os.path.join(exceptionsdir, f))

            osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
            logexceptions = os.path.join(self._testdir, b'logexceptions.py')
            self.options.extra_config_opt.append(
                'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
        vlog("# Writing to directory", self._outputdir)

        try:
            return self._runtests(testdescs) or 0
        finally:
            # Give child processes a moment to release files before the
            # temp tree is removed.
            time.sleep(.1)
            self._cleanup()
2545 2548
    def findtests(self, args):
        """Finds possible test files from arguments.

        If you wish to inject custom tests into the test harness, this would
        be a good function to monkeypatch or override in a derived class.

        Returns a list of test description dicts with a 'path' key and,
        for .t files declaring test cases, a 'case' key per case.
        """
        if not args:
            if self.options.changed:
                # Only tests touched since --changed's revision.
                proc = Popen4('hg st --rev "%s" -man0 .' %
                              self.options.changed, None, 0)
                stdout, stderr = proc.communicate()
                args = stdout.strip(b'\0').split(b'\0')
            else:
                args = os.listdir(b'.')

        expanded_args = []
        for arg in args:
            if os.path.isdir(arg):
                if not arg.endswith(b'/'):
                    arg += b'/'
                expanded_args.extend([arg + a for a in os.listdir(arg)])
            else:
                expanded_args.append(arg)
        args = expanded_args

        tests = []
        for t in args:
            # Only files named test-*.py or test-*.t are tests.
            if not (os.path.basename(t).startswith(b'test-')
                    and (t.endswith(b'.py') or t.endswith(b'.t'))):
                continue
            if t.endswith(b'.t'):
                # .t file may contain multiple test cases
                cases = sorted(parsettestcases(t))
                if cases:
                    tests += [{'path': t, 'case': c} for c in sorted(cases)]
                else:
                    tests.append({'path': t})
            else:
                tests.append({'path': t})
        return tests
2586 2589
    def _runtests(self, testdescs):
        """Instantiate Test objects for *testdescs* and run them.

        Handles --restart, installs hg/chg when needed, and produces
        coverage output.  Returns 1 on failure/interrupt, None (treated
        as success by the caller's ``or 0``) otherwise.
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', None)
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # Skip tests that have no leftover .err file from the
                # previous run; restart at the first one that failed.
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=_reloadtest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                result = runner.run(suite)

                if result.failures:
                    failed = True

                if self.options.anycoverage:
                    self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
2661 2664
2662 2665 def _getport(self, count):
2663 2666 port = self._ports.get(count) # do we have a cached entry?
2664 2667 if port is None:
2665 2668 portneeded = 3
2666 2669 # above 100 tries we just give up and let test reports failure
2667 2670 for tries in xrange(100):
2668 2671 allfree = True
2669 2672 port = self.options.port + self._portoffset
2670 2673 for idx in xrange(portneeded):
2671 2674 if not checkportisavailable(port + idx):
2672 2675 allfree = False
2673 2676 break
2674 2677 self._portoffset += portneeded
2675 2678 if allfree:
2676 2679 break
2677 2680 self._ports[count] = port
2678 2681 return port
2679 2682
    def _gettest(self, testdesc, count):
        """Obtain a Test by looking at its filename.

        Returns a Test instance. The Test may not be runnable if it doesn't
        map to a known type.
        """
        path = testdesc['path']
        lctest = path.lower()
        testcls = Test

        # Pick the Test subclass by file extension (.py / .t).
        for ext, cls in self.TESTTYPES:
            if lctest.endswith(ext):
                testcls = cls
                break

        refpath = os.path.join(self._testdir, path)
        tmpdir = os.path.join(self._hgtmp, b'child%d' % count)

        # extra keyword parameters. 'case' is used by .t tests
        kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)

        t = testcls(refpath, self._outputdir, tmpdir,
                    keeptmpdir=self.options.keep_tmpdir,
                    debug=self.options.debug,
                    timeout=self.options.timeout,
                    startport=self._getport(count),
                    extraconfigopts=self.options.extra_config_opt,
                    py3kwarnings=self.options.py3k_warnings,
                    shell=self.options.shell,
                    hgcommand=self._hgcommand,
                    usechg=bool(self.options.with_chg or self.options.chg),
                    useipv6=useipv6, **kwds)
        t.should_reload = True
        return t
2714 2717
2715 2718 def _cleanup(self):
2716 2719 """Clean up state from this test invocation."""
2717 2720 if self.options.keep_tmpdir:
2718 2721 return
2719 2722
2720 2723 vlog("# Cleaning up HGTMP", self._hgtmp)
2721 2724 shutil.rmtree(self._hgtmp, True)
2722 2725 for f in self._createdfiles:
2723 2726 try:
2724 2727 os.remove(f)
2725 2728 except OSError:
2726 2729 pass
2727 2730
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
        if getattr(os, 'symlink', None):
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                if os.readlink(mypython) == sys.executable:
                    # Symlink already correct; nothing to do.
                    return
                os.unlink(mypython)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # No symlink support (e.g. some Windows setups): rearrange
            # PATH so our interpreter's directory is found first.
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2761 2764
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        Exits the process (sys.exit(1)) if the install fails.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (b'%(exe)s setup.py %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # Install failed: dump the captured log and abort.
            f = open(installerrs, 'rb')
            for line in f:
                if PYTHON3:
                    sys.stdout.buffer.write(line)
                else:
                    sys.stdout.write(line)
            f.close()
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            vlog("# Updating hg command to enable Py3k Warnings switch")
            # NOTE(review): the file is opened in binary mode but ' -3'
            # and '\n' are str; this branch would raise TypeError under
            # Python 3.  py3k_warnings is presumably a Python2-only
            # option — confirm before relying on it under py3.
            f = open(os.path.join(self._bindir, 'hg'), 'rb')
            lines = [line.rstrip() for line in f]
            lines[0] += ' -3'
            f.close()
            f = open(os.path.join(self._bindir, 'hg'), 'wb')
            for line in lines:
                f.write(line + '\n')
            f.close()

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            f = open(hgbat, 'rb')
            data = f.read()
            f.close()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                f = open(hgbat, 'wb')
                f.write(data)
                f.close()
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
2880 2883
2881 2884 def _checkhglib(self, verb):
2882 2885 """Ensure that the 'mercurial' package imported by python is
2883 2886 the one we expect it to be. If not, print a warning to stderr."""
2884 2887 if ((self._bindir == self._pythondir) and
2885 2888 (self._bindir != self._tmpbindir)):
2886 2889 # The pythondir has been inferred from --with-hg flag.
2887 2890 # We cannot expect anything sensible here.
2888 2891 return
2889 2892 expecthg = os.path.join(self._pythondir, b'mercurial')
2890 2893 actualhg = self._gethgpath()
2891 2894 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2892 2895 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2893 2896 ' (expected %s)\n'
2894 2897 % (verb, actualhg, expecthg))
    def _gethgpath(self):
        """Return the path to the mercurial package that is actually found by
        the current Python interpreter.

        The result is computed once and cached on self._hgpath.
        """
        if self._hgpath is not None:
            return self._hgpath

        # Ask the interpreter itself where it imports 'mercurial' from.
        cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
        cmd = cmd % PYTHON
        if PYTHON3:
            cmd = _strpath(cmd)
        pipe = os.popen(cmd)
        try:
            self._hgpath = _bytespath(pipe.read().strip())
        finally:
            pipe.close()

        return self._hgpath
2912 2915
    def _installchg(self):
        """Install chg into the test environment.

        Builds contrib/chg with make; exits the process on failure.
        """
        vlog('# Performing temporary installation of CHG')
        assert os.path.dirname(self._bindir) == self._installdir
        assert self._hgroot, 'must be called after _installhg()'
        cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
               % {b'make': 'make', # TODO: switch by option or environment?
                  b'prefix': self._installdir})
        cwd = os.path.join(self._hgroot, b'contrib', b'chg')
        vlog("# Running", cmd)
        proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _err = proc.communicate()
        if proc.returncode != 0:
            # Build failed: dump the combined output and abort.
            if PYTHON3:
                sys.stdout.buffer.write(out)
            else:
                sys.stdout.write(out)
            sys.exit(1)
2933 2936
    def _outputcoverage(self):
        """Produce code coverage output.

        Combines the per-process coverage data collected during the run
        and emits text, and optionally HTML/annotated, reports.
        """
        import coverage
        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(self._installdir, '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', self._pythondir]

        cov.combine()

        # Exclude the test harness and installed scripts themselves.
        omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(self._outputdir, 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(self._outputdir, 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)
2962 2965
2963 2966 def _findprogram(self, program):
2964 2967 """Search PATH for a executable program"""
2965 2968 dpb = _bytespath(os.defpath)
2966 2969 sepb = _bytespath(os.pathsep)
2967 2970 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2968 2971 name = os.path.join(p, program)
2969 2972 if os.name == 'nt' or os.access(name, os.X_OK):
2970 2973 return name
2971 2974 return None
2972 2975
2973 2976 def _checktools(self):
2974 2977 """Ensure tools required to run tests are present."""
2975 2978 for p in self.REQUIREDTOOLS:
2976 2979 if os.name == 'nt' and not p.endswith('.exe'):
2977 2980 p += '.exe'
2978 2981 found = self._findprogram(p)
2979 2982 if found:
2980 2983 vlog("# Found prerequisite", p, "at", found)
2981 2984 else:
2982 2985 print("WARNING: Did not find prerequisite tool: %s " %
2983 2986 p.decode("utf-8"))
2984 2987
def aggregateexceptions(path):
    """Tally the exception reports found in directory *path*.

    Each report file is expected to hold four NUL-separated UTF-8
    fields: exception, main frame, hg frame, hg line.  Returns a
    Counter keyed by (hgframe, hgline, exception); files with any other
    field count are skipped.
    """
    counts = collections.Counter()

    for name in os.listdir(path):
        with open(os.path.join(path, name), 'rb') as fh:
            fields = fh.read().split(b'\0')
        if len(fields) != 4:
            continue

        exc, _mainframe, hgframe, hgline = [f.decode('utf-8')
                                            for f in fields]
        counts[(hgframe, hgline, exc)] += 1

    return counts
3002 3005
if __name__ == '__main__':
    runner = TestRunner()

    # On Windows, switch the standard streams to binary mode so test
    # output bytes are not mangled by newline translation; msvcrt does
    # not exist elsewhere, hence the ImportError guard.
    try:
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,99 +1,100 b''
1 1 Create a repository:
2 2
3 3 $ hg config
4 4 devel.all-warnings=true
5 5 devel.default-date=0 0
6 6 extensions.fsmonitor= (fsmonitor !)
7 7 largefiles.usercache=$TESTTMP/.cache/largefiles (glob)
8 lfs.usercache=$TESTTMP/.cache/lfs (glob)
8 9 ui.slash=True
9 10 ui.interactive=False
10 11 ui.mergemarkers=detailed
11 12 ui.promptecho=True
12 13 web.address=localhost
13 14 web\.ipv6=(?:True|False) (re)
14 15 $ hg init t
15 16 $ cd t
16 17
17 18 Prepare a changeset:
18 19
19 20 $ echo a > a
20 21 $ hg add a
21 22
22 23 $ hg status
23 24 A a
24 25
25 26 Writes to stdio succeed and fail appropriately
26 27
27 28 #if devfull
28 29 $ hg status 2>/dev/full
29 30 A a
30 31
31 32 $ hg status >/dev/full
32 33 abort: No space left on device
33 34 [255]
34 35 #endif
35 36
36 37 #if devfull no-chg
37 38 $ hg status >/dev/full 2>&1
38 39 [1]
39 40
40 41 $ hg status ENOENT 2>/dev/full
41 42 [1]
42 43 #endif
43 44
44 45 #if devfull chg
45 46 $ hg status >/dev/full 2>&1
46 47 [255]
47 48
48 49 $ hg status ENOENT 2>/dev/full
49 50 [255]
50 51 #endif
51 52
52 53 $ hg commit -m test
53 54
54 55 This command is ancient:
55 56
56 57 $ hg history
57 58 changeset: 0:acb14030fe0a
58 59 tag: tip
59 60 user: test
60 61 date: Thu Jan 01 00:00:00 1970 +0000
61 62 summary: test
62 63
63 64
64 65 Verify that updating to revision 0 via commands.update() works properly
65 66
66 67 $ cat <<EOF > update_to_rev0.py
67 68 > from mercurial import ui, hg, commands
68 69 > myui = ui.ui.load()
69 70 > repo = hg.repository(myui, path='.')
70 71 > commands.update(myui, repo, rev=0)
71 72 > EOF
72 73 $ hg up null
73 74 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
74 75 $ $PYTHON ./update_to_rev0.py
75 76 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 77 $ hg identify -n
77 78 0
78 79
79 80
80 81 Poke around at hashes:
81 82
82 83 $ hg manifest --debug
83 84 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644 a
84 85
85 86 $ hg cat a
86 87 a
87 88
88 89 Verify should succeed:
89 90
90 91 $ hg verify
91 92 checking changesets
92 93 checking manifests
93 94 crosschecking files in changesets and manifests
94 95 checking files
95 96 1 files, 1 changesets, 1 total revisions
96 97
97 98 At the end...
98 99
99 100 $ cd ..
@@ -1,1023 +1,1024 b''
1 1 #if windows
2 2 $ PYTHONPATH="$TESTDIR/../contrib;$PYTHONPATH"
3 3 #else
4 4 $ PYTHONPATH="$TESTDIR/../contrib:$PYTHONPATH"
5 5 #endif
6 6 $ export PYTHONPATH
7 7
8 8 typical client does not want echo-back messages, so test without it:
9 9
10 10 $ grep -v '^promptecho ' < $HGRCPATH >> $HGRCPATH.new
11 11 $ mv $HGRCPATH.new $HGRCPATH
12 12
13 13 $ hg init repo
14 14 $ cd repo
15 15
16 16 >>> from __future__ import absolute_import, print_function
17 17 >>> import os
18 18 >>> import sys
19 19 >>> from hgclient import check, readchannel, runcommand
20 20 >>> @check
21 21 ... def hellomessage(server):
22 22 ... ch, data = readchannel(server)
23 23 ... print('%c, %r' % (ch, data))
24 24 ... # run an arbitrary command to make sure the next thing the server
25 25 ... # sends isn't part of the hello message
26 26 ... runcommand(server, ['id'])
27 27 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
28 28 *** runcommand id
29 29 000000000000 tip
30 30
31 31 >>> from hgclient import check
32 32 >>> @check
33 33 ... def unknowncommand(server):
34 34 ... server.stdin.write('unknowncommand\n')
35 35 abort: unknown command unknowncommand
36 36
37 37 >>> from hgclient import check, readchannel, runcommand
38 38 >>> @check
39 39 ... def checkruncommand(server):
40 40 ... # hello block
41 41 ... readchannel(server)
42 42 ...
43 43 ... # no args
44 44 ... runcommand(server, [])
45 45 ...
46 46 ... # global options
47 47 ... runcommand(server, ['id', '--quiet'])
48 48 ...
49 49 ... # make sure global options don't stick through requests
50 50 ... runcommand(server, ['id'])
51 51 ...
52 52 ... # --config
53 53 ... runcommand(server, ['id', '--config', 'ui.quiet=True'])
54 54 ...
55 55 ... # make sure --config doesn't stick
56 56 ... runcommand(server, ['id'])
57 57 ...
58 58 ... # negative return code should be masked
59 59 ... runcommand(server, ['id', '-runknown'])
60 60 *** runcommand
61 61 Mercurial Distributed SCM
62 62
63 63 basic commands:
64 64
65 65 add add the specified files on the next commit
66 66 annotate show changeset information by line for each file
67 67 clone make a copy of an existing repository
68 68 commit commit the specified files or all outstanding changes
69 69 diff diff repository (or selected files)
70 70 export dump the header and diffs for one or more changesets
71 71 forget forget the specified files on the next commit
72 72 init create a new repository in the given directory
73 73 log show revision history of entire repository or files
74 74 merge merge another revision into working directory
75 75 pull pull changes from the specified source
76 76 push push changes to the specified destination
77 77 remove remove the specified files on the next commit
78 78 serve start stand-alone webserver
79 79 status show changed files in the working directory
80 80 summary summarize working directory state
81 81 update update working directory (or switch revisions)
82 82
83 83 (use 'hg help' for the full list of commands or 'hg -v' for details)
84 84 *** runcommand id --quiet
85 85 000000000000
86 86 *** runcommand id
87 87 000000000000 tip
88 88 *** runcommand id --config ui.quiet=True
89 89 000000000000
90 90 *** runcommand id
91 91 000000000000 tip
92 92 *** runcommand id -runknown
93 93 abort: unknown revision 'unknown'!
94 94 [255]
95 95
96 96 >>> from hgclient import check, readchannel
97 97 >>> @check
98 98 ... def inputeof(server):
99 99 ... readchannel(server)
100 100 ... server.stdin.write('runcommand\n')
101 101 ... # close stdin while server is waiting for input
102 102 ... server.stdin.close()
103 103 ...
104 104 ... # server exits with 1 if the pipe closed while reading the command
105 105 ... print('server exit code =', server.wait())
106 106 server exit code = 1
107 107
108 108 >>> from hgclient import check, readchannel, runcommand, stringio
109 109 >>> @check
110 110 ... def serverinput(server):
111 111 ... readchannel(server)
112 112 ...
113 113 ... patch = """
114 114 ... # HG changeset patch
115 115 ... # User test
116 116 ... # Date 0 0
117 117 ... # Node ID c103a3dec114d882c98382d684d8af798d09d857
118 118 ... # Parent 0000000000000000000000000000000000000000
119 119 ... 1
120 120 ...
121 121 ... diff -r 000000000000 -r c103a3dec114 a
122 122 ... --- /dev/null Thu Jan 01 00:00:00 1970 +0000
123 123 ... +++ b/a Thu Jan 01 00:00:00 1970 +0000
124 124 ... @@ -0,0 +1,1 @@
125 125 ... +1
126 126 ... """
127 127 ...
128 128 ... runcommand(server, ['import', '-'], input=stringio(patch))
129 129 ... runcommand(server, ['log'])
130 130 *** runcommand import -
131 131 applying patch from stdin
132 132 *** runcommand log
133 133 changeset: 0:eff892de26ec
134 134 tag: tip
135 135 user: test
136 136 date: Thu Jan 01 00:00:00 1970 +0000
137 137 summary: 1
138 138
139 139
140 140 check strict parsing of early options:
141 141
142 142 >>> import os
143 143 >>> from hgclient import check, readchannel, runcommand
144 144 >>> os.environ['HGPLAIN'] = '+strictflags'
145 145 >>> @check
146 146 ... def cwd(server):
147 147 ... readchannel(server)
148 148 ... runcommand(server, ['log', '-b', '--config=alias.log=!echo pwned',
149 149 ... 'default'])
150 150 *** runcommand log -b --config=alias.log=!echo pwned default
151 151 abort: unknown revision '--config=alias.log=!echo pwned'!
152 152 [255]
153 153
154 154 check that "histedit --commands=-" can read rules from the input channel:
155 155
156 156 >>> import cStringIO
157 157 >>> from hgclient import check, readchannel, runcommand
158 158 >>> @check
159 159 ... def serverinput(server):
160 160 ... readchannel(server)
161 161 ... rules = 'pick eff892de26ec\n'
162 162 ... runcommand(server, ['histedit', '0', '--commands=-',
163 163 ... '--config', 'extensions.histedit='],
164 164 ... input=cStringIO.StringIO(rules))
165 165 *** runcommand histedit 0 --commands=- --config extensions.histedit=
166 166
167 167 check that --cwd doesn't persist between requests:
168 168
169 169 $ mkdir foo
170 170 $ touch foo/bar
171 171 >>> from hgclient import check, readchannel, runcommand
172 172 >>> @check
173 173 ... def cwd(server):
174 174 ... readchannel(server)
175 175 ... runcommand(server, ['--cwd', 'foo', 'st', 'bar'])
176 176 ... runcommand(server, ['st', 'foo/bar'])
177 177 *** runcommand --cwd foo st bar
178 178 ? bar
179 179 *** runcommand st foo/bar
180 180 ? foo/bar
181 181
182 182 $ rm foo/bar
183 183
184 184
185 185 check that local configs for the cached repo aren't inherited when -R is used:
186 186
187 187 $ cat <<EOF >> .hg/hgrc
188 188 > [ui]
189 189 > foo = bar
190 190 > EOF
191 191
192 192 >>> from hgclient import check, readchannel, runcommand, sep
193 193 >>> @check
194 194 ... def localhgrc(server):
195 195 ... readchannel(server)
196 196 ...
197 197 ... # the cached repo local hgrc contains ui.foo=bar, so showconfig should
198 198 ... # show it
199 199 ... runcommand(server, ['showconfig'], outfilter=sep)
200 200 ...
201 201 ... # but not for this repo
202 202 ... runcommand(server, ['init', 'foo'])
203 203 ... runcommand(server, ['-R', 'foo', 'showconfig', 'ui', 'defaults'])
204 204 *** runcommand showconfig
205 205 bundle.mainreporoot=$TESTTMP/repo
206 206 devel.all-warnings=true
207 207 devel.default-date=0 0
208 208 extensions.fsmonitor= (fsmonitor !)
209 209 largefiles.usercache=$TESTTMP/.cache/largefiles
210 lfs.usercache=$TESTTMP/.cache/lfs
210 211 ui.slash=True
211 212 ui.interactive=False
212 213 ui.mergemarkers=detailed
213 214 ui.usehttp2=true (?)
214 215 ui.foo=bar
215 216 ui.nontty=true
216 217 web.address=localhost
217 218 web\.ipv6=(?:True|False) (re)
218 219 *** runcommand init foo
219 220 *** runcommand -R foo showconfig ui defaults
220 221 ui.slash=True
221 222 ui.interactive=False
222 223 ui.mergemarkers=detailed
223 224 ui.usehttp2=true (?)
224 225 ui.nontty=true
225 226
226 227 $ rm -R foo
227 228
228 229 #if windows
229 230 $ PYTHONPATH="$TESTTMP/repo;$PYTHONPATH"
230 231 #else
231 232 $ PYTHONPATH="$TESTTMP/repo:$PYTHONPATH"
232 233 #endif
233 234
234 235 $ cat <<EOF > hook.py
235 236 > from __future__ import print_function
236 237 > import sys
237 238 > def hook(**args):
238 239 > print('hook talking')
239 240 > print('now try to read something: %r' % sys.stdin.read())
240 241 > EOF
241 242
242 243 >>> from hgclient import check, readchannel, runcommand, stringio
243 244 >>> @check
244 245 ... def hookoutput(server):
245 246 ... readchannel(server)
246 247 ... runcommand(server, ['--config',
247 248 ... 'hooks.pre-identify=python:hook.hook',
248 249 ... 'id'],
249 250 ... input=stringio('some input'))
250 251 *** runcommand --config hooks.pre-identify=python:hook.hook id
251 252 eff892de26ec tip
252 253
253 254 Clean hook cached version
254 255 $ rm hook.py*
255 256 $ rm -Rf __pycache__
256 257
257 258 $ echo a >> a
258 259 >>> import os
259 260 >>> from hgclient import check, readchannel, runcommand
260 261 >>> @check
261 262 ... def outsidechanges(server):
262 263 ... readchannel(server)
263 264 ... runcommand(server, ['status'])
264 265 ... os.system('hg ci -Am2')
265 266 ... runcommand(server, ['tip'])
266 267 ... runcommand(server, ['status'])
267 268 *** runcommand status
268 269 M a
269 270 *** runcommand tip
270 271 changeset: 1:d3a0a68be6de
271 272 tag: tip
272 273 user: test
273 274 date: Thu Jan 01 00:00:00 1970 +0000
274 275 summary: 2
275 276
276 277 *** runcommand status
277 278
278 279 >>> import os
279 280 >>> from hgclient import check, readchannel, runcommand
280 281 >>> @check
281 282 ... def bookmarks(server):
282 283 ... readchannel(server)
283 284 ... runcommand(server, ['bookmarks'])
284 285 ...
285 286 ... # changes .hg/bookmarks
286 287 ... os.system('hg bookmark -i bm1')
287 288 ... os.system('hg bookmark -i bm2')
288 289 ... runcommand(server, ['bookmarks'])
289 290 ...
290 291 ... # changes .hg/bookmarks.current
291 292 ... os.system('hg upd bm1 -q')
292 293 ... runcommand(server, ['bookmarks'])
293 294 ...
294 295 ... runcommand(server, ['bookmarks', 'bm3'])
295 296 ... f = open('a', 'ab')
296 297 ... f.write('a\n')
297 298 ... f.close()
298 299 ... runcommand(server, ['commit', '-Amm'])
299 300 ... runcommand(server, ['bookmarks'])
300 301 ... print('')
301 302 *** runcommand bookmarks
302 303 no bookmarks set
303 304 *** runcommand bookmarks
304 305 bm1 1:d3a0a68be6de
305 306 bm2 1:d3a0a68be6de
306 307 *** runcommand bookmarks
307 308 * bm1 1:d3a0a68be6de
308 309 bm2 1:d3a0a68be6de
309 310 *** runcommand bookmarks bm3
310 311 *** runcommand commit -Amm
311 312 *** runcommand bookmarks
312 313 bm1 1:d3a0a68be6de
313 314 bm2 1:d3a0a68be6de
314 315 * bm3 2:aef17e88f5f0
315 316
316 317
317 318 >>> import os
318 319 >>> from hgclient import check, readchannel, runcommand
319 320 >>> @check
320 321 ... def tagscache(server):
321 322 ... readchannel(server)
322 323 ... runcommand(server, ['id', '-t', '-r', '0'])
323 324 ... os.system('hg tag -r 0 foo')
324 325 ... runcommand(server, ['id', '-t', '-r', '0'])
325 326 *** runcommand id -t -r 0
326 327
327 328 *** runcommand id -t -r 0
328 329 foo
329 330
330 331 >>> import os
331 332 >>> from hgclient import check, readchannel, runcommand
332 333 >>> @check
333 334 ... def setphase(server):
334 335 ... readchannel(server)
335 336 ... runcommand(server, ['phase', '-r', '.'])
336 337 ... os.system('hg phase -r . -p')
337 338 ... runcommand(server, ['phase', '-r', '.'])
338 339 *** runcommand phase -r .
339 340 3: draft
340 341 *** runcommand phase -r .
341 342 3: public
342 343
343 344 $ echo a >> a
344 345 >>> from hgclient import check, readchannel, runcommand
345 346 >>> @check
346 347 ... def rollback(server):
347 348 ... readchannel(server)
348 349 ... runcommand(server, ['phase', '-r', '.', '-p'])
349 350 ... runcommand(server, ['commit', '-Am.'])
350 351 ... runcommand(server, ['rollback'])
351 352 ... runcommand(server, ['phase', '-r', '.'])
352 353 ... print('')
353 354 *** runcommand phase -r . -p
354 355 no phases changed
355 356 *** runcommand commit -Am.
356 357 *** runcommand rollback
357 358 repository tip rolled back to revision 3 (undo commit)
358 359 working directory now based on revision 3
359 360 *** runcommand phase -r .
360 361 3: public
361 362
362 363
363 364 >>> import os
364 365 >>> from hgclient import check, readchannel, runcommand
365 366 >>> @check
366 367 ... def branch(server):
367 368 ... readchannel(server)
368 369 ... runcommand(server, ['branch'])
369 370 ... os.system('hg branch foo')
370 371 ... runcommand(server, ['branch'])
371 372 ... os.system('hg branch default')
372 373 *** runcommand branch
373 374 default
374 375 marked working directory as branch foo
375 376 (branches are permanent and global, did you want a bookmark?)
376 377 *** runcommand branch
377 378 foo
378 379 marked working directory as branch default
379 380 (branches are permanent and global, did you want a bookmark?)
380 381
381 382 $ touch .hgignore
382 383 >>> import os
383 384 >>> from hgclient import check, readchannel, runcommand
384 385 >>> @check
385 386 ... def hgignore(server):
386 387 ... readchannel(server)
387 388 ... runcommand(server, ['commit', '-Am.'])
388 389 ... f = open('ignored-file', 'ab')
389 390 ... f.write('')
390 391 ... f.close()
391 392 ... f = open('.hgignore', 'ab')
392 393 ... f.write('ignored-file')
393 394 ... f.close()
394 395 ... runcommand(server, ['status', '-i', '-u'])
395 396 ... print('')
396 397 *** runcommand commit -Am.
397 398 adding .hgignore
398 399 *** runcommand status -i -u
399 400 I ignored-file
400 401
401 402
402 403 cache of non-public revisions should be invalidated on repository change
403 404 (issue4855):
404 405
405 406 >>> import os
406 407 >>> from hgclient import check, readchannel, runcommand
407 408 >>> @check
408 409 ... def phasesetscacheaftercommit(server):
409 410 ... readchannel(server)
410 411 ... # load _phasecache._phaserevs and _phasesets
411 412 ... runcommand(server, ['log', '-qr', 'draft()'])
412 413 ... # create draft commits by another process
413 414 ... for i in xrange(5, 7):
414 415 ... f = open('a', 'ab')
415 416 ... f.seek(0, os.SEEK_END)
416 417 ... f.write('a\n')
417 418 ... f.close()
418 419 ... os.system('hg commit -Aqm%d' % i)
419 420 ... # new commits should be listed as draft revisions
420 421 ... runcommand(server, ['log', '-qr', 'draft()'])
421 422 ... print('')
422 423 *** runcommand log -qr draft()
423 424 4:7966c8e3734d
424 425 *** runcommand log -qr draft()
425 426 4:7966c8e3734d
426 427 5:41f6602d1c4f
427 428 6:10501e202c35
428 429
429 430
430 431 >>> import os
431 432 >>> from hgclient import check, readchannel, runcommand
432 433 >>> @check
433 434 ... def phasesetscacheafterstrip(server):
434 435 ... readchannel(server)
435 436 ... # load _phasecache._phaserevs and _phasesets
436 437 ... runcommand(server, ['log', '-qr', 'draft()'])
437 438 ... # strip cached revisions by another process
438 439 ... os.system('hg --config extensions.strip= strip -q 5')
439 440 ... # shouldn't abort by "unknown revision '6'"
440 441 ... runcommand(server, ['log', '-qr', 'draft()'])
441 442 ... print('')
442 443 *** runcommand log -qr draft()
443 444 4:7966c8e3734d
444 445 5:41f6602d1c4f
445 446 6:10501e202c35
446 447 *** runcommand log -qr draft()
447 448 4:7966c8e3734d
448 449
449 450
450 451 cache of phase roots should be invalidated on strip (issue3827):
451 452
452 453 >>> import os
453 454 >>> from hgclient import check, readchannel, runcommand, sep
454 455 >>> @check
455 456 ... def phasecacheafterstrip(server):
456 457 ... readchannel(server)
457 458 ...
458 459 ... # create new head, 5:731265503d86
459 460 ... runcommand(server, ['update', '-C', '0'])
460 461 ... f = open('a', 'ab')
461 462 ... f.write('a\n')
462 463 ... f.close()
463 464 ... runcommand(server, ['commit', '-Am.', 'a'])
464 465 ... runcommand(server, ['log', '-Gq'])
465 466 ...
466 467 ... # make it public; draft marker moves to 4:7966c8e3734d
467 468 ... runcommand(server, ['phase', '-p', '.'])
468 469 ... # load _phasecache.phaseroots
469 470 ... runcommand(server, ['phase', '.'], outfilter=sep)
470 471 ...
471 472 ... # strip 1::4 outside server
472 473 ... os.system('hg -q --config extensions.mq= strip 1')
473 474 ...
474 475 ... # shouldn't raise "7966c8e3734d: no node!"
475 476 ... runcommand(server, ['branches'])
476 477 *** runcommand update -C 0
477 478 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
478 479 (leaving bookmark bm3)
479 480 *** runcommand commit -Am. a
480 481 created new head
481 482 *** runcommand log -Gq
482 483 @ 5:731265503d86
483 484 |
484 485 | o 4:7966c8e3734d
485 486 | |
486 487 | o 3:b9b85890c400
487 488 | |
488 489 | o 2:aef17e88f5f0
489 490 | |
490 491 | o 1:d3a0a68be6de
491 492 |/
492 493 o 0:eff892de26ec
493 494
494 495 *** runcommand phase -p .
495 496 *** runcommand phase .
496 497 5: public
497 498 *** runcommand branches
498 499 default 1:731265503d86
499 500
500 501 in-memory cache must be reloaded if transaction is aborted. otherwise
501 502 changelog and manifest would have invalid node:
502 503
503 504 $ echo a >> a
504 505 >>> from hgclient import check, readchannel, runcommand
505 506 >>> @check
506 507 ... def txabort(server):
507 508 ... readchannel(server)
508 509 ... runcommand(server, ['commit', '--config', 'hooks.pretxncommit=false',
509 510 ... '-mfoo'])
510 511 ... runcommand(server, ['verify'])
511 512 *** runcommand commit --config hooks.pretxncommit=false -mfoo
512 513 transaction abort!
513 514 rollback completed
514 515 abort: pretxncommit hook exited with status 1
515 516 [255]
516 517 *** runcommand verify
517 518 checking changesets
518 519 checking manifests
519 520 crosschecking files in changesets and manifests
520 521 checking files
521 522 1 files, 2 changesets, 2 total revisions
522 523 $ hg revert --no-backup -aq
523 524
524 525 $ cat >> .hg/hgrc << EOF
525 526 > [experimental]
526 527 > evolution.createmarkers=True
527 528 > EOF
528 529
529 530 >>> import os
530 531 >>> from hgclient import check, readchannel, runcommand
531 532 >>> @check
532 533 ... def obsolete(server):
533 534 ... readchannel(server)
534 535 ...
535 536 ... runcommand(server, ['up', 'null'])
536 537 ... runcommand(server, ['phase', '-df', 'tip'])
537 538 ... cmd = 'hg debugobsolete `hg log -r tip --template {node}`'
538 539 ... if os.name == 'nt':
539 540 ... cmd = 'sh -c "%s"' % cmd # run in sh, not cmd.exe
540 541 ... os.system(cmd)
541 542 ... runcommand(server, ['log', '--hidden'])
542 543 ... runcommand(server, ['log'])
543 544 *** runcommand up null
544 545 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
545 546 *** runcommand phase -df tip
546 547 obsoleted 1 changesets
547 548 *** runcommand log --hidden
548 549 changeset: 1:731265503d86
549 550 tag: tip
550 551 user: test
551 552 date: Thu Jan 01 00:00:00 1970 +0000
552 553 obsolete: pruned
553 554 summary: .
554 555
555 556 changeset: 0:eff892de26ec
556 557 bookmark: bm1
557 558 bookmark: bm2
558 559 bookmark: bm3
559 560 user: test
560 561 date: Thu Jan 01 00:00:00 1970 +0000
561 562 summary: 1
562 563
563 564 *** runcommand log
564 565 changeset: 0:eff892de26ec
565 566 bookmark: bm1
566 567 bookmark: bm2
567 568 bookmark: bm3
568 569 tag: tip
569 570 user: test
570 571 date: Thu Jan 01 00:00:00 1970 +0000
571 572 summary: 1
572 573
573 574
574 575 $ cat <<EOF >> .hg/hgrc
575 576 > [extensions]
576 577 > mq =
577 578 > EOF
578 579
579 580 >>> import os
580 581 >>> from hgclient import check, readchannel, runcommand
581 582 >>> @check
582 583 ... def mqoutsidechanges(server):
583 584 ... readchannel(server)
584 585 ...
585 586 ... # load repo.mq
586 587 ... runcommand(server, ['qapplied'])
587 588 ... os.system('hg qnew 0.diff')
588 589 ... # repo.mq should be invalidated
589 590 ... runcommand(server, ['qapplied'])
590 591 ...
591 592 ... runcommand(server, ['qpop', '--all'])
592 593 ... os.system('hg qqueue --create foo')
593 594 ... # repo.mq should be recreated to point to new queue
594 595 ... runcommand(server, ['qqueue', '--active'])
595 596 *** runcommand qapplied
596 597 *** runcommand qapplied
597 598 0.diff
598 599 *** runcommand qpop --all
599 600 popping 0.diff
600 601 patch queue now empty
601 602 *** runcommand qqueue --active
602 603 foo
603 604
604 605 $ cat <<EOF > dbgui.py
605 606 > import os
606 607 > import sys
607 608 > from mercurial import commands, registrar
608 609 > cmdtable = {}
609 610 > command = registrar.command(cmdtable)
610 611 > @command(b"debuggetpass", norepo=True)
611 612 > def debuggetpass(ui):
612 613 > ui.write("%s\\n" % ui.getpass())
613 614 > @command(b"debugprompt", norepo=True)
614 615 > def debugprompt(ui):
615 616 > ui.write("%s\\n" % ui.prompt("prompt:"))
616 617 > @command(b"debugreadstdin", norepo=True)
617 618 > def debugreadstdin(ui):
618 619 > ui.write("read: %r\n" % sys.stdin.read(1))
619 620 > @command(b"debugwritestdout", norepo=True)
620 621 > def debugwritestdout(ui):
621 622 > os.write(1, "low-level stdout fd and\n")
622 623 > sys.stdout.write("stdout should be redirected to /dev/null\n")
623 624 > sys.stdout.flush()
624 625 > EOF
625 626 $ cat <<EOF >> .hg/hgrc
626 627 > [extensions]
627 628 > dbgui = dbgui.py
628 629 > EOF
629 630
630 631 >>> from hgclient import check, readchannel, runcommand, stringio
631 632 >>> @check
632 633 ... def getpass(server):
633 634 ... readchannel(server)
634 635 ... runcommand(server, ['debuggetpass', '--config',
635 636 ... 'ui.interactive=True'],
636 637 ... input=stringio('1234\n'))
637 638 ... runcommand(server, ['debuggetpass', '--config',
638 639 ... 'ui.interactive=True'],
639 640 ... input=stringio('\n'))
640 641 ... runcommand(server, ['debuggetpass', '--config',
641 642 ... 'ui.interactive=True'],
642 643 ... input=stringio(''))
643 644 ... runcommand(server, ['debugprompt', '--config',
644 645 ... 'ui.interactive=True'],
645 646 ... input=stringio('5678\n'))
646 647 ... runcommand(server, ['debugreadstdin'])
647 648 ... runcommand(server, ['debugwritestdout'])
648 649 *** runcommand debuggetpass --config ui.interactive=True
649 650 password: 1234
650 651 *** runcommand debuggetpass --config ui.interactive=True
651 652 password:
652 653 *** runcommand debuggetpass --config ui.interactive=True
653 654 password: abort: response expected
654 655 [255]
655 656 *** runcommand debugprompt --config ui.interactive=True
656 657 prompt: 5678
657 658 *** runcommand debugreadstdin
658 659 read: ''
659 660 *** runcommand debugwritestdout
660 661
661 662
662 663 run commandserver in commandserver, which is silly but should work:
663 664
664 665 >>> from __future__ import print_function
665 666 >>> from hgclient import check, readchannel, runcommand, stringio
666 667 >>> @check
667 668 ... def nested(server):
668 669 ... print('%c, %r' % readchannel(server))
669 670 ... class nestedserver(object):
670 671 ... stdin = stringio('getencoding\n')
671 672 ... stdout = stringio()
672 673 ... runcommand(server, ['serve', '--cmdserver', 'pipe'],
673 674 ... output=nestedserver.stdout, input=nestedserver.stdin)
674 675 ... nestedserver.stdout.seek(0)
675 676 ... print('%c, %r' % readchannel(nestedserver)) # hello
676 677 ... print('%c, %r' % readchannel(nestedserver)) # getencoding
677 678 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
678 679 *** runcommand serve --cmdserver pipe
679 680 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
680 681 r, '*' (glob)
681 682
682 683
683 684 start without repository:
684 685
685 686 $ cd ..
686 687
687 688 >>> from __future__ import print_function
688 689 >>> from hgclient import check, readchannel, runcommand
689 690 >>> @check
690 691 ... def hellomessage(server):
691 692 ... ch, data = readchannel(server)
692 693 ... print('%c, %r' % (ch, data))
693 694 ... # run an arbitrary command to make sure the next thing the server
694 695 ... # sends isn't part of the hello message
695 696 ... runcommand(server, ['id'])
696 697 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
697 698 *** runcommand id
698 699 abort: there is no Mercurial repository here (.hg not found)
699 700 [255]
700 701
701 702 >>> from hgclient import check, readchannel, runcommand
702 703 >>> @check
703 704 ... def startwithoutrepo(server):
704 705 ... readchannel(server)
705 706 ... runcommand(server, ['init', 'repo2'])
706 707 ... runcommand(server, ['id', '-R', 'repo2'])
707 708 *** runcommand init repo2
708 709 *** runcommand id -R repo2
709 710 000000000000 tip
710 711
711 712
712 713 don't fall back to cwd if invalid -R path is specified (issue4805):
713 714
714 715 $ cd repo
715 716 $ hg serve --cmdserver pipe -R ../nonexistent
716 717 abort: repository ../nonexistent not found!
717 718 [255]
718 719 $ cd ..
719 720
720 721
721 722 unix domain socket:
722 723
723 724 $ cd repo
724 725 $ hg update -q
725 726
726 727 #if unix-socket unix-permissions
727 728
728 729 >>> from __future__ import print_function
729 730 >>> from hgclient import check, readchannel, runcommand, stringio, unixserver
730 731 >>> server = unixserver('.hg/server.sock', '.hg/server.log')
731 732 >>> def hellomessage(conn):
732 733 ... ch, data = readchannel(conn)
733 734 ... print('%c, %r' % (ch, data))
734 735 ... runcommand(conn, ['id'])
735 736 >>> check(hellomessage, server.connect)
736 737 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
737 738 *** runcommand id
738 739 eff892de26ec tip bm1/bm2/bm3
739 740 >>> def unknowncommand(conn):
740 741 ... readchannel(conn)
741 742 ... conn.stdin.write('unknowncommand\n')
742 743 >>> check(unknowncommand, server.connect) # error sent to server.log
743 744 >>> def serverinput(conn):
744 745 ... readchannel(conn)
745 746 ... patch = """
746 747 ... # HG changeset patch
747 748 ... # User test
748 749 ... # Date 0 0
749 750 ... 2
750 751 ...
751 752 ... diff -r eff892de26ec -r 1ed24be7e7a0 a
752 753 ... --- a/a
753 754 ... +++ b/a
754 755 ... @@ -1,1 +1,2 @@
755 756 ... 1
756 757 ... +2
757 758 ... """
758 759 ... runcommand(conn, ['import', '-'], input=stringio(patch))
759 760 ... runcommand(conn, ['log', '-rtip', '-q'])
760 761 >>> check(serverinput, server.connect)
761 762 *** runcommand import -
762 763 applying patch from stdin
763 764 *** runcommand log -rtip -q
764 765 2:1ed24be7e7a0
765 766 >>> server.shutdown()
766 767
767 768 $ cat .hg/server.log
768 769 listening at .hg/server.sock
769 770 abort: unknown command unknowncommand
770 771 killed!
771 772 $ rm .hg/server.log
772 773
773 774 if server crashed before hello, traceback will be sent to 'e' channel as
774 775 last ditch:
775 776
776 777 $ cat <<EOF >> .hg/hgrc
777 778 > [cmdserver]
778 779 > log = inexistent/path.log
779 780 > EOF
780 781 >>> from __future__ import print_function
781 782 >>> from hgclient import check, readchannel, unixserver
782 783 >>> server = unixserver('.hg/server.sock', '.hg/server.log')
783 784 >>> def earlycrash(conn):
784 785 ... while True:
785 786 ... try:
786 787 ... ch, data = readchannel(conn)
787 788 ... if not data.startswith(' '):
788 789 ... print('%c, %r' % (ch, data))
789 790 ... except EOFError:
790 791 ... break
791 792 >>> check(earlycrash, server.connect)
792 793 e, 'Traceback (most recent call last):\n'
793 794 e, "IOError: *" (glob)
794 795 >>> server.shutdown()
795 796
796 797 $ cat .hg/server.log | grep -v '^ '
797 798 listening at .hg/server.sock
798 799 Traceback (most recent call last):
799 800 IOError: * (glob)
800 801 killed!
801 802 #endif
802 803 #if no-unix-socket
803 804
804 805 $ hg serve --cmdserver unix -a .hg/server.sock
805 806 abort: unsupported platform
806 807 [255]
807 808
808 809 #endif
809 810
810 811 $ cd ..
811 812
812 813 Test that accessing to invalid changelog cache is avoided at
813 814 subsequent operations even if repo object is reused even after failure
814 815 of transaction (see 0a7610758c42 also)
815 816
816 817 "hg log" after failure of transaction is needed to detect invalid
817 818 cache in repoview: this can't detect by "hg verify" only.
818 819
819 820 Combination of "finalization" and "empty-ness of changelog" (2 x 2 =
820 821 4) are tested, because '00changelog.i' are differently changed in each
821 822 cases.
822 823
823 824 $ cat > $TESTTMP/failafterfinalize.py <<EOF
824 825 > # extension to abort transaction after finalization forcibly
825 826 > from mercurial import commands, error, extensions, lock as lockmod
826 827 > from mercurial import registrar
827 828 > cmdtable = {}
828 829 > command = registrar.command(cmdtable)
829 830 > configtable = {}
830 831 > configitem = registrar.configitem(configtable)
831 832 > configitem('failafterfinalize', 'fail',
832 833 > default=None,
833 834 > )
834 835 > def fail(tr):
835 836 > raise error.Abort('fail after finalization')
836 837 > def reposetup(ui, repo):
837 838 > class failrepo(repo.__class__):
838 839 > def commitctx(self, ctx, error=False):
839 840 > if self.ui.configbool('failafterfinalize', 'fail'):
840 841 > # 'sorted()' by ASCII code on category names causes
841 842 > # invoking 'fail' after finalization of changelog
842 843 > # using "'cl-%i' % id(self)" as category name
843 844 > self.currenttransaction().addfinalize('zzzzzzzz', fail)
844 845 > return super(failrepo, self).commitctx(ctx, error)
845 846 > repo.__class__ = failrepo
846 847 > EOF
847 848
848 849 $ hg init repo3
849 850 $ cd repo3
850 851
851 852 $ cat <<EOF >> $HGRCPATH
852 853 > [ui]
853 854 > logtemplate = {rev} {desc|firstline} ({files})\n
854 855 >
855 856 > [extensions]
856 857 > failafterfinalize = $TESTTMP/failafterfinalize.py
857 858 > EOF
858 859
859 860 - test failure with "empty changelog"
860 861
861 862 $ echo foo > foo
862 863 $ hg add foo
863 864
864 865 (failure before finalization)
865 866
866 867 >>> from hgclient import check, readchannel, runcommand
867 868 >>> @check
868 869 ... def abort(server):
869 870 ... readchannel(server)
870 871 ... runcommand(server, ['commit',
871 872 ... '--config', 'hooks.pretxncommit=false',
872 873 ... '-mfoo'])
873 874 ... runcommand(server, ['log'])
874 875 ... runcommand(server, ['verify', '-q'])
875 876 *** runcommand commit --config hooks.pretxncommit=false -mfoo
876 877 transaction abort!
877 878 rollback completed
878 879 abort: pretxncommit hook exited with status 1
879 880 [255]
880 881 *** runcommand log
881 882 *** runcommand verify -q
882 883
883 884 (failure after finalization)
884 885
885 886 >>> from hgclient import check, readchannel, runcommand
886 887 >>> @check
887 888 ... def abort(server):
888 889 ... readchannel(server)
889 890 ... runcommand(server, ['commit',
890 891 ... '--config', 'failafterfinalize.fail=true',
891 892 ... '-mfoo'])
892 893 ... runcommand(server, ['log'])
893 894 ... runcommand(server, ['verify', '-q'])
894 895 *** runcommand commit --config failafterfinalize.fail=true -mfoo
895 896 transaction abort!
896 897 rollback completed
897 898 abort: fail after finalization
898 899 [255]
899 900 *** runcommand log
900 901 *** runcommand verify -q
901 902
902 903 - test failure with "not-empty changelog"
903 904
904 905 $ echo bar > bar
905 906 $ hg add bar
906 907 $ hg commit -mbar bar
907 908
908 909 (failure before finalization)
909 910
910 911 >>> from hgclient import check, readchannel, runcommand
911 912 >>> @check
912 913 ... def abort(server):
913 914 ... readchannel(server)
914 915 ... runcommand(server, ['commit',
915 916 ... '--config', 'hooks.pretxncommit=false',
916 917 ... '-mfoo', 'foo'])
917 918 ... runcommand(server, ['log'])
918 919 ... runcommand(server, ['verify', '-q'])
919 920 *** runcommand commit --config hooks.pretxncommit=false -mfoo foo
920 921 transaction abort!
921 922 rollback completed
922 923 abort: pretxncommit hook exited with status 1
923 924 [255]
924 925 *** runcommand log
925 926 0 bar (bar)
926 927 *** runcommand verify -q
927 928
928 929 (failure after finalization)
929 930
930 931 >>> from hgclient import check, readchannel, runcommand
931 932 >>> @check
932 933 ... def abort(server):
933 934 ... readchannel(server)
934 935 ... runcommand(server, ['commit',
935 936 ... '--config', 'failafterfinalize.fail=true',
936 937 ... '-mfoo', 'foo'])
937 938 ... runcommand(server, ['log'])
938 939 ... runcommand(server, ['verify', '-q'])
939 940 *** runcommand commit --config failafterfinalize.fail=true -mfoo foo
940 941 transaction abort!
941 942 rollback completed
942 943 abort: fail after finalization
943 944 [255]
944 945 *** runcommand log
945 946 0 bar (bar)
946 947 *** runcommand verify -q
947 948
948 949 $ cd ..
949 950
950 951 Test symlink traversal over cached audited paths:
951 952 -------------------------------------------------
952 953
953 954 #if symlink
954 955
955 956 set up symlink hell
956 957
957 958 $ mkdir merge-symlink-out
958 959 $ hg init merge-symlink
959 960 $ cd merge-symlink
960 961 $ touch base
961 962 $ hg commit -qAm base
962 963 $ ln -s ../merge-symlink-out a
963 964 $ hg commit -qAm 'symlink a -> ../merge-symlink-out'
964 965 $ hg up -q 0
965 966 $ mkdir a
966 967 $ touch a/poisoned
967 968 $ hg commit -qAm 'file a/poisoned'
968 969 $ hg log -G -T '{rev}: {desc}\n'
969 970 @ 2: file a/poisoned
970 971 |
971 972 | o 1: symlink a -> ../merge-symlink-out
972 973 |/
973 974 o 0: base
974 975
975 976
976 977 try trivial merge after update: cache of audited paths should be discarded,
977 978 and the merge should fail (issue5628)
978 979
979 980 $ hg up -q null
980 981 >>> from hgclient import check, readchannel, runcommand
981 982 >>> @check
982 983 ... def merge(server):
983 984 ... readchannel(server)
984 985 ... # audit a/poisoned as a good path
985 986 ... runcommand(server, ['up', '-qC', '2'])
986 987 ... runcommand(server, ['up', '-qC', '1'])
987 988 ... # here a is a symlink, so a/poisoned is bad
988 989 ... runcommand(server, ['merge', '2'])
989 990 *** runcommand up -qC 2
990 991 *** runcommand up -qC 1
991 992 *** runcommand merge 2
992 993 abort: path 'a/poisoned' traverses symbolic link 'a'
993 994 [255]
994 995 $ ls ../merge-symlink-out
995 996
996 997 cache of repo.auditor should be discarded, so matcher would never traverse
997 998 symlinks:
998 999
999 1000 $ hg up -qC 0
1000 1001 $ touch ../merge-symlink-out/poisoned
1001 1002 >>> from hgclient import check, readchannel, runcommand
1002 1003 >>> @check
1003 1004 ... def files(server):
1004 1005 ... readchannel(server)
1005 1006 ... runcommand(server, ['up', '-qC', '2'])
1006 1007 ... # audit a/poisoned as a good path
1007 1008 ... runcommand(server, ['files', 'a/poisoned'])
1008 1009 ... runcommand(server, ['up', '-qC', '0'])
1009 1010 ... runcommand(server, ['up', '-qC', '1'])
1010 1011 ... # here 'a' is a symlink, so a/poisoned should be warned
1011 1012 ... runcommand(server, ['files', 'a/poisoned'])
1012 1013 *** runcommand up -qC 2
1013 1014 *** runcommand files a/poisoned
1014 1015 a/poisoned
1015 1016 *** runcommand up -qC 0
1016 1017 *** runcommand up -qC 1
1017 1018 *** runcommand files a/poisoned
1018 1019 abort: path 'a/poisoned' traverses symbolic link 'a'
1019 1020 [255]
1020 1021
1021 1022 $ cd ..
1022 1023
1023 1024 #endif
@@ -1,126 +1,132 b''
1 1 #require lfs-test-server
2 2
3 3 $ LFS_LISTEN="tcp://:$HGPORT"
4 4 $ LFS_HOST="localhost:$HGPORT"
5 5 $ LFS_PUBLIC=1
6 6 $ export LFS_LISTEN LFS_HOST LFS_PUBLIC
7 7 #if no-windows
8 8 $ lfs-test-server &> lfs-server.log &
9 9 $ echo $! >> $DAEMON_PIDS
10 10 #else
11 11 $ cat >> $TESTTMP/spawn.py <<EOF
12 12 > import os
13 13 > import subprocess
14 14 > import sys
15 15 >
16 16 > for path in os.environ["PATH"].split(os.pathsep):
17 17 > exe = os.path.join(path, 'lfs-test-server.exe')
18 18 > if os.path.exists(exe):
19 19 > with open('lfs-server.log', 'wb') as out:
20 20 > p = subprocess.Popen(exe, stdout=out, stderr=out)
21 21 > sys.stdout.write('%s\n' % p.pid)
22 22 > sys.exit(0)
23 23 > sys.exit(1)
24 24 > EOF
25 25 $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
26 26 #endif
27 27
28 28 $ cat >> $HGRCPATH <<EOF
29 29 > [extensions]
30 30 > lfs=
31 31 > [lfs]
32 32 > url=http://foo:bar@$LFS_HOST/
33 33 > threshold=1
34 34 > EOF
35 35
36 36 $ hg init repo1
37 37 $ cd repo1
38 38 $ echo THIS-IS-LFS > a
39 39 $ hg commit -m a -A a
40 40
41 41 $ hg init ../repo2
42 42 $ hg push ../repo2 -v
43 43 pushing to ../repo2
44 44 searching for changes
45 45 lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
46 46 1 changesets found
47 47 uncompressed size of bundle content:
48 48 * (changelog) (glob)
49 49 * (manifests) (glob)
50 50 * a (glob)
51 51 adding changesets
52 52 adding manifests
53 53 adding file changes
54 54 added 1 changesets with 1 changes to 1 files
55 55
56 Clear the cache to force a download
57 $ rm -rf `hg config lfs.usercache`
56 58 $ cd ../repo2
57 59 $ hg update tip -v
58 60 resolving manifests
59 61 getting a
60 62 lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
61 63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 64
63 65 When the server has some blobs already
64 66
65 67 $ hg mv a b
66 68 $ echo ANOTHER-LARGE-FILE > c
67 69 $ echo ANOTHER-LARGE-FILE2 > d
68 70 $ hg commit -m b-and-c -A b c d
69 71 $ hg push ../repo1 -v | grep -v '^ '
70 72 pushing to ../repo1
71 73 searching for changes
72 74 lfs: need to transfer 2 objects (39 bytes)
73 75 lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
74 76 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
75 77 1 changesets found
76 78 uncompressed size of bundle content:
77 79 adding changesets
78 80 adding manifests
79 81 adding file changes
80 82 added 1 changesets with 3 changes to 3 files
81 83
84 Clear the cache to force a download
85 $ rm -rf `hg config lfs.usercache`
82 86 $ hg --repo ../repo1 update tip -v
83 87 resolving manifests
84 88 getting b
85 89 getting c
86 90 lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
87 91 getting d
88 92 lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
89 93 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 94
91 95 Check error message when the remote missed a blob:
92 96
93 97 $ echo FFFFF > b
94 98 $ hg commit -m b -A b
95 99 $ echo FFFFF >> b
96 100 $ hg commit -m b b
97 101 $ rm -rf .hg/store/lfs
102 $ rm -rf `hg config lfs.usercache`
98 103 $ hg update -C '.^'
99 104 abort: LFS server claims required objects do not exist:
100 105 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13!
101 106 [255]
102 107
103 108 Check error message when object does not exist:
104 109
105 110 $ hg init test && cd test
106 111 $ echo "[extensions]" >> .hg/hgrc
107 112 $ echo "lfs=" >> .hg/hgrc
108 113 $ echo "[lfs]" >> .hg/hgrc
109 114 $ echo "threshold=1" >> .hg/hgrc
110 115 $ echo a > a
111 116 $ hg add a
112 117 $ hg commit -m 'test'
113 118 $ echo aaaaa > a
114 119 $ hg commit -m 'largefile'
115 120 $ hg debugdata .hg/store/data/a.i 1 # verify this is no the file content but includes "oid", the LFS "pointer".
116 121 version https://git-lfs.github.com/spec/v1
117 122 oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a
118 123 size 6
119 124 x-is-binary 0
120 125 $ cd ..
126 $ rm -rf `hg config lfs.usercache`
121 127 $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2
122 128 updating to branch default
123 129 abort: LFS server error. Remote object for file data/a.i not found:(.*)! (re)
124 130 [255]
125 131
126 132 $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
@@ -1,678 +1,682 b''
1 1 # Initial setup
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > lfs=
6 6 > [lfs]
7 7 > threshold=1000B
8 8 > EOF
9 9
10 10 $ LONG=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
11 11
12 12 # Prepare server and enable extension
13 13 $ hg init server
14 14 $ hg clone -q server client
15 15 $ cd client
16 16
17 17 # Commit small file
18 18 $ echo s > smallfile
19 19 $ hg commit -Aqm "add small file"
20 20
21 21 # Commit large file
22 22 $ echo $LONG > largefile
23 23 $ grep lfs .hg/requires
24 24 [1]
25 25 $ hg commit --traceback -Aqm "add large file"
26 26 $ grep lfs .hg/requires
27 27 lfs
28 28
29 29 # Ensure metadata is stored
30 30 $ hg debugdata largefile 0
31 31 version https://git-lfs.github.com/spec/v1
32 32 oid sha256:f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
33 33 size 1501
34 34 x-is-binary 0
35 35
36 36 # Check the blobstore is populated
37 37 $ find .hg/store/lfs/objects | sort
38 38 .hg/store/lfs/objects
39 39 .hg/store/lfs/objects/f1
40 40 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
41 41
42 42 # Check the blob stored contains the actual contents of the file
43 43 $ cat .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
44 44 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
45 45
46 46 # Push changes to the server
47 47
48 48 $ hg push
49 49 pushing to $TESTTMP/server (glob)
50 50 searching for changes
51 51 abort: lfs.url needs to be configured
52 52 [255]
53 53
54 54 $ cat >> $HGRCPATH << EOF
55 55 > [lfs]
56 56 > url=file:$TESTTMP/dummy-remote/
57 57 > EOF
58 58
59 59 $ hg push -v | egrep -v '^(uncompressed| )'
60 60 pushing to $TESTTMP/server (glob)
61 61 searching for changes
62 62 2 changesets found
63 63 adding changesets
64 64 adding manifests
65 65 adding file changes
66 66 added 2 changesets with 2 changes to 2 files
67 67
68 68 # Unknown URL scheme
69 69
70 70 $ hg push --config lfs.url=ftp://foobar
71 71 abort: lfs: unknown url scheme: ftp
72 72 [255]
73 73
74 74 $ cd ../
75 75
76 76 # Initialize new client (not cloning) and setup extension
77 77 $ hg init client2
78 78 $ cd client2
79 79 $ cat >> .hg/hgrc <<EOF
80 80 > [paths]
81 81 > default = $TESTTMP/server
82 82 > EOF
83 83
84 84 # Pull from server
85 85 $ hg pull default
86 86 pulling from $TESTTMP/server (glob)
87 87 requesting all changes
88 88 adding changesets
89 89 adding manifests
90 90 adding file changes
91 91 added 2 changesets with 2 changes to 2 files
92 92 new changesets b29ba743f89d:00c137947d30
93 93 (run 'hg update' to get a working copy)
94 94
95 95 # Check the blobstore is not yet populated
96 96 $ [ -d .hg/store/lfs/objects ]
97 97 [1]
98 98
99 99 # Update to the last revision containing the large file
100 100 $ hg update
101 101 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 102
103 103 # Check the blobstore has been populated on update
104 104 $ find .hg/store/lfs/objects | sort
105 105 .hg/store/lfs/objects
106 106 .hg/store/lfs/objects/f1
107 107 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
108 108
109 109 # Check the contents of the file are fetched from blobstore when requested
110 110 $ hg cat -r . largefile
111 111 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
112 112
113 113 # Check the file has been copied in the working copy
114 114 $ cat largefile
115 115 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
116 116
117 117 $ cd ..
118 118
119 119 # Check rename, and switch between large and small files
120 120
121 121 $ hg init repo3
122 122 $ cd repo3
123 123 $ cat >> .hg/hgrc << EOF
124 124 > [lfs]
125 125 > threshold=10B
126 126 > EOF
127 127
128 128 $ echo LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS > large
129 129 $ echo SHORTER > small
130 130 $ hg add . -q
131 131 $ hg commit -m 'commit with lfs content'
132 132
133 133 $ hg mv large l
134 134 $ hg mv small s
135 135 $ hg commit -m 'renames'
136 136
137 137 $ echo SHORT > l
138 138 $ echo BECOME-LARGER-FROM-SHORTER > s
139 139 $ hg commit -m 'large to small, small to large'
140 140
141 141 $ echo 1 >> l
142 142 $ echo 2 >> s
143 143 $ hg commit -m 'random modifications'
144 144
145 145 $ echo RESTORE-TO-BE-LARGE > l
146 146 $ echo SHORTER > s
147 147 $ hg commit -m 'switch large and small again'
148 148
149 149 # Test lfs_files template
150 150
151 151 $ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n'
152 152 0 large
153 153 1 l
154 154 2 s
155 155 3 s
156 156 4 l
157 157
158 158 # Push and pull the above repo
159 159
160 160 $ hg --cwd .. init repo4
161 161 $ hg push ../repo4
162 162 pushing to ../repo4
163 163 searching for changes
164 164 adding changesets
165 165 adding manifests
166 166 adding file changes
167 167 added 5 changesets with 10 changes to 4 files
168 168
169 169 $ hg --cwd .. init repo5
170 170 $ hg --cwd ../repo5 pull ../repo3
171 171 pulling from ../repo3
172 172 requesting all changes
173 173 adding changesets
174 174 adding manifests
175 175 adding file changes
176 176 added 5 changesets with 10 changes to 4 files
177 177 new changesets fd47a419c4f7:5adf850972b9
178 178 (run 'hg update' to get a working copy)
179 179
180 180 $ cd ..
181 181
182 182 # Test clone
183 183
184 184 $ hg init repo6
185 185 $ cd repo6
186 186 $ cat >> .hg/hgrc << EOF
187 187 > [lfs]
188 188 > threshold=30B
189 189 > EOF
190 190
191 191 $ echo LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES > large
192 192 $ echo SMALL > small
193 193 $ hg commit -Aqm 'create a lfs file' large small
194 194 $ hg debuglfsupload -r 'all()' -v
195 195
196 196 $ cd ..
197 197
198 198 $ hg clone repo6 repo7
199 199 updating to branch default
200 200 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 201 $ cd repo7
202 202 $ hg config extensions --debug | grep lfs
203 203 $TESTTMP/repo7/.hg/hgrc:*: extensions.lfs= (glob)
204 204 $ cat large
205 205 LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES
206 206 $ cat small
207 207 SMALL
208 208
209 209 $ cd ..
210 210
211 211 $ hg --config extensions.share= share repo7 sharedrepo
212 212 updating working directory
213 213 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
214 214 $ hg -R sharedrepo config extensions --debug | grep lfs
215 215 $TESTTMP/sharedrepo/.hg/hgrc:*: extensions.lfs= (glob)
216 216
217 217 # Test rename and status
218 218
219 219 $ hg init repo8
220 220 $ cd repo8
221 221 $ cat >> .hg/hgrc << EOF
222 222 > [lfs]
223 223 > threshold=10B
224 224 > EOF
225 225
226 226 $ echo THIS-IS-LFS-BECAUSE-10-BYTES > a1
227 227 $ echo SMALL > a2
228 228 $ hg commit -m a -A a1 a2
229 229 $ hg status
230 230 $ hg mv a1 b1
231 231 $ hg mv a2 a1
232 232 $ hg mv b1 a2
233 233 $ hg commit -m b
234 234 $ hg status
235 235 $ HEADER=$'\1\n'
236 236 $ printf '%sSTART-WITH-HG-FILELOG-METADATA' "$HEADER" > a2
237 237 $ printf '%sMETA\n' "$HEADER" > a1
238 238 $ hg commit -m meta
239 239 $ hg status
240 240 $ hg log -T '{rev}: {file_copies} | {file_dels} | {file_adds}\n'
241 241 2: | |
242 242 1: a1 (a2)a2 (a1) | |
243 243 0: | | a1 a2
244 244
245 245 $ for n in a1 a2; do
246 246 > for r in 0 1 2; do
247 247 > printf '\n%s @ %s\n' $n $r
248 248 > hg debugdata $n $r
249 249 > done
250 250 > done
251 251
252 252 a1 @ 0
253 253 version https://git-lfs.github.com/spec/v1
254 254 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
255 255 size 29
256 256 x-is-binary 0
257 257
258 258 a1 @ 1
259 259 \x01 (esc)
260 260 copy: a2
261 261 copyrev: 50470ad23cf937b1f4b9f80bfe54df38e65b50d9
262 262 \x01 (esc)
263 263 SMALL
264 264
265 265 a1 @ 2
266 266 \x01 (esc)
267 267 \x01 (esc)
268 268 \x01 (esc)
269 269 META
270 270
271 271 a2 @ 0
272 272 SMALL
273 273
274 274 a2 @ 1
275 275 version https://git-lfs.github.com/spec/v1
276 276 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
277 277 size 29
278 278 x-hg-copy a1
279 279 x-hg-copyrev be23af27908a582af43e5cda209a5a9b319de8d4
280 280 x-is-binary 0
281 281
282 282 a2 @ 2
283 283 version https://git-lfs.github.com/spec/v1
284 284 oid sha256:876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943
285 285 size 32
286 286 x-is-binary 0
287 287
288 288 # Verify commit hashes include rename metadata
289 289
290 290 $ hg log -T '{rev}:{node|short} {desc}\n'
291 291 2:0fae949de7fa meta
292 292 1:9cd6bdffdac0 b
293 293 0:7f96794915f7 a
294 294
295 295 $ cd ..
296 296
297 297 # Test bundle
298 298
299 299 $ hg init repo9
300 300 $ cd repo9
301 301 $ cat >> .hg/hgrc << EOF
302 302 > [lfs]
303 303 > threshold=10B
304 304 > [diff]
305 305 > git=1
306 306 > EOF
307 307
308 308 $ for i in 0 single two three 4; do
309 309 > echo 'THIS-IS-LFS-'$i > a
310 310 > hg commit -m a-$i -A a
311 311 > done
312 312
313 313 $ hg update 2 -q
314 314 $ echo 'THIS-IS-LFS-2-CHILD' > a
315 315 $ hg commit -m branching -q
316 316
317 317 $ hg bundle --base 1 bundle.hg -v
318 318 4 changesets found
319 319 uncompressed size of bundle content:
320 320 * (changelog) (glob)
321 321 * (manifests) (glob)
322 322 * a (glob)
323 323 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
324 324 $ hg -R bundle.hg log -p -T '{rev} {desc}\n' a
325 325 5 branching
326 326 diff --git a/a b/a
327 327 --- a/a
328 328 +++ b/a
329 329 @@ -1,1 +1,1 @@
330 330 -THIS-IS-LFS-two
331 331 +THIS-IS-LFS-2-CHILD
332 332
333 333 4 a-4
334 334 diff --git a/a b/a
335 335 --- a/a
336 336 +++ b/a
337 337 @@ -1,1 +1,1 @@
338 338 -THIS-IS-LFS-three
339 339 +THIS-IS-LFS-4
340 340
341 341 3 a-three
342 342 diff --git a/a b/a
343 343 --- a/a
344 344 +++ b/a
345 345 @@ -1,1 +1,1 @@
346 346 -THIS-IS-LFS-two
347 347 +THIS-IS-LFS-three
348 348
349 349 2 a-two
350 350 diff --git a/a b/a
351 351 --- a/a
352 352 +++ b/a
353 353 @@ -1,1 +1,1 @@
354 354 -THIS-IS-LFS-single
355 355 +THIS-IS-LFS-two
356 356
357 357 1 a-single
358 358 diff --git a/a b/a
359 359 --- a/a
360 360 +++ b/a
361 361 @@ -1,1 +1,1 @@
362 362 -THIS-IS-LFS-0
363 363 +THIS-IS-LFS-single
364 364
365 365 0 a-0
366 366 diff --git a/a b/a
367 367 new file mode 100644
368 368 --- /dev/null
369 369 +++ b/a
370 370 @@ -0,0 +1,1 @@
371 371 +THIS-IS-LFS-0
372 372
373 373 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
374 374 $ hg -R bundle-again.hg log -p -T '{rev} {desc}\n' a
375 375 5 branching
376 376 diff --git a/a b/a
377 377 --- a/a
378 378 +++ b/a
379 379 @@ -1,1 +1,1 @@
380 380 -THIS-IS-LFS-two
381 381 +THIS-IS-LFS-2-CHILD
382 382
383 383 4 a-4
384 384 diff --git a/a b/a
385 385 --- a/a
386 386 +++ b/a
387 387 @@ -1,1 +1,1 @@
388 388 -THIS-IS-LFS-three
389 389 +THIS-IS-LFS-4
390 390
391 391 3 a-three
392 392 diff --git a/a b/a
393 393 --- a/a
394 394 +++ b/a
395 395 @@ -1,1 +1,1 @@
396 396 -THIS-IS-LFS-two
397 397 +THIS-IS-LFS-three
398 398
399 399 2 a-two
400 400 diff --git a/a b/a
401 401 --- a/a
402 402 +++ b/a
403 403 @@ -1,1 +1,1 @@
404 404 -THIS-IS-LFS-single
405 405 +THIS-IS-LFS-two
406 406
407 407 1 a-single
408 408 diff --git a/a b/a
409 409 --- a/a
410 410 +++ b/a
411 411 @@ -1,1 +1,1 @@
412 412 -THIS-IS-LFS-0
413 413 +THIS-IS-LFS-single
414 414
415 415 0 a-0
416 416 diff --git a/a b/a
417 417 new file mode 100644
418 418 --- /dev/null
419 419 +++ b/a
420 420 @@ -0,0 +1,1 @@
421 421 +THIS-IS-LFS-0
422 422
423 423 $ cd ..
424 424
425 425 # Test isbinary
426 426
427 427 $ hg init repo10
428 428 $ cd repo10
429 429 $ cat >> .hg/hgrc << EOF
430 430 > [extensions]
431 431 > lfs=
432 432 > [lfs]
433 433 > threshold=1
434 434 > EOF
435 435 $ $PYTHON <<'EOF'
436 436 > def write(path, content):
437 437 > with open(path, 'wb') as f:
438 438 > f.write(content)
439 439 > write('a', b'\0\0')
440 440 > write('b', b'\1\n')
441 441 > write('c', b'\1\n\0')
442 442 > write('d', b'xx')
443 443 > EOF
444 444 $ hg add a b c d
445 445 $ hg diff --stat
446 446 a | Bin
447 447 b | 1 +
448 448 c | Bin
449 449 d | 1 +
450 450 4 files changed, 2 insertions(+), 0 deletions(-)
451 451 $ hg commit -m binarytest
452 452 $ cat > $TESTTMP/dumpbinary.py << EOF
453 453 > def reposetup(ui, repo):
454 454 > for n in 'abcd':
455 455 > ui.write(('%s: binary=%s\n') % (n, repo['.'][n].isbinary()))
456 456 > EOF
457 457 $ hg --config extensions.dumpbinary=$TESTTMP/dumpbinary.py id --trace
458 458 a: binary=True
459 459 b: binary=False
460 460 c: binary=True
461 461 d: binary=False
462 462 b55353847f02 tip
463 463
464 464 $ cd ..
465 465
466 466 # Test fctx.cmp fastpath - diff without LFS blobs
467 467
468 468 $ hg init repo11
469 469 $ cd repo11
470 470 $ cat >> .hg/hgrc <<EOF
471 471 > [lfs]
472 472 > threshold=1
473 473 > EOF
474 474 $ cat > ../patch.diff <<EOF
475 475 > # HG changeset patch
476 476 > 2
477 477 >
478 478 > diff --git a/a b/a
479 479 > old mode 100644
480 480 > new mode 100755
481 481 > EOF
482 482
483 483 $ for i in 1 2 3; do
484 484 > cp ../repo10/a a
485 485 > if [ $i = 3 ]; then
486 486 > # make a content-only change
487 487 > hg import -q --bypass ../patch.diff
488 488 > hg update -q
489 489 > rm ../patch.diff
490 490 > else
491 491 > echo $i >> a
492 492 > hg commit -m $i -A a
493 493 > fi
494 494 > done
495 495 $ [ -d .hg/store/lfs/objects ]
496 496
497 497 $ cd ..
498 498
499 499 $ hg clone repo11 repo12 --noupdate
500 500 $ cd repo12
501 501 $ hg log --removed -p a -T '{desc}\n' --config diff.nobinary=1 --git
502 502 2
503 503 diff --git a/a b/a
504 504 old mode 100644
505 505 new mode 100755
506 506
507 507 2
508 508 diff --git a/a b/a
509 509 Binary file a has changed
510 510
511 511 1
512 512 diff --git a/a b/a
513 513 new file mode 100644
514 514 Binary file a has changed
515 515
516 516 $ [ -d .hg/store/lfs/objects ]
517 517 [1]
518 518
519 519 $ cd ..
520 520
521 521 # Verify the repos
522 522
523 523 $ cat > $TESTTMP/dumpflog.py << EOF
524 524 > # print raw revision sizes, flags, and hashes for certain files
525 525 > import hashlib
526 526 > from mercurial import revlog
527 527 > from mercurial.node import short
528 528 > def hash(rawtext):
529 529 > h = hashlib.sha512()
530 530 > h.update(rawtext)
531 531 > return h.hexdigest()[:4]
532 532 > def reposetup(ui, repo):
533 533 > # these 2 files are interesting
534 534 > for name in ['l', 's']:
535 535 > fl = repo.file(name)
536 536 > if len(fl) == 0:
537 537 > continue
538 538 > sizes = [revlog.revlog.rawsize(fl, i) for i in fl]
539 539 > texts = [fl.revision(i, raw=True) for i in fl]
540 540 > flags = [int(fl.flags(i)) for i in fl]
541 541 > hashes = [hash(t) for t in texts]
542 542 > print(' %s: rawsizes=%r flags=%r hashes=%r'
543 543 > % (name, sizes, flags, hashes))
544 544 > EOF
545 545
546 546 $ for i in client client2 server repo3 repo4 repo5 repo6 repo7 repo8 repo9 \
547 547 > repo10; do
548 548 > echo 'repo:' $i
549 549 > hg --cwd $i verify --config extensions.dumpflog=$TESTTMP/dumpflog.py -q
550 550 > done
551 551 repo: client
552 552 repo: client2
553 553 repo: server
554 554 repo: repo3
555 555 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
556 556 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
557 557 repo: repo4
558 558 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
559 559 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
560 560 repo: repo5
561 561 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
562 562 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
563 563 repo: repo6
564 564 repo: repo7
565 565 repo: repo8
566 566 repo: repo9
567 567 repo: repo10
568 568
569 TODO: repo12 doesn't have any cached lfs files. Figure out how to get the
570 unpushed files from repo12's source instead of the remote store, where they
571 don't exist.
569 repo12 doesn't have any cached lfs files and its source never pushed its
570 files. Therefore, the files don't exist in the remote store. Use the files in
571 the user cache.
572 572
573 573 $ find $TESTTMP/repo12/.hg/store/lfs/objects -type f
574 574 find: */repo12/.hg/store/lfs/objects': $ENOENT$ (glob)
575 575 [1]
576 576
577 577 $ hg --config extensions.share= share repo12 repo13
578 578 updating working directory
579 abort: $TESTTMP/dummy-remote/09/66faba9a01f6c78082aa45899a4fef732002d0b26404e90093adf1e876ab8d: $ENOTDIR$ (glob)
580 [255]
579 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
580 $ hg -R repo13 -q verify
581
581 582 $ hg clone repo12 repo14
582 583 updating to branch default
583 abort: $TESTTMP/dummy-remote/09/66faba9a01f6c78082aa45899a4fef732002d0b26404e90093adf1e876ab8d: $ENOTDIR$ (glob)
584 [255]
584 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
585 $ hg -R repo14 -q verify
585 586
586 TODO: If the source repo doesn't have the blob (maybe it was pulled or cloned
587 with --noupdate), the blob should be accessible via the global cache to send to
588 the remote store.
587 If the source repo doesn't have the blob (maybe it was pulled or cloned with
588 --noupdate), the blob is still accessible via the global cache to send to the
589 remote store.
589 590
590 591 $ rm -rf $TESTTMP/repo14/.hg/store/lfs
591 592 $ hg init repo15
592 593 $ hg -R repo14 push repo15
593 594 pushing to repo15
594 595 searching for changes
595 abort: $TESTTMP/repo14/.hg/store/lfs/objects/1c/896a0adcf9262119f4a98216aaa5ca00a58b9a0ce848914a02f9cd876f65a3: $ENOTDIR$ (glob)
596 [255]
596 adding changesets
597 adding manifests
598 adding file changes
599 added 3 changesets with 2 changes to 1 files
600 $ hg -R repo14 -q verify
597 601
598 602 lfs -> normal -> lfs round trip conversions are possible. The threshold for the
599 603 lfs destination is specified here because it was originally listed in the local
600 604 .hgrc, and the global one is too high to trigger lfs usage. For lfs -> normal,
601 605 there's no 'lfs' destination repo requirement. For normal -> lfs, there is.
602 606
603 607 XXX: There's not a great way to ensure that the conversion to normal files
604 608 actually converts _everything_ to normal. The extension needs to be loaded for
605 609 the source, but there's no way to disable it for the destination. The best that
606 610 can be done is to raise the threshold so that lfs isn't used on the destination.
607 611 Mercurial doesn't accept using '!' to unset the value on the command line.
608 612
609 613 $ hg --config extensions.convert= --config lfs.threshold=1000M \
610 614 > convert repo8 convert_normal
611 615 initializing destination convert_normal repository
612 616 scanning source...
613 617 sorting...
614 618 converting...
615 619 2 a
616 620 1 b
617 621 0 meta
618 622 $ grep 'lfs' convert_normal/.hg/requires
619 623 [1]
620 624 $ hg --cwd convert_normal debugdata a1 0
621 625 THIS-IS-LFS-BECAUSE-10-BYTES
622 626
623 627 $ hg --config extensions.convert= --config lfs.threshold=10B \
624 628 > convert convert_normal convert_lfs
625 629 initializing destination convert_lfs repository
626 630 scanning source...
627 631 sorting...
628 632 converting...
629 633 2 a
630 634 1 b
631 635 0 meta
632 636 $ hg --cwd convert_lfs debugdata a1 0
633 637 version https://git-lfs.github.com/spec/v1
634 638 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
635 639 size 29
636 640 x-is-binary 0
637 641 $ grep 'lfs' convert_lfs/.hg/requires
638 642 lfs
639 643
640 644 This convert is trickier, because it contains deleted files (via `hg mv`)
641 645
642 646 $ hg --config extensions.convert= --config lfs.threshold=1000M \
643 647 > convert repo3 convert_normal2
644 648 initializing destination convert_normal2 repository
645 649 scanning source...
646 650 sorting...
647 651 converting...
648 652 4 commit with lfs content
649 653 3 renames
650 654 2 large to small, small to large
651 655 1 random modifications
652 656 0 switch large and small again
653 657 $ grep 'lfs' convert_normal2/.hg/requires
654 658 [1]
655 659 $ hg --cwd convert_normal2 debugdata large 0
656 660 LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS
657 661
658 662 $ hg --config extensions.convert= --config lfs.threshold=10B \
659 663 > convert convert_normal2 convert_lfs2
660 664 initializing destination convert_lfs2 repository
661 665 scanning source...
662 666 sorting...
663 667 converting...
664 668 4 commit with lfs content
665 669 3 renames
666 670 2 large to small, small to large
667 671 1 random modifications
668 672 0 switch large and small again
669 673 $ grep 'lfs' convert_lfs2/.hg/requires
670 674 lfs
671 675 $ hg --cwd convert_lfs2 debugdata large 0
672 676 version https://git-lfs.github.com/spec/v1
673 677 oid sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
674 678 size 39
675 679 x-is-binary 0
676 680
677 681 $ hg -R convert_lfs2 config --debug extensions | grep lfs
678 682 $TESTTMP/convert_lfs2/.hg/hgrc:*: extensions.lfs= (glob)
General Comments 0
You need to be logged in to leave comments. Login now