repo: look up nullrev context by revnum, not symbolic name...
Martin von Zweigbergk
r39930:d739f423 default
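The patch replaces symbolic-name lookups of the null revision with lookups by revision number. A minimal sketch of the difference (illustrative; assumes a localrepo object `repo` is already in hand):

    from mercurial.node import nullrev

    ctx = repo['null']   # before: resolved via the symbol path (revsymbol)
    ctx = repo[nullrev]  # after: indexed directly by revision number -1
    assert ctx.rev() == nullrev

Both forms yield the changectx for the null revision; the revnum form simply bypasses string-based symbol resolution.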
@@ -1,344 +1,347 @@ mercurial/archival.py
1 1 # archival.py - revision archival for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import gzip
11 11 import os
12 12 import struct
13 13 import tarfile
14 14 import time
15 15 import zipfile
16 16 import zlib
17 17
18 18 from .i18n import _
19 from .node import (
20 nullrev,
21 )
19 22
20 23 from . import (
21 24 error,
22 25 formatter,
23 26 match as matchmod,
24 27 pycompat,
25 28 scmutil,
26 29 util,
27 30 vfs as vfsmod,
28 31 )
29 32 stringio = util.stringio
30 33
31 34 # from unzip source code:
32 35 _UNX_IFREG = 0x8000
33 36 _UNX_IFLNK = 0xa000
34 37
35 38 def tidyprefix(dest, kind, prefix):
36 39 '''choose prefix to use for names in archive. make sure prefix is
37 40 safe for consumers.'''
38 41
39 42 if prefix:
40 43 prefix = util.normpath(prefix)
41 44 else:
42 45 if not isinstance(dest, bytes):
43 46 raise ValueError('dest must be string if no prefix')
44 47 prefix = os.path.basename(dest)
45 48 lower = prefix.lower()
46 49 for sfx in exts.get(kind, []):
47 50 if lower.endswith(sfx):
48 51 prefix = prefix[:-len(sfx)]
49 52 break
50 53 lpfx = os.path.normpath(util.localpath(prefix))
51 54 prefix = util.pconvert(lpfx)
52 55 if not prefix.endswith('/'):
53 56 prefix += '/'
54 57 # Drop the leading '.' path component if present, so Windows can read the
55 58 # zip files (issue4634)
56 59 if prefix.startswith('./'):
57 60 prefix = prefix[2:]
58 61 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
59 62 raise error.Abort(_('archive prefix contains illegal components'))
60 63 return prefix
61 64
62 65 exts = {
63 66 'tar': ['.tar'],
64 67 'tbz2': ['.tbz2', '.tar.bz2'],
65 68 'tgz': ['.tgz', '.tar.gz'],
66 69 'zip': ['.zip'],
67 70 }
68 71
69 72 def guesskind(dest):
70 73 for kind, extensions in exts.iteritems():
71 74 if any(dest.endswith(ext) for ext in extensions):
72 75 return kind
73 76 return None
74 77
75 78 def _rootctx(repo):
76 79 # repo[0] may be hidden
77 80 for rev in repo:
78 81 return repo[rev]
79 return repo['null']
82 return repo[nullrev]
80 83
81 84 # {tags} on ctx includes local tags and 'tip', with no current way to limit
82 85 # that to global tags. Therefore, use {latesttag} as a substitute when
83 86 # the distance is 0, since that will be the list of global tags on ctx.
84 87 _defaultmetatemplate = br'''
85 88 repo: {root}
86 89 node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
87 90 branch: {branch|utf8}
88 91 {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
89 92 separate("\n",
90 93 join(latesttag % "latesttag: {tag}", "\n"),
91 94 "latesttagdistance: {latesttagdistance}",
92 95 "changessincelatesttag: {changessincelatesttag}"))}
93 96 '''[1:] # drop leading '\n'
94 97
95 98 def buildmetadata(ctx):
96 99 '''build content of .hg_archival.txt'''
97 100 repo = ctx.repo()
98 101
99 102 opts = {
100 103 'template': repo.ui.config('experimental', 'archivemetatemplate',
101 104 _defaultmetatemplate)
102 105 }
103 106
104 107 out = util.stringio()
105 108
106 109 fm = formatter.formatter(repo.ui, out, 'archive', opts)
107 110 fm.startitem()
108 111 fm.context(ctx=ctx)
109 112 fm.data(root=_rootctx(repo).hex())
110 113
111 114 if ctx.rev() is None:
112 115 dirty = ''
113 116 if ctx.dirty(missing=True):
114 117 dirty = '+'
115 118 fm.data(dirty=dirty)
116 119 fm.end()
117 120
118 121 return out.getvalue()
119 122
120 123 class tarit(object):
121 124 '''write archive to tar file or stream. can write uncompressed,
122 125 or compress with gzip or bzip2.'''
123 126
124 127 class GzipFileWithTime(gzip.GzipFile):
125 128
126 129 def __init__(self, *args, **kw):
127 130 timestamp = None
128 131 if r'timestamp' in kw:
129 132 timestamp = kw.pop(r'timestamp')
130 133 if timestamp is None:
131 134 self.timestamp = time.time()
132 135 else:
133 136 self.timestamp = timestamp
134 137 gzip.GzipFile.__init__(self, *args, **kw)
135 138
136 139 def _write_gzip_header(self):
137 140 self.fileobj.write('\037\213') # magic header
138 141 self.fileobj.write('\010') # compression method
139 142 fname = self.name
140 143 if fname and fname.endswith('.gz'):
141 144 fname = fname[:-3]
142 145 flags = 0
143 146 if fname:
144 147 flags = gzip.FNAME
145 148 self.fileobj.write(pycompat.bytechr(flags))
146 149 gzip.write32u(self.fileobj, int(self.timestamp))
147 150 self.fileobj.write('\002')
148 151 self.fileobj.write('\377')
149 152 if fname:
150 153 self.fileobj.write(fname + '\000')
151 154
152 155 def __init__(self, dest, mtime, kind=''):
153 156 self.mtime = mtime
154 157 self.fileobj = None
155 158
156 159 def taropen(mode, name='', fileobj=None):
157 160 if kind == 'gz':
158 161 mode = mode[0:1]
159 162 if not fileobj:
160 163 fileobj = open(name, mode + 'b')
161 164 gzfileobj = self.GzipFileWithTime(name,
162 165 pycompat.sysstr(mode + 'b'),
163 166 zlib.Z_BEST_COMPRESSION,
164 167 fileobj, timestamp=mtime)
165 168 self.fileobj = gzfileobj
166 169 return tarfile.TarFile.taropen(
167 170 name, pycompat.sysstr(mode), gzfileobj)
168 171 else:
169 172 return tarfile.open(
170 173 name, pycompat.sysstr(mode + kind), fileobj)
171 174
172 175 if isinstance(dest, bytes):
173 176 self.z = taropen('w:', name=dest)
174 177 else:
175 178 self.z = taropen('w|', fileobj=dest)
176 179
177 180 def addfile(self, name, mode, islink, data):
178 181 name = pycompat.fsdecode(name)
179 182 i = tarfile.TarInfo(name)
180 183 i.mtime = self.mtime
181 184 i.size = len(data)
182 185 if islink:
183 186 i.type = tarfile.SYMTYPE
184 187 i.mode = 0o777
185 188 i.linkname = pycompat.fsdecode(data)
186 189 data = None
187 190 i.size = 0
188 191 else:
189 192 i.mode = mode
190 193 data = stringio(data)
191 194 self.z.addfile(i, data)
192 195
193 196 def done(self):
194 197 self.z.close()
195 198 if self.fileobj:
196 199 self.fileobj.close()
197 200
198 201 class zipit(object):
199 202 '''write archive to zip file or stream. can write uncompressed,
200 203 or compressed with deflate.'''
201 204
202 205 def __init__(self, dest, mtime, compress=True):
203 206 self.z = zipfile.ZipFile(pycompat.fsdecode(dest), r'w',
204 207 compress and zipfile.ZIP_DEFLATED or
205 208 zipfile.ZIP_STORED)
206 209
207 210 # Python's zipfile module emits deprecation warnings if we try
208 211 # to store files with a date before 1980.
209 212 epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
210 213 if mtime < epoch:
211 214 mtime = epoch
212 215
213 216 self.mtime = mtime
214 217 self.date_time = time.gmtime(mtime)[:6]
215 218
216 219 def addfile(self, name, mode, islink, data):
217 220 i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time)
218 221 i.compress_type = self.z.compression
219 222 # unzip will not honor unix file modes unless file creator is
220 223 # set to unix (id 3).
221 224 i.create_system = 3
222 225 ftype = _UNX_IFREG
223 226 if islink:
224 227 mode = 0o777
225 228 ftype = _UNX_IFLNK
226 229 i.external_attr = (mode | ftype) << 16
227 230 # add "extended-timestamp" extra block, because zip archives
228 231 # without this will be extracted with unexpected timestamp,
229 232 # if TZ is not configured as GMT
230 233 i.extra += struct.pack('<hhBl',
231 234 0x5455, # block type: "extended-timestamp"
232 235 1 + 4, # size of this block
233 236 1, # "modification time is present"
234 237 int(self.mtime)) # last modification (UTC)
235 238 self.z.writestr(i, data)
236 239
237 240 def done(self):
238 241 self.z.close()
239 242
240 243 class fileit(object):
241 244 '''write archive as files in directory.'''
242 245
243 246 def __init__(self, name, mtime):
244 247 self.basedir = name
245 248 self.opener = vfsmod.vfs(self.basedir)
246 249 self.mtime = mtime
247 250
248 251 def addfile(self, name, mode, islink, data):
249 252 if islink:
250 253 self.opener.symlink(data, name)
251 254 return
252 255 f = self.opener(name, "w", atomictemp=False)
253 256 f.write(data)
254 257 f.close()
255 258 destfile = os.path.join(self.basedir, name)
256 259 os.chmod(destfile, mode)
257 260 if self.mtime is not None:
258 261 os.utime(destfile, (self.mtime, self.mtime))
259 262
260 263 def done(self):
261 264 pass
262 265
263 266 archivers = {
264 267 'files': fileit,
265 268 'tar': tarit,
266 269 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
267 270 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
268 271 'uzip': lambda name, mtime: zipit(name, mtime, False),
269 272 'zip': zipit,
270 273 }
271 274
272 275 def archive(repo, dest, node, kind, decode=True, matchfn=None,
273 276 prefix='', mtime=None, subrepos=False):
274 277 '''create archive of repo as it was at node.
275 278
276 279 dest can be name of directory, name of archive file, or file
277 280 object to write archive to.
278 281
279 282 kind is type of archive to create.
280 283
281 284 decode tells whether to put files through decode filters from
282 285 hgrc.
283 286
284 287 matchfn is function to filter names of files to write to archive.
285 288
286 289 prefix is name of path to put before every archive member.
287 290
288 291 mtime is the modified time, in seconds, or None to use the changeset time.
289 292
290 293 subrepos tells whether to include subrepos.
291 294 '''
292 295
293 296 if kind == 'files':
294 297 if prefix:
295 298 raise error.Abort(_('cannot give prefix when archiving to files'))
296 299 else:
297 300 prefix = tidyprefix(dest, kind, prefix)
298 301
299 302 def write(name, mode, islink, getdata):
300 303 data = getdata()
301 304 if decode:
302 305 data = repo.wwritedata(name, data)
303 306 archiver.addfile(prefix + name, mode, islink, data)
304 307
305 308 if kind not in archivers:
306 309 raise error.Abort(_("unknown archive type '%s'") % kind)
307 310
308 311 ctx = repo[node]
309 312 archiver = archivers[kind](dest, mtime or ctx.date()[0])
310 313
311 314 if repo.ui.configbool("ui", "archivemeta"):
312 315 name = '.hg_archival.txt'
313 316 if not matchfn or matchfn(name):
314 317 write(name, 0o644, False, lambda: buildmetadata(ctx))
315 318
316 319 if matchfn:
317 320 files = [f for f in ctx.manifest().keys() if matchfn(f)]
318 321 else:
319 322 files = ctx.manifest().keys()
320 323 total = len(files)
321 324 if total:
322 325 files.sort()
323 326 scmutil.prefetchfiles(repo, [ctx.rev()],
324 327 scmutil.matchfiles(repo, files))
325 328 progress = scmutil.progress(repo.ui, _('archiving'), unit=_('files'),
326 329 total=total)
327 330 progress.update(0)
328 331 for f in files:
329 332 ff = ctx.flags(f)
330 333 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
331 334 progress.increment(item=f)
332 335 progress.complete()
333 336
334 337 if subrepos:
335 338 for subpath in sorted(ctx.substate):
336 339 sub = ctx.workingsub(subpath)
337 340 submatch = matchmod.subdirmatcher(subpath, matchfn)
338 341 total += sub.archive(archiver, prefix, submatch, decode)
339 342
340 343 if total == 0:
341 344 raise error.Abort(_('no files match the archive pattern'))
342 345
343 346 archiver.done()
344 347 return total
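For context, a minimal sketch of driving this module (illustrative paths; assumes an existing local repository; not part of the patch):

    from mercurial import archival, hg, ui as uimod

    u = uimod.ui.load()
    repo = hg.repository(u, b'/path/to/repo')
    # Archive the working parent as a gzipped tarball; the member prefix
    # ('snapshot/') is derived from the destination name by tidyprefix().
    archival.archive(repo, b'/tmp/snapshot.tar.gz', repo[b'.'].node(), b'tgz')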
@@ -1,1790 +1,1791 @@ mercurial/scmutil.py
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 nullrev,
24 25 short,
25 26 wdirid,
26 27 wdirrev,
27 28 )
28 29
29 30 from . import (
30 31 encoding,
31 32 error,
32 33 match as matchmod,
33 34 obsolete,
34 35 obsutil,
35 36 pathutil,
36 37 phases,
37 38 policy,
38 39 pycompat,
39 40 revsetlang,
40 41 similar,
41 42 url,
42 43 util,
43 44 vfs,
44 45 )
45 46
46 47 from .utils import (
47 48 procutil,
48 49 stringutil,
49 50 )
50 51
51 52 if pycompat.iswindows:
52 53 from . import scmwindows as scmplatform
53 54 else:
54 55 from . import scmposix as scmplatform
55 56
56 57 parsers = policy.importmod(r'parsers')
57 58
58 59 termsize = scmplatform.termsize
59 60
60 61 class status(tuple):
61 62 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
62 63 and 'ignored' properties are only relevant to the working copy.
63 64 '''
64 65
65 66 __slots__ = ()
66 67
67 68 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
68 69 clean):
69 70 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
70 71 ignored, clean))
71 72
72 73 @property
73 74 def modified(self):
74 75 '''files that have been modified'''
75 76 return self[0]
76 77
77 78 @property
78 79 def added(self):
79 80 '''files that have been added'''
80 81 return self[1]
81 82
82 83 @property
83 84 def removed(self):
84 85 '''files that have been removed'''
85 86 return self[2]
86 87
87 88 @property
88 89 def deleted(self):
89 90 '''files that are in the dirstate, but have been deleted from the
90 91 working copy (aka "missing")
91 92 '''
92 93 return self[3]
93 94
94 95 @property
95 96 def unknown(self):
96 97 '''files not in the dirstate that are not ignored'''
97 98 return self[4]
98 99
99 100 @property
100 101 def ignored(self):
101 102 '''files not in the dirstate that are ignored (by _dirignore())'''
102 103 return self[5]
103 104
104 105 @property
105 106 def clean(self):
106 107 '''files that have not been modified'''
107 108 return self[6]
108 109
109 110 def __repr__(self, *args, **kwargs):
110 111 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
111 112 r'unknown=%s, ignored=%s, clean=%s>') %
112 113 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
113 114
114 115 def itersubrepos(ctx1, ctx2):
115 116 """find subrepos in ctx1 or ctx2"""
116 117 # Create a (subpath, ctx) mapping where we prefer subpaths from
117 118 # ctx1. The subpaths from ctx2 are important when the .hgsub file
118 119 # has been modified (in ctx2) but not yet committed (in ctx1).
119 120 subpaths = dict.fromkeys(ctx2.substate, ctx2)
120 121 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
121 122
122 123 missing = set()
123 124
124 125 for subpath in ctx2.substate:
125 126 if subpath not in ctx1.substate:
126 127 del subpaths[subpath]
127 128 missing.add(subpath)
128 129
129 130 for subpath, ctx in sorted(subpaths.iteritems()):
130 131 yield subpath, ctx.sub(subpath)
131 132
132 133 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
133 134 # status and diff will have an accurate result when it does
134 135 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
135 136 # against itself.
136 137 for subpath in missing:
137 138 yield subpath, ctx2.nullsub(subpath, ctx1)
138 139
139 140 def nochangesfound(ui, repo, excluded=None):
140 141 '''Report no changes for push/pull, excluded is None or a list of
141 142 nodes excluded from the push/pull.
142 143 '''
143 144 secretlist = []
144 145 if excluded:
145 146 for n in excluded:
146 147 ctx = repo[n]
147 148 if ctx.phase() >= phases.secret and not ctx.extinct():
148 149 secretlist.append(n)
149 150
150 151 if secretlist:
151 152 ui.status(_("no changes found (ignored %d secret changesets)\n")
152 153 % len(secretlist))
153 154 else:
154 155 ui.status(_("no changes found\n"))
155 156
156 157 def callcatch(ui, func):
157 158 """call func() with global exception handling
158 159
159 160 return func() if no exception happens. otherwise do some error handling
160 161 and return an exit code accordingly. does not handle all exceptions.
161 162 """
162 163 try:
163 164 try:
164 165 return func()
165 166 except: # re-raises
166 167 ui.traceback()
167 168 raise
168 169 # Global exception handling, alphabetically
169 170 # Mercurial-specific first, followed by built-in and library exceptions
170 171 except error.LockHeld as inst:
171 172 if inst.errno == errno.ETIMEDOUT:
172 173 reason = _('timed out waiting for lock held by %r') % inst.locker
173 174 else:
174 175 reason = _('lock held by %r') % inst.locker
175 176 ui.error(_("abort: %s: %s\n") % (
176 177 inst.desc or stringutil.forcebytestr(inst.filename), reason))
177 178 if not inst.locker:
178 179 ui.error(_("(lock might be very busy)\n"))
179 180 except error.LockUnavailable as inst:
180 181 ui.error(_("abort: could not lock %s: %s\n") %
181 182 (inst.desc or stringutil.forcebytestr(inst.filename),
182 183 encoding.strtolocal(inst.strerror)))
183 184 except error.OutOfBandError as inst:
184 185 if inst.args:
185 186 msg = _("abort: remote error:\n")
186 187 else:
187 188 msg = _("abort: remote error\n")
188 189 ui.error(msg)
189 190 if inst.args:
190 191 ui.error(''.join(inst.args))
191 192 if inst.hint:
192 193 ui.error('(%s)\n' % inst.hint)
193 194 except error.RepoError as inst:
194 195 ui.error(_("abort: %s!\n") % inst)
195 196 if inst.hint:
196 197 ui.error(_("(%s)\n") % inst.hint)
197 198 except error.ResponseError as inst:
198 199 ui.error(_("abort: %s") % inst.args[0])
199 200 msg = inst.args[1]
200 201 if isinstance(msg, type(u'')):
201 202 msg = pycompat.sysbytes(msg)
202 203 if not isinstance(msg, bytes):
203 204 ui.error(" %r\n" % (msg,))
204 205 elif not msg:
205 206 ui.error(_(" empty string\n"))
206 207 else:
207 208 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
208 209 except error.CensoredNodeError as inst:
209 210 ui.error(_("abort: file censored %s!\n") % inst)
210 211 except error.StorageError as inst:
211 212 ui.error(_("abort: %s!\n") % inst)
212 213 except error.InterventionRequired as inst:
213 214 ui.error("%s\n" % inst)
214 215 if inst.hint:
215 216 ui.error(_("(%s)\n") % inst.hint)
216 217 return 1
217 218 except error.WdirUnsupported:
218 219 ui.error(_("abort: working directory revision cannot be specified\n"))
219 220 except error.Abort as inst:
220 221 ui.error(_("abort: %s\n") % inst)
221 222 if inst.hint:
222 223 ui.error(_("(%s)\n") % inst.hint)
223 224 except ImportError as inst:
224 225 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
225 226 m = stringutil.forcebytestr(inst).split()[-1]
226 227 if m in "mpatch bdiff".split():
227 228 ui.error(_("(did you forget to compile extensions?)\n"))
228 229 elif m in "zlib".split():
229 230 ui.error(_("(is your Python install correct?)\n"))
230 231 except IOError as inst:
231 232 if util.safehasattr(inst, "code"):
232 233 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
233 234 elif util.safehasattr(inst, "reason"):
234 235 try: # usually it is in the form (errno, strerror)
235 236 reason = inst.reason.args[1]
236 237 except (AttributeError, IndexError):
237 238 # it might be anything, for example a string
238 239 reason = inst.reason
239 240 if isinstance(reason, pycompat.unicode):
240 241 # SSLError of Python 2.7.9 contains a unicode
241 242 reason = encoding.unitolocal(reason)
242 243 ui.error(_("abort: error: %s\n") % reason)
243 244 elif (util.safehasattr(inst, "args")
244 245 and inst.args and inst.args[0] == errno.EPIPE):
245 246 pass
246 247 elif getattr(inst, "strerror", None):
247 248 if getattr(inst, "filename", None):
248 249 ui.error(_("abort: %s: %s\n") % (
249 250 encoding.strtolocal(inst.strerror),
250 251 stringutil.forcebytestr(inst.filename)))
251 252 else:
252 253 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
253 254 else:
254 255 raise
255 256 except OSError as inst:
256 257 if getattr(inst, "filename", None) is not None:
257 258 ui.error(_("abort: %s: '%s'\n") % (
258 259 encoding.strtolocal(inst.strerror),
259 260 stringutil.forcebytestr(inst.filename)))
260 261 else:
261 262 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
262 263 except MemoryError:
263 264 ui.error(_("abort: out of memory\n"))
264 265 except SystemExit as inst:
265 266 # Commands shouldn't sys.exit directly, but give a return code.
266 267 # Just in case catch this and pass exit code to caller.
267 268 return inst.code
268 269 except socket.error as inst:
269 270 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
270 271
271 272 return -1
272 273
273 274 def checknewlabel(repo, lbl, kind):
274 275 # Do not use the "kind" parameter in ui output.
275 276 # It makes strings difficult to translate.
276 277 if lbl in ['tip', '.', 'null']:
277 278 raise error.Abort(_("the name '%s' is reserved") % lbl)
278 279 for c in (':', '\0', '\n', '\r'):
279 280 if c in lbl:
280 281 raise error.Abort(
281 282 _("%r cannot be used in a name") % pycompat.bytestr(c))
282 283 try:
283 284 int(lbl)
284 285 raise error.Abort(_("cannot use an integer as a name"))
285 286 except ValueError:
286 287 pass
287 288 if lbl.strip() != lbl:
288 289 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
289 290
290 291 def checkfilename(f):
291 292 '''Check that the filename f is an acceptable filename for a tracked file'''
292 293 if '\r' in f or '\n' in f:
293 294 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
294 295 % pycompat.bytestr(f))
295 296
296 297 def checkportable(ui, f):
297 298 '''Check if filename f is portable and warn or abort depending on config'''
298 299 checkfilename(f)
299 300 abort, warn = checkportabilityalert(ui)
300 301 if abort or warn:
301 302 msg = util.checkwinfilename(f)
302 303 if msg:
303 304 msg = "%s: %s" % (msg, procutil.shellquote(f))
304 305 if abort:
305 306 raise error.Abort(msg)
306 307 ui.warn(_("warning: %s\n") % msg)
307 308
308 309 def checkportabilityalert(ui):
309 310 '''check if the user's config requests nothing, a warning, or abort for
310 311 non-portable filenames'''
311 312 val = ui.config('ui', 'portablefilenames')
312 313 lval = val.lower()
313 314 bval = stringutil.parsebool(val)
314 315 abort = pycompat.iswindows or lval == 'abort'
315 316 warn = bval or lval == 'warn'
316 317 if bval is None and not (warn or abort or lval == 'ignore'):
317 318 raise error.ConfigError(
318 319 _("ui.portablefilenames value is invalid ('%s')") % val)
319 320 return abort, warn
320 321
321 322 class casecollisionauditor(object):
322 323 def __init__(self, ui, abort, dirstate):
323 324 self._ui = ui
324 325 self._abort = abort
325 326 allfiles = '\0'.join(dirstate._map)
326 327 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
327 328 self._dirstate = dirstate
328 329 # The purpose of _newfiles is so that we don't complain about
329 330 # case collisions if someone were to call this object with the
330 331 # same filename twice.
331 332 self._newfiles = set()
332 333
333 334 def __call__(self, f):
334 335 if f in self._newfiles:
335 336 return
336 337 fl = encoding.lower(f)
337 338 if fl in self._loweredfiles and f not in self._dirstate:
338 339 msg = _('possible case-folding collision for %s') % f
339 340 if self._abort:
340 341 raise error.Abort(msg)
341 342 self._ui.warn(_("warning: %s\n") % msg)
342 343 self._loweredfiles.add(fl)
343 344 self._newfiles.add(f)
344 345
345 346 def filteredhash(repo, maxrev):
346 347 """build hash of filtered revisions in the current repoview.
347 348
348 349 Multiple caches perform up-to-date validation by checking that the
349 350 tiprev and tipnode stored in the cache file match the current repository.
350 351 However, this is not sufficient for validating repoviews because the set
351 352 of revisions in the view may change without the repository tiprev and
352 353 tipnode changing.
353 354
354 355 This function hashes all the revs filtered from the view and returns
355 356 that SHA-1 digest.
356 357 """
357 358 cl = repo.changelog
358 359 if not cl.filteredrevs:
359 360 return None
360 361 key = None
361 362 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
362 363 if revs:
363 364 s = hashlib.sha1()
364 365 for rev in revs:
365 366 s.update('%d;' % rev)
366 367 key = s.digest()
367 368 return key
368 369
369 370 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
370 371 '''yield every hg repository under path, always recursively.
371 372 The recurse flag will only control recursion into repo working dirs'''
372 373 def errhandler(err):
373 374 if err.filename == path:
374 375 raise err
375 376 samestat = getattr(os.path, 'samestat', None)
376 377 if followsym and samestat is not None:
377 378 def adddir(dirlst, dirname):
378 379 dirstat = os.stat(dirname)
379 380 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
380 381 if not match:
381 382 dirlst.append(dirstat)
382 383 return not match
383 384 else:
384 385 followsym = False
385 386
386 387 if (seen_dirs is None) and followsym:
387 388 seen_dirs = []
388 389 adddir(seen_dirs, path)
389 390 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
390 391 dirs.sort()
391 392 if '.hg' in dirs:
392 393 yield root # found a repository
393 394 qroot = os.path.join(root, '.hg', 'patches')
394 395 if os.path.isdir(os.path.join(qroot, '.hg')):
395 396 yield qroot # we have a patch queue repo here
396 397 if recurse:
397 398 # avoid recursing inside the .hg directory
398 399 dirs.remove('.hg')
399 400 else:
400 401 dirs[:] = [] # don't descend further
401 402 elif followsym:
402 403 newdirs = []
403 404 for d in dirs:
404 405 fname = os.path.join(root, d)
405 406 if adddir(seen_dirs, fname):
406 407 if os.path.islink(fname):
407 408 for hgname in walkrepos(fname, True, seen_dirs):
408 409 yield hgname
409 410 else:
410 411 newdirs.append(d)
411 412 dirs[:] = newdirs
412 413
413 414 def binnode(ctx):
414 415 """Return binary node id for a given basectx"""
415 416 node = ctx.node()
416 417 if node is None:
417 418 return wdirid
418 419 return node
419 420
420 421 def intrev(ctx):
421 422 """Return integer for a given basectx that can be used in comparison or
422 423 arithmetic operation"""
423 424 rev = ctx.rev()
424 425 if rev is None:
425 426 return wdirrev
426 427 return rev
427 428
428 429 def formatchangeid(ctx):
429 430 """Format changectx as '{rev}:{node|formatnode}', which is the default
430 431 template provided by logcmdutil.changesettemplater"""
431 432 repo = ctx.repo()
432 433 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
433 434
434 435 def formatrevnode(ui, rev, node):
435 436 """Format given revision and node depending on the current verbosity"""
436 437 if ui.debugflag:
437 438 hexfunc = hex
438 439 else:
439 440 hexfunc = short
440 441 return '%d:%s' % (rev, hexfunc(node))
441 442
442 443 def resolvehexnodeidprefix(repo, prefix):
443 444 if (prefix.startswith('x') and
444 445 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
445 446 prefix = prefix[1:]
446 447 try:
447 448 # Uses unfiltered repo because it's faster when prefix is ambiguous.
448 449 # This matches the shortesthexnodeidprefix() function below.
449 450 node = repo.unfiltered().changelog._partialmatch(prefix)
450 451 except error.AmbiguousPrefixLookupError:
451 452 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
452 453 if revset:
453 454 # Clear config to avoid infinite recursion
454 455 configoverrides = {('experimental',
455 456 'revisions.disambiguatewithin'): None}
456 457 with repo.ui.configoverride(configoverrides):
457 458 revs = repo.anyrevs([revset], user=True)
458 459 matches = []
459 460 for rev in revs:
460 461 node = repo.changelog.node(rev)
461 462 if hex(node).startswith(prefix):
462 463 matches.append(node)
463 464 if len(matches) == 1:
464 465 return matches[0]
465 466 raise
466 467 if node is None:
467 468 return
468 469 repo.changelog.rev(node) # make sure node isn't filtered
469 470 return node
470 471
471 472 def mayberevnum(repo, prefix):
472 473 """Checks if the given prefix may be mistaken for a revision number"""
473 474 try:
474 475 i = int(prefix)
475 476 # if we are a pure int, then starting with zero will not be
476 477 # confused as a rev; or, obviously, if the int is larger
477 478 # than the value of the tip rev
478 479 if prefix[0:1] == b'0' or i >= len(repo):
479 480 return False
480 481 return True
481 482 except ValueError:
482 483 return False
483 484
484 485 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
485 486 """Find the shortest unambiguous prefix that matches hexnode.
486 487
487 488 If "cache" is not None, it must be a dictionary that can be used for
488 489 caching between calls to this method.
489 490 """
490 491 # _partialmatch() of filtered changelog could take O(len(repo)) time,
491 492 # which would be unacceptably slow. so we look for hash collision in
492 493 # unfiltered space, which means some hashes may be slightly longer.
493 494
494 495 def disambiguate(prefix):
495 496 """Disambiguate against revnums."""
496 497 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
497 498 if mayberevnum(repo, prefix):
498 499 return 'x' + prefix
499 500 else:
500 501 return prefix
501 502
502 503 hexnode = hex(node)
503 504 for length in range(len(prefix), len(hexnode) + 1):
504 505 prefix = hexnode[:length]
505 506 if not mayberevnum(repo, prefix):
506 507 return prefix
507 508
508 509 cl = repo.unfiltered().changelog
509 510 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
510 511 if revset:
511 512 revs = None
512 513 if cache is not None:
513 514 revs = cache.get('disambiguationrevset')
514 515 if revs is None:
515 516 revs = repo.anyrevs([revset], user=True)
516 517 if cache is not None:
517 518 cache['disambiguationrevset'] = revs
518 519 if cl.rev(node) in revs:
519 520 hexnode = hex(node)
520 521 nodetree = None
521 522 if cache is not None:
522 523 nodetree = cache.get('disambiguationnodetree')
523 524 if not nodetree:
524 525 try:
525 526 nodetree = parsers.nodetree(cl.index, len(revs))
526 527 except AttributeError:
527 528 # no native nodetree
528 529 pass
529 530 else:
530 531 for r in revs:
531 532 nodetree.insert(r)
532 533 if cache is not None:
533 534 cache['disambiguationnodetree'] = nodetree
534 535 if nodetree is not None:
535 536 length = max(nodetree.shortest(node), minlength)
536 537 prefix = hexnode[:length]
537 538 return disambiguate(prefix)
538 539 for length in range(minlength, len(hexnode) + 1):
539 540 matches = []
540 541 prefix = hexnode[:length]
541 542 for rev in revs:
542 543 otherhexnode = repo[rev].hex()
543 544 if prefix == otherhexnode[:length]:
544 545 matches.append(otherhexnode)
545 546 if len(matches) == 1:
546 547 return disambiguate(prefix)
547 548
548 549 try:
549 550 return disambiguate(cl.shortest(node, minlength))
550 551 except error.LookupError:
551 552 raise error.RepoLookupError()
552 553
553 554 def isrevsymbol(repo, symbol):
554 555 """Checks if a symbol exists in the repo.
555 556
556 557 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
557 558 symbol is an ambiguous nodeid prefix.
558 559 """
559 560 try:
560 561 revsymbol(repo, symbol)
561 562 return True
562 563 except error.RepoLookupError:
563 564 return False
564 565
565 566 def revsymbol(repo, symbol):
566 567 """Returns a context given a single revision symbol (as string).
567 568
568 569 This is similar to revsingle(), but accepts only a single revision symbol,
569 570 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
570 571 not "max(public())".
571 572 """
572 573 if not isinstance(symbol, bytes):
573 574 msg = ("symbol (%s of type %s) was not a string, did you mean "
574 575 "repo[symbol]?" % (symbol, type(symbol)))
575 576 raise error.ProgrammingError(msg)
576 577 try:
577 578 if symbol in ('.', 'tip', 'null'):
578 579 return repo[symbol]
579 580
580 581 try:
581 582 r = int(symbol)
582 583 if '%d' % r != symbol:
583 584 raise ValueError
584 585 l = len(repo.changelog)
585 586 if r < 0:
586 587 r += l
587 588 if r < 0 or r >= l and r != wdirrev:
588 589 raise ValueError
589 590 return repo[r]
590 591 except error.FilteredIndexError:
591 592 raise
592 593 except (ValueError, OverflowError, IndexError):
593 594 pass
594 595
595 596 if len(symbol) == 40:
596 597 try:
597 598 node = bin(symbol)
598 599 rev = repo.changelog.rev(node)
599 600 return repo[rev]
600 601 except error.FilteredLookupError:
601 602 raise
602 603 except (TypeError, LookupError):
603 604 pass
604 605
605 606 # look up bookmarks through the name interface
606 607 try:
607 608 node = repo.names.singlenode(repo, symbol)
608 609 rev = repo.changelog.rev(node)
609 610 return repo[rev]
610 611 except KeyError:
611 612 pass
612 613
613 614 node = resolvehexnodeidprefix(repo, symbol)
614 615 if node is not None:
615 616 rev = repo.changelog.rev(node)
616 617 return repo[rev]
617 618
618 619 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
619 620
620 621 except error.WdirUnsupported:
621 622 return repo[None]
622 623 except (error.FilteredIndexError, error.FilteredLookupError,
623 624 error.FilteredRepoLookupError):
624 625 raise _filterederror(repo, symbol)
625 626
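# Illustrative revsymbol() inputs (editorial comment, not in the patch):
#   revsymbol(repo, b'.')         # working directory parent
#   revsymbol(repo, b'tip')       # reserved symbolic name
#   revsymbol(repo, b'1234')      # revision number as a string
#   revsymbol(repo, b'deadbeef')  # hex nodeid prefix
# Full revset expressions such as b'max(public())' are rejected here;
# they belong in revrange()/revsingle() instead.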
626 627 def _filterederror(repo, changeid):
627 628 """build an exception to be raised about a filtered changeid
628 629
629 630 This is extracted in a function to help extensions (eg: evolve) to
630 631 experiment with various message variants."""
631 632 if repo.filtername.startswith('visible'):
632 633
633 634 # Check if the changeset is obsolete
634 635 unfilteredrepo = repo.unfiltered()
635 636 ctx = revsymbol(unfilteredrepo, changeid)
636 637
637 638 # If the changeset is obsolete, enrich the message with the reason
638 639 # that made this changeset not visible
639 640 if ctx.obsolete():
640 641 msg = obsutil._getfilteredreason(repo, changeid, ctx)
641 642 else:
642 643 msg = _("hidden revision '%s'") % changeid
643 644
644 645 hint = _('use --hidden to access hidden revisions')
645 646
646 647 return error.FilteredRepoLookupError(msg, hint=hint)
647 648 msg = _("filtered revision '%s' (not in '%s' subset)")
648 649 msg %= (changeid, repo.filtername)
649 650 return error.FilteredRepoLookupError(msg)
650 651
651 652 def revsingle(repo, revspec, default='.', localalias=None):
652 653 if not revspec and revspec != 0:
653 654 return repo[default]
654 655
655 656 l = revrange(repo, [revspec], localalias=localalias)
656 657 if not l:
657 658 raise error.Abort(_('empty revision set'))
658 659 return repo[l.last()]
659 660
660 661 def _pairspec(revspec):
661 662 tree = revsetlang.parse(revspec)
662 663 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
663 664
664 665 def revpair(repo, revs):
665 666 if not revs:
666 667 return repo['.'], repo[None]
667 668
668 669 l = revrange(repo, revs)
669 670
670 671 if not l:
671 672 first = second = None
672 673 elif l.isascending():
673 674 first = l.min()
674 675 second = l.max()
675 676 elif l.isdescending():
676 677 first = l.max()
677 678 second = l.min()
678 679 else:
679 680 first = l.first()
680 681 second = l.last()
681 682
682 683 if first is None:
683 684 raise error.Abort(_('empty revision range'))
684 685 if (first == second and len(revs) >= 2
685 686 and not all(revrange(repo, [r]) for r in revs)):
686 687 raise error.Abort(_('empty revision on one side of range'))
687 688
688 689 # if top-level is range expression, the result must always be a pair
689 690 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
690 691 return repo[first], repo[None]
691 692
692 693 return repo[first], repo[second]
693 694
694 695 def revrange(repo, specs, localalias=None):
695 696 """Execute 1 to many revsets and return the union.
696 697
697 698 This is the preferred mechanism for executing revsets using user-specified
698 699 config options, such as revset aliases.
699 700
700 701 The revsets specified by ``specs`` will be executed via a chained ``OR``
701 702 expression. If ``specs`` is empty, an empty result is returned.
702 703
703 704 ``specs`` can contain integers, in which case they are assumed to be
704 705 revision numbers.
705 706
706 707 It is assumed the revsets are already formatted. If you have arguments
707 708 that need to be expanded in the revset, call ``revsetlang.formatspec()``
708 709 and pass the result as an element of ``specs``.
709 710
710 711 Specifying a single revset is allowed.
711 712
712 713 Returns a ``revset.abstractsmartset`` which is a list-like interface over
713 714 integer revisions.
714 715 """
715 716 allspecs = []
716 717 for spec in specs:
717 718 if isinstance(spec, int):
718 719 spec = revsetlang.formatspec('rev(%d)', spec)
719 720 allspecs.append(spec)
720 721 return repo.anyrevs(allspecs, user=True, localalias=localalias)
721 722
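# Illustrative revrange() calls (editorial comment, not in the patch;
# lo and hi are placeholder revision numbers):
#   revrange(repo, [b'draft()', b'heads(default)'])  # union of two revsets
#   revrange(repo, [5, 7])                           # ints become rev(5), rev(7)
#   revrange(repo, [revsetlang.formatspec(b'%d::%d', lo, hi)])
# Each call returns a revset.abstractsmartset of integer revisions.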
722 723 def meaningfulparents(repo, ctx):
723 724 """Return list of meaningful (or all if debug) parentrevs for rev.
724 725
725 726 For merges (two non-nullrev revisions) both parents are meaningful.
726 727 Otherwise the first parent revision is considered meaningful if it
727 728 is not the preceding revision.
728 729 """
729 730 parents = ctx.parents()
730 731 if len(parents) > 1:
731 732 return parents
732 733 if repo.ui.debugflag:
733 return [parents[0], repo['null']]
734 return [parents[0], repo[nullrev]]
734 735 if parents[0].rev() >= intrev(ctx) - 1:
735 736 return []
736 737 return parents
737 738
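# Editorial note (not in the patch): in debug mode a non-merge changeset
# now pairs its first parent with the nullrev context, e.g.:
#   ps = meaningfulparents(repo, repo[b'tip'])
#   # with ui.debugflag set, ps == [repo[b'tip'].p1(), repo[nullrev]]
# repo[nullrev] and the old repo['null'] both yield changectx(-1); the
# revnum lookup just skips the symbol-resolution path in revsymbol().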
738 739 def expandpats(pats):
739 740 '''Expand bare globs when running on windows.
740 741 On posix we assume it has already been done by sh.'''
741 742 if not util.expandglobs:
742 743 return list(pats)
743 744 ret = []
744 745 for kindpat in pats:
745 746 kind, pat = matchmod._patsplit(kindpat, None)
746 747 if kind is None:
747 748 try:
748 749 globbed = glob.glob(pat)
749 750 except re.error:
750 751 globbed = [pat]
751 752 if globbed:
752 753 ret.extend(globbed)
753 754 continue
754 755 ret.append(kindpat)
755 756 return ret
756 757
757 758 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
758 759 badfn=None):
759 760 '''Return a matcher and the patterns that were used.
760 761 The matcher will warn about bad matches, unless an alternate badfn callback
761 762 is provided.'''
762 763 if pats == ("",):
763 764 pats = []
764 765 if opts is None:
765 766 opts = {}
766 767 if not globbed and default == 'relpath':
767 768 pats = expandpats(pats or [])
768 769
769 770 def bad(f, msg):
770 771 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
771 772
772 773 if badfn is None:
773 774 badfn = bad
774 775
775 776 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
776 777 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
777 778
778 779 if m.always():
779 780 pats = []
780 781 return m, pats
781 782
782 783 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
783 784 badfn=None):
784 785 '''Return a matcher that will warn about bad matches.'''
785 786 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
786 787
787 788 def matchall(repo):
788 789 '''Return a matcher that will efficiently match everything.'''
789 790 return matchmod.always(repo.root, repo.getcwd())
790 791
791 792 def matchfiles(repo, files, badfn=None):
792 793 '''Return a matcher that will efficiently match exactly these files.'''
793 794 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
794 795
795 796 def parsefollowlinespattern(repo, rev, pat, msg):
796 797 """Return a file name from `pat` pattern suitable for usage in followlines
797 798 logic.
798 799 """
799 800 if not matchmod.patkind(pat):
800 801 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
801 802 else:
802 803 ctx = repo[rev]
803 804 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
804 805 files = [f for f in ctx if m(f)]
805 806 if len(files) != 1:
806 807 raise error.ParseError(msg)
807 808 return files[0]
808 809
809 810 def origpath(ui, repo, filepath):
810 811 '''customize where .orig files are created
811 812
812 813 Fetch user defined path from config file: [ui] origbackuppath = <path>
813 814 Fall back to default (filepath with .orig suffix) if not specified
814 815 '''
815 816 origbackuppath = ui.config('ui', 'origbackuppath')
816 817 if not origbackuppath:
817 818 return filepath + ".orig"
818 819
819 820 # Convert filepath from an absolute path into a path inside the repo.
820 821 filepathfromroot = util.normpath(os.path.relpath(filepath,
821 822 start=repo.root))
822 823
823 824 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
824 825 origbackupdir = origvfs.dirname(filepathfromroot)
825 826 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
826 827 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
827 828
828 829 # Remove any files that conflict with the backup file's path
829 830 for f in reversed(list(util.finddirs(filepathfromroot))):
830 831 if origvfs.isfileorlink(f):
831 832 ui.note(_('removing conflicting file: %s\n')
832 833 % origvfs.join(f))
833 834 origvfs.unlink(f)
834 835 break
835 836
836 837 origvfs.makedirs(origbackupdir)
837 838
838 839 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
839 840 ui.note(_('removing conflicting directory: %s\n')
840 841 % origvfs.join(filepathfromroot))
841 842 origvfs.rmtree(filepathfromroot, forcibly=True)
842 843
843 844 return origvfs.join(filepathfromroot)
844 845
845 846 class _containsnode(object):
846 847 """proxy __contains__(node) to container.__contains__ which accepts revs"""
847 848
848 849 def __init__(self, repo, revcontainer):
849 850 self._torev = repo.changelog.rev
850 851 self._revcontains = revcontainer.__contains__
851 852
852 853 def __contains__(self, node):
853 854 return self._revcontains(self._torev(node))
854 855
855 856 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
856 857 fixphase=False, targetphase=None, backup=True):
857 858 """do common cleanups when old nodes are replaced by new nodes
858 859
859 860 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
860 861 (we might also want to move working directory parent in the future)
861 862
862 863 By default, bookmark moves are calculated automatically from 'replacements',
863 864 but 'moves' can be used to override that. Also, 'moves' may include
864 865 additional bookmark moves that should not have associated obsmarkers.
865 866
866 867 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
867 868 have replacements. operation is a string, like "rebase".
868 869
869 870 metadata is dictionary containing metadata to be stored in obsmarker if
870 871 obsolescence is enabled.
871 872 """
872 873 assert fixphase or targetphase is None
873 874 if not replacements and not moves:
874 875 return
875 876
876 877 # translate mapping's other forms
877 878 if not util.safehasattr(replacements, 'items'):
878 879 replacements = {(n,): () for n in replacements}
879 880 else:
880 881 # upgrading non tuple "source" to tuple ones for BC
881 882 repls = {}
882 883 for key, value in replacements.items():
883 884 if not isinstance(key, tuple):
884 885 key = (key,)
885 886 repls[key] = value
886 887 replacements = repls
887 888
888 889 # Calculate bookmark movements
889 890 if moves is None:
890 891 moves = {}
891 892 # Unfiltered repo is needed since nodes in replacements might be hidden.
892 893 unfi = repo.unfiltered()
893 894 for oldnodes, newnodes in replacements.items():
894 895 for oldnode in oldnodes:
895 896 if oldnode in moves:
896 897 continue
897 898 if len(newnodes) > 1:
898 899 # usually a split, take the one with biggest rev number
899 900 newnode = next(unfi.set('max(%ln)', newnodes)).node()
900 901 elif len(newnodes) == 0:
901 902 # move bookmark backwards
902 903 allreplaced = []
903 904 for rep in replacements:
904 905 allreplaced.extend(rep)
905 906 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
906 907 allreplaced))
907 908 if roots:
908 909 newnode = roots[0].node()
909 910 else:
910 911 newnode = nullid
911 912 else:
912 913 newnode = newnodes[0]
913 914 moves[oldnode] = newnode
914 915
915 916 allnewnodes = [n for ns in replacements.values() for n in ns]
916 917 toretract = {}
917 918 toadvance = {}
918 919 if fixphase:
919 920 precursors = {}
920 921 for oldnodes, newnodes in replacements.items():
921 922 for oldnode in oldnodes:
922 923 for newnode in newnodes:
923 924 precursors.setdefault(newnode, []).append(oldnode)
924 925
925 926 allnewnodes.sort(key=lambda n: unfi[n].rev())
926 927 newphases = {}
927 928 def phase(ctx):
928 929 return newphases.get(ctx.node(), ctx.phase())
929 930 for newnode in allnewnodes:
930 931 ctx = unfi[newnode]
931 932 parentphase = max(phase(p) for p in ctx.parents())
932 933 if targetphase is None:
933 934 oldphase = max(unfi[oldnode].phase()
934 935 for oldnode in precursors[newnode])
935 936 newphase = max(oldphase, parentphase)
936 937 else:
937 938 newphase = max(targetphase, parentphase)
938 939 newphases[newnode] = newphase
939 940 if newphase > ctx.phase():
940 941 toretract.setdefault(newphase, []).append(newnode)
941 942 elif newphase < ctx.phase():
942 943 toadvance.setdefault(newphase, []).append(newnode)
943 944
944 945 with repo.transaction('cleanup') as tr:
945 946 # Move bookmarks
946 947 bmarks = repo._bookmarks
947 948 bmarkchanges = []
948 949 for oldnode, newnode in moves.items():
949 950 oldbmarks = repo.nodebookmarks(oldnode)
950 951 if not oldbmarks:
951 952 continue
952 953 from . import bookmarks # avoid import cycle
953 954 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
954 955 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
955 956 hex(oldnode), hex(newnode)))
956 957 # Delete divergent bookmarks being parents of related newnodes
957 958 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
958 959 allnewnodes, newnode, oldnode)
959 960 deletenodes = _containsnode(repo, deleterevs)
960 961 for name in oldbmarks:
961 962 bmarkchanges.append((name, newnode))
962 963 for b in bookmarks.divergent2delete(repo, deletenodes, name):
963 964 bmarkchanges.append((b, None))
964 965
965 966 if bmarkchanges:
966 967 bmarks.applychanges(repo, tr, bmarkchanges)
967 968
968 969 for phase, nodes in toretract.items():
969 970 phases.retractboundary(repo, tr, phase, nodes)
970 971 for phase, nodes in toadvance.items():
971 972 phases.advanceboundary(repo, tr, phase, nodes)
972 973
973 974 # Obsolete or strip nodes
974 975 if obsolete.isenabled(repo, obsolete.createmarkersopt):
975 976 # If a node is already obsoleted, and we want to obsolete it
976 977 # without a successor, skip that obsolete request since it's
977 978 # unnecessary. That's the "if s or not isobs(n)" check below.
978 979 # Also sort the node in topology order, that might be useful for
979 980 # some obsstore logic.
980 981 # NOTE: the filtering and sorting might belong to createmarkers.
981 982 isobs = unfi.obsstore.successors.__contains__
982 983 torev = unfi.changelog.rev
983 984 sortfunc = lambda ns: torev(ns[0][0])
984 985 rels = []
985 986 for ns, s in sorted(replacements.items(), key=sortfunc):
986 987 for n in ns:
987 988 if s or not isobs(n):
988 989 rel = (unfi[n], tuple(unfi[m] for m in s))
989 990 rels.append(rel)
990 991 if rels:
991 992 obsolete.createmarkers(repo, rels, operation=operation,
992 993 metadata=metadata)
993 994 else:
994 995 from . import repair # avoid import cycle
995 996 tostrip = list(n for ns in replacements for n in ns)
996 997 if tostrip:
997 998 repair.delayedstrip(repo.ui, repo, tostrip, operation,
998 999 backup=backup)
999 1000
1000 1001 def addremove(repo, matcher, prefix, opts=None):
1001 1002 if opts is None:
1002 1003 opts = {}
1003 1004 m = matcher
1004 1005 dry_run = opts.get('dry_run')
1005 1006 try:
1006 1007 similarity = float(opts.get('similarity') or 0)
1007 1008 except ValueError:
1008 1009 raise error.Abort(_('similarity must be a number'))
1009 1010 if similarity < 0 or similarity > 100:
1010 1011 raise error.Abort(_('similarity must be between 0 and 100'))
1011 1012 similarity /= 100.0
1012 1013
1013 1014 ret = 0
1014 1015 join = lambda f: os.path.join(prefix, f)
1015 1016
1016 1017 wctx = repo[None]
1017 1018 for subpath in sorted(wctx.substate):
1018 1019 submatch = matchmod.subdirmatcher(subpath, m)
1019 1020 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1020 1021 sub = wctx.sub(subpath)
1021 1022 try:
1022 1023 if sub.addremove(submatch, prefix, opts):
1023 1024 ret = 1
1024 1025 except error.LookupError:
1025 1026 repo.ui.status(_("skipping missing subrepository: %s\n")
1026 1027 % join(subpath))
1027 1028
1028 1029 rejected = []
1029 1030 def badfn(f, msg):
1030 1031 if f in m.files():
1031 1032 m.bad(f, msg)
1032 1033 rejected.append(f)
1033 1034
1034 1035 badmatch = matchmod.badmatch(m, badfn)
1035 1036 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1036 1037 badmatch)
1037 1038
1038 1039 unknownset = set(unknown + forgotten)
1039 1040 toprint = unknownset.copy()
1040 1041 toprint.update(deleted)
1041 1042 for abs in sorted(toprint):
1042 1043 if repo.ui.verbose or not m.exact(abs):
1043 1044 if abs in unknownset:
1044 1045 status = _('adding %s\n') % m.uipath(abs)
1045 1046 label = 'addremove.added'
1046 1047 else:
1047 1048 status = _('removing %s\n') % m.uipath(abs)
1048 1049 label = 'addremove.removed'
1049 1050 repo.ui.status(status, label=label)
1050 1051
1051 1052 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1052 1053 similarity)
1053 1054
1054 1055 if not dry_run:
1055 1056 _markchanges(repo, unknown + forgotten, deleted, renames)
1056 1057
1057 1058 for f in rejected:
1058 1059 if f in m.files():
1059 1060 return 1
1060 1061 return ret
1061 1062
1062 1063 def marktouched(repo, files, similarity=0.0):
1063 1064 '''Assert that files have somehow been operated upon. files are relative to
1064 1065 the repo root.'''
1065 1066 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1066 1067 rejected = []
1067 1068
1068 1069 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1069 1070
1070 1071 if repo.ui.verbose:
1071 1072 unknownset = set(unknown + forgotten)
1072 1073 toprint = unknownset.copy()
1073 1074 toprint.update(deleted)
1074 1075 for abs in sorted(toprint):
1075 1076 if abs in unknownset:
1076 1077 status = _('adding %s\n') % abs
1077 1078 else:
1078 1079 status = _('removing %s\n') % abs
1079 1080 repo.ui.status(status)
1080 1081
1081 1082 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1082 1083 similarity)
1083 1084
1084 1085 _markchanges(repo, unknown + forgotten, deleted, renames)
1085 1086
1086 1087 for f in rejected:
1087 1088 if f in m.files():
1088 1089 return 1
1089 1090 return 0
1090 1091
1091 1092 def _interestingfiles(repo, matcher):
1092 1093 '''Walk dirstate with matcher, looking for files that addremove would care
1093 1094 about.
1094 1095
1095 1096 This is different from dirstate.status because it doesn't care about
1096 1097 whether files are modified or clean.'''
1097 1098 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1098 1099 audit_path = pathutil.pathauditor(repo.root, cached=True)
1099 1100
1100 1101 ctx = repo[None]
1101 1102 dirstate = repo.dirstate
1102 1103 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1103 1104 unknown=True, ignored=False, full=False)
1104 1105 for abs, st in walkresults.iteritems():
1105 1106 dstate = dirstate[abs]
1106 1107 if dstate == '?' and audit_path.check(abs):
1107 1108 unknown.append(abs)
1108 1109 elif dstate != 'r' and not st:
1109 1110 deleted.append(abs)
1110 1111 elif dstate == 'r' and st:
1111 1112 forgotten.append(abs)
1112 1113 # for finding renames
1113 1114 elif dstate == 'r' and not st:
1114 1115 removed.append(abs)
1115 1116 elif dstate == 'a':
1116 1117 added.append(abs)
1117 1118
1118 1119 return added, unknown, deleted, removed, forgotten
1119 1120
1120 1121 def _findrenames(repo, matcher, added, removed, similarity):
1121 1122 '''Find renames from removed files to added ones.'''
1122 1123 renames = {}
1123 1124 if similarity > 0:
1124 1125 for old, new, score in similar.findrenames(repo, added, removed,
1125 1126 similarity):
1126 1127 if (repo.ui.verbose or not matcher.exact(old)
1127 1128 or not matcher.exact(new)):
1128 1129 repo.ui.status(_('recording removal of %s as rename to %s '
1129 1130 '(%d%% similar)\n') %
1130 1131 (matcher.rel(old), matcher.rel(new),
1131 1132 score * 100))
1132 1133 renames[new] = old
1133 1134 return renames
1134 1135
1135 1136 def _markchanges(repo, unknown, deleted, renames):
1136 1137 '''Marks the files in unknown as added, the files in deleted as removed,
1137 1138 and the files in renames as copied.'''
1138 1139 wctx = repo[None]
1139 1140 with repo.wlock():
1140 1141 wctx.forget(deleted)
1141 1142 wctx.add(unknown)
1142 1143 for new, old in renames.iteritems():
1143 1144 wctx.copy(old, new)
1144 1145
1145 1146 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1146 1147 """Update the dirstate to reflect the intent of copying src to dst. For
1147 1148 different reasons it might not end with dst being marked as copied from src.
1148 1149 """
1149 1150 origsrc = repo.dirstate.copied(src) or src
1150 1151 if dst == origsrc: # copying back a copy?
1151 1152 if repo.dirstate[dst] not in 'mn' and not dryrun:
1152 1153 repo.dirstate.normallookup(dst)
1153 1154 else:
1154 1155 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1155 1156 if not ui.quiet:
1156 1157 ui.warn(_("%s has not been committed yet, so no copy "
1157 1158 "data will be stored for %s.\n")
1158 1159 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1159 1160 if repo.dirstate[dst] in '?r' and not dryrun:
1160 1161 wctx.add([dst])
1161 1162 elif not dryrun:
1162 1163 wctx.copy(origsrc, dst)
1163 1164
1164 1165 def writerequires(opener, requirements):
1165 1166 with opener('requires', 'w') as fp:
1166 1167 for r in sorted(requirements):
1167 1168 fp.write("%s\n" % r)
1168 1169
1169 1170 class filecachesubentry(object):
1170 1171 def __init__(self, path, stat):
1171 1172 self.path = path
1172 1173 self.cachestat = None
1173 1174 self._cacheable = None
1174 1175
1175 1176 if stat:
1176 1177 self.cachestat = filecachesubentry.stat(self.path)
1177 1178
1178 1179 if self.cachestat:
1179 1180 self._cacheable = self.cachestat.cacheable()
1180 1181 else:
1181 1182 # None means we don't know yet
1182 1183 self._cacheable = None
1183 1184
1184 1185 def refresh(self):
1185 1186 if self.cacheable():
1186 1187 self.cachestat = filecachesubentry.stat(self.path)
1187 1188
1188 1189 def cacheable(self):
1189 1190 if self._cacheable is not None:
1190 1191 return self._cacheable
1191 1192
1192 1193 # we don't know yet, assume it is for now
1193 1194 return True
1194 1195
1195 1196 def changed(self):
1196 1197 # no point in going further if we can't cache it
1197 1198 if not self.cacheable():
1198 1199 return True
1199 1200
1200 1201 newstat = filecachesubentry.stat(self.path)
1201 1202
1202 1203 # we may not know if it's cacheable yet, check again now
1203 1204 if newstat and self._cacheable is None:
1204 1205 self._cacheable = newstat.cacheable()
1205 1206
1206 1207 # check again
1207 1208 if not self._cacheable:
1208 1209 return True
1209 1210
1210 1211 if self.cachestat != newstat:
1211 1212 self.cachestat = newstat
1212 1213 return True
1213 1214 else:
1214 1215 return False
1215 1216
1216 1217 @staticmethod
1217 1218 def stat(path):
1218 1219 try:
1219 1220 return util.cachestat(path)
1220 1221 except OSError as e:
1221 1222 if e.errno != errno.ENOENT:
1222 1223 raise
1223 1224
class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is returned.

    On external property set operations, stat() calls are performed and the new
    value is cached.

    On property delete operations, cached data is removed.

    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)

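# Usage sketch (editor's addition): the minimal subclass and decorated
# property shape that the docstring above describes.  The names below
# (_examplecache, _examplecacheowner, _loadbookmarks) are hypothetical;
# real users such as localrepo provide their own join() against the repo vfs.
class _examplecache(filecache):
    def join(self, obj, fname):
        # resolve the tracked file name against the owning object
        return obj.vfs.join(fname)

class _examplecacheowner(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}  # required by the descriptor protocol above

    @_examplecache('bookmarks')
    def bookmarks(self):
        # recomputed only when the tracked file changes on disk
        return _loadbookmarks(self)  # hypothetical loader
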
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special-case 'shell:' type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records, where each
    record is a revision specifier optionally followed by a space and a
    freeform string value. If the revision is known locally, it is converted
    to a rev; otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data

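# Example (editor's addition; the source name, command, and data records are
# hypothetical).  Given this configuration:
#
#   [extdata]
#   bugzilla = shell:cat .hg/bugzilla-ids
#
# and .hg/bugzilla-ids containing records such as:
#
#   9da0f1a09bca 1234
#   stable some freeform value
#
# extdatasource(repo, 'bugzilla') returns a {rev: value} map, silently
# dropping records whose revision specifier is unknown locally.
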
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

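# Calling-pattern sketch (editor's addition; the command is made up).  The
# wlock must already be held; the environment variable set by _locksub lets
# the child hg process inherit the lock instead of deadlocking on it:
def _wlocksubexample(repo):
    with repo.wlock():
        return wlocksub(repo, 'hg debugexamplecommand')
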
class progress(object):
    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit,
                         self.total)

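# Usage sketch (editor's addition; the topic and items are illustrative).
# The context-manager form guarantees that complete() runs and clears the
# progress bar even if the loop raises:
def _progressexample(ui, files):
    with progress(ui, 'scanning', unit='files', total=len(files)) as prog:
        for i, f in enumerate(files):
            prog.update(i, item=f)  # or prog.increment() for stepwise updates
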
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta')
            or ui.configbool('format', 'usegeneraldelta')
            or ui.configbool('format', 'sparse-revlog'))

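# For example (editor's addition), any one of these hgrc settings makes
# gdinitconfig() true, so a newly created repo uses general delta storage:
#
#   [format]
#   generaldelta = yes
#   usegeneraldelta = yes
#   sparse-revlog = yes
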
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')

class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of the file
        should be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines, which only contain '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as is, not in key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))

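# Round-trip sketch (editor's addition; the file name, keys, and values are
# illustrative):
def _simplekeyvaluefileexample(repo):
    skv = simplekeyvaluefile(repo.vfs, 'examplestate')
    skv.write({'version': '1', 'state': 'done'}, firstline='banner')
    d = skv.read(firstlinenonkeyval=True)
    # d == {'__firstline': 'banner', 'version': '1', 'state': 'done'}
    return d
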
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

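# Registration sketch (editor's addition; the hook below is hypothetical).
# util.hooks collects callables via add() and invokes them all when the
# instance itself is called, which is what prefetchfiles() does above:
def _registerexampleprefetch():
    def prefetch(repo, revs, match):
        pass  # an extension would fetch matching file contents here
    fileprefetchhooks.add('exampleext', prefetch)
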
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than that of the actual
        # underlying repository, so the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            if origrepolen >= len(repo):
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%d: and not obsolete()', origrepolen)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            draft = len(repo.revs('%ld and draft()', revs))
            secret = len(repo.revs('%ld and secret()', revs))
            if not (draft or secret):
                msg = _('new changesets %s\n') % revrange
            elif draft and secret:
                msg = _('new changesets %s (%d drafts, %d secrets)\n')
                msg %= (revrange, draft, secret)
            elif draft:
                msg = _('new changesets %s (%d drafts)\n')
                msg %= (revrange, draft)
            elif secret:
                msg = _('new changesets %s (%d secrets)\n')
                msg %= (revrange, secret)
            else:
                raise error.ProgrammingError('entered unreachable condition')
            repo.ui.status(msg)

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally
    loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags cache until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

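# Behavioral sketch (editor's addition; the hash is made up).  With
# "experimental.directaccess" enabled, a command such as
#
#   hg export 28ad74
#
# has its specs routed through unhidehashlikerevs(), which pins the hidden
# changeset into a 'visible-hidden' repoview (warning first when hiddentype
# is 'warn') so the command can operate on it.
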
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return the set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
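
# Usage sketch (editor's addition; the bookmark name is illustrative):
def _bookmarkrevsexample(repo):
    # changesets "owned" by the bookmark: its ancestors, minus anything also
    # reachable from a non-bookmarked head or from another bookmark
    return bookmarkrevs(repo, 'feature')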