##// END OF EJS Templates
narrow: add trailing slash to dir earlier for debug{revlog,index,data}...
Martin von Zweigbergk -
r37287:6ff8bd69 default
parent child Browse files
Show More
@@ -1,187 +1,185 b''
1 1 # narrowrevlog.py - revlog storing irrelevant nodes as "ellipsis" nodes
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial import (
11 11 error,
12 12 manifest,
13 13 revlog,
14 14 util,
15 15 )
16 16
def readtransform(self, text):
    """Flag-processor read hook for ellipsis revisions.

    Ellipsis text needs no decoding; return it unchanged and report
    that no raw-validation shortcut applies (False).
    """
    return text, False
19 19
def writetransform(self, text):
    """Flag-processor write hook for ellipsis revisions.

    The text is stored as-is; no transformation is applied.
    """
    return text, False
22 22
def rawtransform(self, text):
    """Flag-processor raw hook: ellipsis revisions never validate as raw."""
    return False
25 25
# Register the ellipsis flag processor at import time so any revlog
# carrying REVIDX_ELLIPSIS can be read or written without extra setup.
revlog.addflagprocessor(revlog.REVIDX_ELLIPSIS,
                        (readtransform, writetransform, rawtransform))
28 28
def setup():
    """No-op extension entry point.

    The only work needed -- registering the flag processor -- already
    happened at module load time.
    """
33 33
class excludeddir(manifest.treemanifest):
    """Placeholder manifest for a directory outside the narrowspec.

    With narrowing active on a treemanifest repository, some directory
    revlogs are absent from the clone -- a large storage win for
    clients. Internals still need a pseudo-manifest standing in for
    such a directory (its node is known, its contents are not), e.g.
    to detect a merge conflict outside the narrowspec. This class is
    that stand-in.
    """
    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # A single empty-named entry makes iterators and friends
        # surface the directory itself (i.e. something like "dir/").
        self._flags[''] = 't'
        self._files[''] = node

    def copy(self):
        # Manifests outside the narrowspec are never modified, so
        # share this instance instead of copying. That is noticeably
        # faster with very many excluded directories, and it keeps the
        # "copy" the same type as the original -- which the super
        # type's copy() would not.
        return self
60 60
class excludeddirmanifestctx(manifest.treemanifestctx):
    """Context wrapper around excludeddir -- see its docstring for why."""
    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        # Materialize the stand-in manifest on demand.
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        # Writing outside the narrowspec is always a programming error.
        raise error.ProgrammingError(
            'attempt to write manifest from excluded dir %s' % self._dir)
73 73
class excludedmanifestrevlog(manifest.manifestrevlog):
    """Stand-in revlog for an excluded treemanifest directory.

    When narrowing is active on a treemanifest repository, there are
    references to directories whose revlogs were skipped. This class
    conforms to the manifestrevlog interface for those directories and
    proactively refuses access from outside the narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def __len__(self):
        raise error.ProgrammingError(
            'attempt to get length of excluded dir %s' % self._dir)

    def rev(self, node):
        raise error.ProgrammingError(
            'attempt to get rev from excluded dir %s' % self._dir)

    def linkrev(self, node):
        raise error.ProgrammingError(
            'attempt to get linkrev from excluded dir %s' % self._dir)

    def node(self, rev):
        raise error.ProgrammingError(
            'attempt to get node from excluded dir %s' % self._dir)

    def add(self, *args, **kwargs):
        # Entries in dirlogs outside the narrow clone should never be
        # written. However, writesubtree() in _addtree() still calls
        # add() (always with a clean manifest -- _dirty is always False
        # in excludeddir instances), so accept and ignore the call.
        # TODO: possibly make _addtree() skip add() for clean manifests.
        pass
110 110
def makenarrowmanifestrevlog(mfrevlog, repo):
    """Patch mfrevlog's class in place so dirlog() honors repo's narrowspec."""
    # Idempotence guard: a revlog's class is only wrapped once.
    if util.safehasattr(mfrevlog, '_narrowed'):
        return

    class narrowmanifestrevlog(mfrevlog.__class__):
        # This function is called via debug{revlog,index,data}, but also during
        # at least some push operations. This will be used to wrap/exclude the
        # child directories when using treemanifests.
        def dirlog(self, d):
            # Normalize to the trailing-slash form used internally.
            if d and not d.endswith('/'):
                d = d + '/'
            if not repo.narrowmatch().visitdir(d[:-1] or '.'):
                return excludedmanifestrevlog(d)
            result = super(narrowmanifestrevlog, self).dirlog(d)
            # Recursively narrow child dirlogs as they are opened.
            makenarrowmanifestrevlog(result, repo)
            return result

    mfrevlog.__class__ = narrowmanifestrevlog
    mfrevlog._narrowed = True
130 128
def makenarrowmanifestlog(mfl, repo):
    """Swap mfl's class for one whose get() honors repo's narrowspec."""
    class narrowmanifestlog(mfl.__class__):
        def get(self, dir, node, verify=True):
            # Directories hidden by the narrowspec get a stand-in ctx.
            if not repo.narrowmatch().visitdir(dir[:-1] or '.'):
                return excludeddirmanifestctx(dir, node)
            return super(narrowmanifestlog, self).get(dir, node, verify=verify)
    mfl.__class__ = narrowmanifestlog
138 136
def makenarrowfilelog(fl, narrowmatch):
    """Swap fl's class for one that hides renames from outside narrowmatch."""
    class narrowfilelog(fl.__class__):
        def renamed(self, node):
            # A copy source outside the narrowspec is problematic, at
            # least for git-style diffs, because the base text of the
            # rename is unavailable -- so report such revisions as not
            # renamed. This logic was introduced in 3cd72b1 of narrowhg
            # (authored by martinvonz, reviewed by adgar), but that
            # revision doesn't have any additional commentary on what
            # problems we can encounter.
            meta = super(narrowfilelog, self).renamed(node)
            if meta and not narrowmatch(meta[0]):
                return None
            return meta

        def size(self, rev):
            # We take advantage of the fact that remotefilelog lacks a
            # node() method to just skip the rename-checking logic when
            # on remotefilelog. This might be incorrect on other
            # non-revlog-based storage engines, but for now this seems
            # to be fine.
            #
            # TODO: when remotefilelog is in core, improve this to
            # explicitly look for remotefilelog instead of cheating
            # with a hasattr check.
            if util.safehasattr(self, 'node'):
                node = self.node(rev)
                # Because renamed() above sometimes returns None even
                # when the revlog has copy metadata, the stored size
                # can be wrong for copies/renames; consult the super
                # class's renamed() and measure the real text instead.
                if super(narrowfilelog, self).renamed(node):
                    return len(self.read(node))
            return super(narrowfilelog, self).size(rev)

        def cmp(self, node, text):
            different = super(narrowfilelog, self).cmp(node, text)
            if not different:
                return different
            # Similar to size() above: if the file was copied from a
            # file outside the narrowspec, the super class was tricked
            # into thinking the file was not renamed and may report a
            # spurious difference -- fall back to comparing full texts.
            if super(narrowfilelog, self).renamed(node):
                t2 = self.read(node)
                return t2 != text
            return different

    fl.__class__ = narrowfilelog
@@ -1,3214 +1,3216 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import tempfile
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 bookmarks,
25 25 changelog,
26 26 copies,
27 27 crecord as crecordmod,
28 28 dirstateguard,
29 29 encoding,
30 30 error,
31 31 formatter,
32 32 logcmdutil,
33 33 match as matchmod,
34 34 merge as mergemod,
35 35 mergeutil,
36 36 obsolete,
37 37 patch,
38 38 pathutil,
39 39 pycompat,
40 40 registrar,
41 41 revlog,
42 42 rewriteutil,
43 43 scmutil,
44 44 smartset,
45 45 subrepoutil,
46 46 templatekw,
47 47 templater,
48 48 util,
49 49 vfs as vfsmod,
50 50 )
51 51
52 52 from .utils import (
53 53 dateutil,
54 54 stringutil,
55 55 )
56 56
# Convenience alias for an in-memory file-like buffer.
stringio = util.stringio

# templates of common command options

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
171 171
def ishunk(x):
    """Return True when x is a record/crecord hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
175 175
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose hunks create a new file and were
    edited by the user (i.e. are not present verbatim in originalchunks)."""
    touched = set()
    for chunk in chunks:
        if (ishunk(chunk) and chunk.header.isnewfile()
                and chunk not in originalchunks):
            touched.add(chunk.header.filename())
    return touched
183 183
def parsealiases(cmd):
    """Split a command-table key like "^log|history" into its names."""
    names = cmd.lstrip("^")
    return names.split("|")
186 186
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output gets labeled/colorized.

    Returns the original write method so the caller can restore it.
    """
    origwrite = ui.write

    def labeledwrite(*args, **kw):
        label = kw.pop(r'label', '')
        # difflabel yields (chunk, label-suffix) pairs for the output
        for chunk, l in patch.difflabel(lambda: args):
            origwrite(chunk, label=label + l)

    setattr(ui, 'write', labeledwrite)
    return origwrite
199 199
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user filter hunks, via the curses UI when enabled."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # test mode: drive the chunk selector from a scripted file
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
212 212
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter originalhunks; return (hunks, options).

    *operation* is used to build ui messages indicating what kind of
    filtering the user is doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
                                          testfile, operation)
    finally:
        # always restore the unwrapped write method
        ui.write = oldwrite
    return newchunks, newopts
229 229
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and delegate the commit to commitfunc.

    Builds a diff of the working directory, lets the user filter it via
    filterfn, rewrites the working directory to contain only the selected
    changes, calls commitfunc, and finally restores the unselected
    changes from backups. Aborts when the ui is non-interactive
    (suggesting cmdsuggest when given). backupall forces backing up every
    changed file rather than only the selected ones.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # git-style diff with function context, no dates: the format the
        # interactive hunk selectors expect
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers without files() (e.g. non-hunk chunks) are skipped
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # serialize only the selected hunks into a patch buffer
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this is racy as an editor could notice
                    # the file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; never mask the real outcome
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
411 411
class dirnode(object):
    """
    Represent a directory in the user's working copy, carrying the
    information needed to decide whether its status can be tersed.

    path is the path to this directory.

    statuses is the set of status characters of every file under this
    directory (including files in all of its subdirectories).

    files holds (filename, status) pairs for direct children only.

    subdirs maps a sub-directory name to that sub-directory's own
    dirnode object.
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record a file as a direct child of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file somewhere under this directory.

        A filename containing a path separator is routed (recursively)
        to the dirnode of the sub-directory it belongs to; otherwise it
        is recorded as a direct child. Either way status is merged into
        this node's status set.
        """
        if '/' in filename:
            subdir, remainder = filename.split('/', 1)

            # lazily create the dirnode for this sub-directory
            if subdir not in self.subdirs:
                subdirpath = os.path.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            self.subdirs[subdir].addfile(remainder, status)
        else:
            self._addfileindir(filename, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, os.path.join(self.path, name)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) pairs for this dirnode, tersed if possible.

        terseargs is the string of arguments the user passed with the
        `--terse` flag.

        If every file under this directory (recursively) shares one
        status and that status was requested in terseargs, a single
        (status, dirpath + separator) entry is produced. Otherwise the
        direct files are yielded individually and every sub-directory
        is walked recursively.
        """
        if len(self.statuses) == 1:
            onlyst = self.statuses.pop()

            # terse only when the status abbreviation was requested
            if onlyst in terseargs:
                yield onlyst, self.path + pycompat.ossep
                return

        # emit this directory's own files...
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # ...then recurse into the sub-directories
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath
511 511
def tersedir(statuslist, terseargs):
    """
    Terse the status when all files in a directory share that status.

    statuslist is a scmutil.status() object with one file list per
    status. terseargs is the string the user passed to the `--terse`
    flag.

    Builds a tree of dirnode objects that records, per directory,
    whether its contents can be tersed, then walks the tree to produce
    the final per-status lists.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # validate the user-supplied status characters
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_("'%s' not recognized") % s)

    # dirnode for the root of the repo
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # the root dir itself is never tersed, so add its files directly
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk each sub-directory, tersing where possible
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    for st in allst:
        tersedict[st].sort()
    return [tersedict[st] for st in allst]
560 560
561 561 def _commentlines(raw):
562 562 '''Surround lineswith a comment char and a new line'''
563 563 lines = raw.splitlines()
564 564 commentedlines = ['# %s' % line for line in lines]
565 565 return '\n'.join(commentedlines) + '\n'
566 566
def _conflictsmsg(repo):
    """Return commented text listing unresolved merge conflicts, or None
    when no merge state is active."""
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    matcher = scmutil.match(repo[None])
    pending = [f for f in mergestate.unresolved() if matcher(f)]
    if not pending:
        return _commentlines(_('No unresolved merge conflicts.'))

    mergeliststr = '\n'.join(
        [' %s' % util.pathto(repo.root, pycompat.getcwd(), path)
         for path in pending])
    msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
    return _commentlines(msg)
587 587
def _helpmessage(continuecmd, abortcmd):
    """Commented two-line hint telling how to continue or abort."""
    return _commentlines(_('To continue: %s\n'
                           'To abort: %s') % (continuecmd, abortcmd))
592 592
def _rebasemsg():
    """Hint lines for an unfinished rebase."""
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')
595 595
def _histeditmsg():
    """Hint lines for an unfinished histedit."""
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')
598 598
def _unshelvemsg():
    """Hint lines for an unfinished unshelve."""
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
601 601
def _updatecleanmsg(dest=None):
    """'hg update --clean' hint, warning that uncommitted changes are lost."""
    warning = _('warning: this will discard uncommitted changes')
    return 'hg update --clean %s (%s)' % (dest or '.', warning)
605 605
def _graftmsg():
    """Hint lines for an unfinished graft."""
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg graft --continue', _updatecleanmsg())
609 609
def _mergemsg():
    """Hint lines for an uncommitted merge."""
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg commit', _updatecleanmsg())
613 613
def _bisectmsg():
    """Hint lines for an in-progress bisect."""
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)
619 619
def fileexistspredicate(filename):
    """Return a predicate testing whether filename exists in repo.vfs."""
    def predicate(repo):
        return repo.vfs.exists(filename)
    return predicate
622 622
623 623 def _mergepredicate(repo):
624 624 return len(repo[None].parents()) > 1
625 625
# Table of unfinished repository states, consulted in order by
# _getrepostate().
STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)
639 639
def _getrepostate(repo):
    """Return (state, predicate, msgfn) for the first unfinished state
    detected in repo, or None when there is none."""
    # experimental config: commands.status.skipstates
    skip = set(repo.ui.configlist('commands', 'status.skipstates'))
    for state, detect, msgfn in STATES:
        if state not in skip and detect(repo):
            return (state, detect, msgfn)
648 648
def morestatus(repo, fm):
    """Write extra status output describing any unfinished repo state."""
    statetuple = _getrepostate(repo)
    label = 'status.morestatus'
    if not statetuple:
        return

    fm.startitem()
    state, statedetectionpredicate, helpfulmsg = statetuple
    statemsg = _('The repository is in an unfinished *%s* state.') % state
    fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
    conmsg = _conflictsmsg(repo)
    if conmsg:
        fm.write('conflictsmsg', '%s\n', conmsg, label=label)
    if helpfulmsg:
        helpmsg = helpfulmsg()
        fm.write('helpmsg', '%s\n', helpmsg, label=label)
663 663
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for e in keys:
        aliases = parsealiases(e)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        elif not strict:
            # unique-prefix matching: first alias starting with cmd
            found = next((a for a in aliases if a.startswith(cmd)), None)
        else:
            found = None
        if found is not None:
            isdebug = (aliases[0].startswith("debug")
                       or found.startswith("debug"))
            target = debugchoice if isdebug else choice
            target[found] = (aliases, table[e])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
701 701
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in choice:
        return choice[cmd]

    # several distinct prefix matches: ambiguous
    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    # exactly one prefix match
    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
717 717
def changebranch(ui, repo, revs, label):
    """Change the branch name of the given revs to label.

    The revisions must form a linear segment of topological heads that
    are safe to rewrite (checked via rewriteutil.precheck); merge and
    obsolete changesets are refused. Each changeset is recreated with
    its branch set to 'label', obsolescence markers and bookmarks are
    updated via scmutil.cleanupnodes, and the working copy is moved to
    the rewrite of its parent when applicable.

    Raises error.Abort when any precondition fails.
    """
    with repo.wlock(), repo.lock(), repo.transaction('branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.Abort("empty revision set")
        roots = repo.revs('roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(_("cannot change branch of non-linear revisions"))
        rewriteutil.precheck(repo, revs, 'change branch of')

        root = repo[roots.first()]
        if not root.p1().branch() == label and label in repo.branchmap():
            raise error.Abort(_("a branch of the same name already exists"))

        if repo.revs('merge() and %ld', revs):
            raise error.Abort(_("cannot change branch of a merge commit"))
        if repo.revs('obsolete() and %ld', revs):
            # message grammar fixed: "an obsolete", not "a obsolete"
            raise error.Abort(_("cannot change branch of an obsolete "
                                "changeset"))

        # make sure only topological heads
        if repo.revs('heads(%ld) - head()', revs):
            raise error.Abort(_("cannot change branch in middle of a stack"))

        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context
        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug("changing branch of '%s' from '%s' to '%s'\n"
                     % (hex(ctx.node()), oldbranch, label))
            extra = ctx.extra()
            # record the pre-rewrite node so the rewrite is traceable
            extra['branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(repo, (p1, p2),
                                ctx.description(),
                                ctx.files(),
                                filectxfn,
                                user=ctx.user(),
                                date=ctx.date(),
                                extra=extra,
                                branch=label)

            # preserve the phase of the original changeset
            commitphase = ctx.phase()
            overrides = {('phases', 'new-commit'): commitphase}
            with repo.ui.configoverride(overrides, 'branch-change'):
                newnode = repo.commitctx(mc)

            replacements[ctx.node()] = (newnode,)
            ui.debug('new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(repo, replacements, 'branch-change')

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg
                hg.update(repo, newid[0], quietempty=True)

        ui.status(_("changed branch on %d changesets\n") % len(replacements))
808 808
def findrepo(p):
    """Walk upward from directory p looking for a '.hg' directory.

    Returns the repository root (the directory containing '.hg'), or
    None when the filesystem root is reached without finding one.
    """
    while True:
        if os.path.isdir(os.path.join(p, ".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            # os.path.dirname() is a fixed point at the filesystem root
            return None
        p = parent
816 816
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # any modified, added, removed or deleted file makes the wdir dirty
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'), hint=hint)
    # recurse into subrepos, in a stable order
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
834 834
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if message or not logfile:
        return message
    # no -m given: pull the message from the log file (or stdin for '-')
    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # normalize line endings while reading the file
        return '\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(_("can't read commit message '%s': %s") %
                          (logfile, encoding.strtolocal(inst.strerror)))
853 853
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a changectx with two real parents is a merge
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
870 870
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        # MQ-style callers need the forcing editor so finishdesc/extramsg
        # are honored even without --edit
        def forceeditor(repo, ctx, subs):
            return commitforceeditor(repo, ctx, subs, finishdesc=finishdesc,
                                     extramsg=extramsg, editform=editform)
        return forceeditor
    if editform:
        def editor(repo, ctx, subs):
            return commiteditor(repo, ctx, subs, editform=editform)
        return editor
    return commiteditor
901 901
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(repo.ui, tmpl,
                                    defaults=templatekw.keywords,
                                    resources=resources)
    # 'ctx' is always available; caller-supplied props may shadow nothing
    mapping = {'ctx': ctx}
    mapping.update(props or {})
    return templ.renderdefault(mapping)
916 916
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # mapping from %-escape character to its template-language equivalent
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
        }
    # %N/%n only make sense when the caller supplies totals/sequence numbers
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # zero-pad the sequence number to the width of the total count
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            # template fragments ({...}) pass through untouched
            newname.append(pat[start:end])
            continue
        # literal segment: expand %-escapes and escape the rest so it
        # survives template parsing verbatim
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # trailing lone '%' with no escape character after it
                raise error.Abort(_("incomplete format spec in output "
                                    "filename"))
            c = pat[n + 1:n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(_("invalid format spec '%%%s' in output "
                                    "filename") % c)
    return ''.join(newname)
984 984
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format output filename pattern for ctx.

    An empty/None pattern is returned unchanged.
    """
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, _buildfntemplate(pat, **props),
                          pycompat.byteskwargs(props))
993 993
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    # the empty name and '-' both conventionally mean the standard streams
    if pat and pat != '-':
        return False
    return True
997 997
998 998 class _unclosablefile(object):
999 999 def __init__(self, fp):
1000 1000 self._fp = fp
1001 1001
1002 1002 def close(self):
1003 1003 pass
1004 1004
1005 1005 def __iter__(self):
1006 1006 return iter(self._fp)
1007 1007
1008 1008 def __getattr__(self, attr):
1009 1009 return getattr(self._fp, attr)
1010 1010
1011 1011 def __enter__(self):
1012 1012 return self
1013 1013
1014 1014 def __exit__(self, exc_type, exc_value, exc_tb):
1015 1015 pass
1016 1016
def makefileobj(ctx, pat, mode='wb', modemap=None, **props):
    """Open the file named by expanding pat for ctx, or wrap stdio.

    A stdio-like pattern ('' or '-') yields ui.fout/ui.fin wrapped so
    callers cannot close it. 'modemap' maps expanded filenames to open
    modes; a 'wb' entry is flipped to 'ab' so later writers append.
    """
    if isstdiofilename(pat):
        # hand out the ui streams, protected from being closed by callers
        ui = ctx.repo().ui
        if mode not in ('r', 'rb'):
            stream = ui.fout
        else:
            stream = ui.fin
        return _unclosablefile(stream)
    fn = makefilename(ctx, pat, **props)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # subsequent opens of the same file should append, not truncate
            modemap[fn] = 'ab'
    return open(fn, mode)
1033 1033
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    Used by the debug{revlog,index,data} commands. Exactly one of
    --changelog, --manifest, --dir or a filename selects the revlog;
    conflicting combinations raise error.Abort, and without a repo a
    raw .i file path is opened directly.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    # validate mutually exclusive option combinations up front
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            # dirlog entries are keyed with a trailing slash; normalize the
            # user-supplied path so 'dir' and 'dir/' both work
            if not dir.endswith('/'):
                dir = dir + '/'
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        # fall back to opening a bare revlog file from the filesystem
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
1078 1080
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matching pats[:-1] to pats[-1].

    Returns True when at least one copy failed, so callers can turn that
    into a non-zero exit status.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about unusable sources only when named explicitly
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/rename; returns True on failure, None otherwise
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after only records an already-performed copy/move
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # two-step rename through a temp name so the OS sees
                    # the case change
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # score a candidate strip length by how many stripped
                    # source paths already exist under dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
1326 1328
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1347 1349
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (msg, node, rejects) tuple; node is None when nothing was
    committed and rejects is True when --partial left reject files.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    # no filename means the hunk contained no actual patch
    if not tmpname:
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply the patch to the working directory
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except error.PatchError as e:
                if not partial:
                    raise error.Abort(pycompat.bytestr(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform,
                                             **pycompat.strkwargs(opts))
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                overrides = {}
                if partial:
                    overrides[('ui', 'allowemptycommit')] = True
                with repo.ui.configoverride(overrides, 'import'):
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
        else:
            # --bypass: build the commit in memory without touching the wdir
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except error.PatchError as e:
                    raise error.Abort(stringutil.forcebytestr(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.memctx(repo, (p1.node(), p2.node()),
                                        message,
                                        files=files,
                                        filectxfn=store,
                                        user=user,
                                        date=date,
                                        branch=branch,
                                        editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1518 1520
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1526 1528
def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
    """Emit one changeset as an "HG changeset patch" through write().

    The header block is written line by line, followed by any
    extension-provided extra headers, the description, and the diff
    against the selected parent.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against p2 instead of p1
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    write("# HG changeset patch\n")
    write("# User %s\n" % ctx.user())
    write("# Date %d %d\n" % ctx.date())
    write("# %s\n" % dateutil.datestr(ctx.date()))
    if branch and branch != 'default':
        write("# Branch %s\n" % branch)
    write("# Node ID %s\n" % hex(node))
    write("# Parent %s\n" % hex(prev))
    if len(parents) > 1:
        write("# Parent %s\n" % hex(parents[1]))

    # extension hook: extra header lines
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            write('# %s\n' % header)
    write(ctx.description().rstrip())
    write("\n\n")

    for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
        write(chunk, label=label)
1559 1561
def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      fntemplate: An optional string to use for generating patch file names.
      fp: An optional file-like object to which patches should be written.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fp is specified: All revs are written to the specified
                         file-like object.
        fntemplate specified: Each rev is written to a unique file named using
                            the given template.
        Neither fp nor template specified: All revs written to repo.ui.write()
    '''

    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    # per-filename open mode; 'wb' is flipped to 'ab' after first use so
    # several revs exported to one file are appended, not overwritten
    filemode = {}

    # 'write' is bound per destination: fp, ui, or (below) a per-rev file
    write = None
    dest = '<unnamed>'
    if fp:
        dest = getattr(fp, 'name', dest)
        def write(s, **kw):
            fp.write(s)
    elif not fntemplate:
        write = repo.ui.write

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        fo = None
        if not fp and fntemplate:
            # open one output file per revision, rebinding write() to it
            fo = makefileobj(ctx, fntemplate, mode='wb', modemap=filemode,
                             total=total, seqno=seqno, revwidth=revwidth)
            dest = fo.name
            def write(s, **kw):
                fo.write(s)
        if not dest.startswith('<'):
            repo.ui.note("%s\n" % dest)
        _exportsingle(
            repo, ctx, match, switch_parent, rev, seqno, write, opts)
        if fo is not None:
            fo.close()
1615 1617
def showmarker(fm, marker, index=None):
    """Render one obsolescence marker through formatter *fm*.

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    successors = marker.succnodes()
    fm.condwrite(successors, 'succnodes', '%s ',
                 fm.formatlist(map(hex, successors), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parentnodes), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    metadata = marker.metadata().copy()
    # the date was already shown separately above
    metadata.pop('date', None)
    stringified = util.rapply(pycompat.maybebytestr, metadata)
    fm.write('metadata', '{%s}',
             fm.formatdict(stringified, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1637 1639
def finddate(ui, repo, date):
    """Return (as a string) the rev of the tipmost changeset matching *date*.

    Raises error.Abort when no changeset matches the date spec.
    """
    datematcher = dateutil.matchdate(date)
    matched = {}

    def collect(ctx, fns):
        ctxdate = ctx.date()
        if datematcher(ctxdate[0]):
            matched[ctx.rev()] = ctxdate

    everything = scmutil.matchall(repo)
    for ctx in walkchangerevs(repo, everything, {'rev': None}, collect):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, dateutil.datestr(matched[rev])))
            return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
1658 1660
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double each step.

    The generator is infinite: once the size reaches (or overshoots)
    *sizelimit* it stops growing and repeats that value forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
1664 1666
def _walkrevs(repo, opts):
    """Resolve the set of revisions to walk for log-like commands.

    Default --rev value depends on --follow, but --follow behavior
    depends on revisions resolved from --rev...
    """
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        # explicit revisions always win
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # following from the null revision: nothing to walk
            return smartset.baseset()
        return repo.revs('reverse(:.)')
    everything = smartset.spanset(repo)
    everything.reverse()
    return everything
1679 1681
class FileWalkError(Exception):
    """Raised by walkfilerevs() when the file history cannot be walked
    using filelogs alone and the caller must fall back to the slow path."""
    pass
1682 1684
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    # rename sources discovered while following copies; presumably
    # (path, filenode) pairs as returned by filelog.renamed() -- TODO confirm
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yields (filename, filenode-or-None); filenode is only resolved
        # when following, where the file must exist in the working parent
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1779 1781
class _followfilter(object):
    """Incrementally decide whether revisions belong to a --follow walk.

    The first revision passed to match() anchors the walk (startrev);
    the *roots* set then tracks descendants (walking forward) or
    ancestors (walking backward) of that anchor.

    NOTE(review): the roots bookkeeping appears to assume revisions are
    fed in a single monotonic direction away from startrev -- confirm
    with callers before reusing elsewhere.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev       # anchor; set on first match() call
        self.roots = set()            # frontier of the ancestor/descendant walk
        self.onlyfirst = onlyfirst    # restrict to first parents (--follow-first)

    def match(self, rev):
        def realparents(rev):
            # parents to consider: only p1 under --follow-first, else all
            # non-null parents
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            # first revision seen anchors the walk and always matches
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1817 1819
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = [f for f in ctx.files() if match(f)]
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): this relies on 'wanted' supporting __sub__
                # with a list; a plain set (filelog fast path) or the
                # lazywantedset above would raise TypeError here -- confirm
                # --prune is exercised on those paths.
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # prepare() is called in forward (ascending) order within the
            # window, then the revisions are yielded in walk order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1954 1956
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by *match* for addition to the dirstate.

    Recurses into subrepositories; *prefix* is prepended to paths when
    reporting subrepo files.  With *explicitonly*, only exactly-named
    files are added.  Returns the list of files that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # record files the matcher rejects, then delegate to the original
    # handler; append() returns None, so match.bad() always runs too
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for filename collisions on case-insensitive systems
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
1997 1999
def addwebdirpath(repo, serverpath, webconf):
    """Register *repo* under *serverpath* in *webconf*, then recurse into
    every subrepository recorded in any revision of .hgsub."""
    root = repo.root
    webconf[serverpath] = root
    repo.ui.debug('adding %s = %s\n' % (serverpath, root))

    # walk every revision that touched .hgsub so historical subrepos are
    # registered as well
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2006 2008
def forget(ui, repo, match, prefix, explicitonly, dryrun):
    """Stop tracking the matched files without deleting them.

    Recurses into subrepositories.  Returns a (bad, forgot) pair: files
    that could not be forgotten and files that were (or, with *dryrun*,
    would have been) forgotten.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # record matcher rejections, then delegate to the original handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix, dryrun=dryrun)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about named files that are not actually tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2055 2057
def files(ui, ctx, m, fm, fmt, subrepos):
    """List files of *ctx* matching *m* through formatter *fm*.

    Returns 0 when at least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    dirstate = ctx.repo().dirstate
    ret = 1

    for f in ctx.matches(m):
        # in the working copy, skip files marked as removed
        if rev is None and dirstate[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fctx = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if not (subrepos or m.exact(subpath) or any(submatch.files())):
            continue
        sub = ctx.sub(subpath)
        recurse = m.exact(subpath) or subrepos
        try:
            if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                ret = 0
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % m.abs(subpath))

    return ret
2085 2087
def remove(ui, repo, m, prefix, after, force, subrepos, dryrun, warnings=None):
    """Schedule matched files for removal, mirroring 'hg remove'.

    *after* records deletions already made in the working directory;
    *force* removes regardless of local modifications.  When *warnings*
    is passed (by the recursive subrepo calls), messages are appended to
    it instead of being printed here.  Returns 0 on success, 1 if any
    file could not be removed.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        # top-level call: collect warnings and print them at the end
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   dryrun, warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    # build the list of files to actually forget/unlink, depending on mode
    # (NOTE: 'list' shadows the builtin of the same name)
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            if ui.verbose or (f in files):
                warnings.append(_('not removing %s: file still exists\n')
                                % m.rel(f))
                ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue # we never unlink added files on remove
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2205 2207
2206 2208 def _updatecatformatter(fm, ctx, matcher, path, decode):
2207 2209 """Hook for adding data to the formatter used by ``hg cat``.
2208 2210
2209 2211 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2210 2212 this method first."""
2211 2213 data = ctx[path].data()
2212 2214 if decode:
2213 2215 data = ctx.repo().wwritedata(path, data)
2214 2216 fm.startitem()
2215 2217 fm.write('data', '%s', data)
2216 2218 fm.data(abspath=path, path=matcher.rel(path))
2217 2219
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write the contents of matched files, mirroring 'hg cat'.

    Output goes through *basefm* (optionally reopened per file when a
    *fntemplate* is given).  Recurses into subrepositories.  Returns 0
    when at least one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(ctx, fntemplate,
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename, opts) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                scmutil.fileprefetchhooks(repo, ctx, [file])
                write(file)
                return 0
        except KeyError:
            # fall through to the generic walk below
            pass

    files = [f for f in ctx.walk(matcher)]
    # give extensions (e.g. largefiles) a chance to fetch contents up front
    scmutil.fileprefetchhooks(repo, ctx, files)

    for abs in files:
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path),
                           **pycompat.strkwargs(opts)):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2270 2272
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = dateutil.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    else:
        dsguard = None
    with dsguard or util.nullcontextmanager():
        if dsguard and scmutil.addremove(repo, matcher, "", opts) != 0:
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)
2291 2293
def samefile(f, ctx1, ctx2):
    """Report whether file *f* is identical (content and flags) in the
    two changesets; absence from both counts as equality."""
    if f not in ctx1.manifest():
        # equal only when the file is missing from ctx2 as well
        return f not in ctx2.manifest()
    if f not in ctx2.manifest():
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2303 2305
2304 2306 def amend(ui, repo, old, extra, pats, opts):
2305 2307 # avoid cycle context -> subrepo -> cmdutil
2306 2308 from . import context
2307 2309
2308 2310 # amend will reuse the existing user if not specified, but the obsolete
2309 2311 # marker creation requires that the current user's name is specified.
2310 2312 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2311 2313 ui.username() # raise exception if username not set
2312 2314
2313 2315 ui.note(_('amending changeset %s\n') % old)
2314 2316 base = old.p1()
2315 2317
2316 2318 with repo.wlock(), repo.lock(), repo.transaction('amend'):
2317 2319 # Participating changesets:
2318 2320 #
2319 2321 # wctx o - workingctx that contains changes from working copy
2320 2322 # | to go into amending commit
2321 2323 # |
2322 2324 # old o - changeset to amend
2323 2325 # |
2324 2326 # base o - first parent of the changeset to amend
2325 2327 wctx = repo[None]
2326 2328
2327 2329 # Copy to avoid mutating input
2328 2330 extra = extra.copy()
2329 2331 # Update extra dict from amended commit (e.g. to preserve graft
2330 2332 # source)
2331 2333 extra.update(old.extra())
2332 2334
2333 2335 # Also update it from the from the wctx
2334 2336 extra.update(wctx.extra())
2335 2337
2336 2338 user = opts.get('user') or old.user()
2337 2339 date = opts.get('date') or old.date()
2338 2340
2339 2341 # Parse the date to allow comparison between date and old.date()
2340 2342 date = dateutil.parsedate(date)
2341 2343
2342 2344 if len(old.parents()) > 1:
2343 2345 # ctx.files() isn't reliable for merges, so fall back to the
2344 2346 # slower repo.status() method
2345 2347 files = set([fn for st in repo.status(base, old)[:3]
2346 2348 for fn in st])
2347 2349 else:
2348 2350 files = set(old.files())
2349 2351
2350 2352 # add/remove the files to the working copy if the "addremove" option
2351 2353 # was specified.
2352 2354 matcher = scmutil.match(wctx, pats, opts)
2353 2355 if (opts.get('addremove')
2354 2356 and scmutil.addremove(repo, matcher, "", opts)):
2355 2357 raise error.Abort(
2356 2358 _("failed to mark all new/missing files as added/removed"))
2357 2359
2358 2360 # Check subrepos. This depends on in-place wctx._status update in
2359 2361 # subrepo.precommit(). To minimize the risk of this hack, we do
2360 2362 # nothing if .hgsub does not exist.
2361 2363 if '.hgsub' in wctx or '.hgsub' in old:
2362 2364 subs, commitsubs, newsubstate = subrepoutil.precommit(
2363 2365 ui, wctx, wctx._status, matcher)
2364 2366 # amend should abort if commitsubrepos is enabled
2365 2367 assert not commitsubs
2366 2368 if subs:
2367 2369 subrepoutil.writestate(repo, newsubstate)
2368 2370
2369 2371 ms = mergemod.mergestate.read(repo)
2370 2372 mergeutil.checkunresolved(ms)
2371 2373
2372 2374 filestoamend = set(f for f in wctx.files() if matcher(f))
2373 2375
2374 2376 changes = (len(filestoamend) > 0)
2375 2377 if changes:
2376 2378 # Recompute copies (avoid recording a -> b -> a)
2377 2379 copied = copies.pathcopies(base, wctx, matcher)
2378 2380 if old.p2:
2379 2381 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
2380 2382
2381 2383 # Prune files which were reverted by the updates: if old
2382 2384 # introduced file X and the file was renamed in the working
2383 2385 # copy, then those two files are the same and
2384 2386 # we can discard X from our list of files. Likewise if X
2385 2387 # was removed, it's no longer relevant. If X is missing (aka
2386 2388 # deleted), old X must be preserved.
2387 2389 files.update(filestoamend)
2388 2390 files = [f for f in files if (not samefile(f, wctx, base)
2389 2391 or f in wctx.deleted())]
2390 2392
2391 2393 def filectxfn(repo, ctx_, path):
2392 2394 try:
2393 2395 # If the file being considered is not amongst the files
2394 2396 # to be amended, we should return the file context from the
2395 2397 # old changeset. This avoids issues when only some files in
2396 2398 # the working copy are being amended but there are also
2397 2399 # changes to other files from the old changeset.
2398 2400 if path not in filestoamend:
2399 2401 return old.filectx(path)
2400 2402
2401 2403 # Return None for removed files.
2402 2404 if path in wctx.removed():
2403 2405 return None
2404 2406
2405 2407 fctx = wctx[path]
2406 2408 flags = fctx.flags()
2407 2409 mctx = context.memfilectx(repo, ctx_,
2408 2410 fctx.path(), fctx.data(),
2409 2411 islink='l' in flags,
2410 2412 isexec='x' in flags,
2411 2413 copied=copied.get(path))
2412 2414 return mctx
2413 2415 except KeyError:
2414 2416 return None
2415 2417 else:
2416 2418 ui.note(_('copying changeset %s to %s\n') % (old, base))
2417 2419
2418 2420 # Use version of files as in the old cset
2419 2421 def filectxfn(repo, ctx_, path):
2420 2422 try:
2421 2423 return old.filectx(path)
2422 2424 except KeyError:
2423 2425 return None
2424 2426
2425 2427 # See if we got a message from -m or -l, if not, open the editor with
2426 2428 # the message of the changeset to amend.
2427 2429 message = logmessage(ui, opts)
2428 2430
2429 2431 editform = mergeeditform(old, 'commit.amend')
2430 2432 editor = getcommiteditor(editform=editform,
2431 2433 **pycompat.strkwargs(opts))
2432 2434
2433 2435 if not message:
2434 2436 editor = getcommiteditor(edit=True, editform=editform)
2435 2437 message = old.description()
2436 2438
2437 2439 pureextra = extra.copy()
2438 2440 extra['amend_source'] = old.hex()
2439 2441
2440 2442 new = context.memctx(repo,
2441 2443 parents=[base.node(), old.p2().node()],
2442 2444 text=message,
2443 2445 files=files,
2444 2446 filectxfn=filectxfn,
2445 2447 user=user,
2446 2448 date=date,
2447 2449 extra=extra,
2448 2450 editor=editor)
2449 2451
2450 2452 newdesc = changelog.stripdesc(new.description())
2451 2453 if ((not changes)
2452 2454 and newdesc == old.description()
2453 2455 and user == old.user()
2454 2456 and date == old.date()
2455 2457 and pureextra == old.extra()):
2456 2458 # nothing changed. continuing here would create a new node
2457 2459 # anyway because of the amend_source noise.
2458 2460 #
2459 2461 # This not what we expect from amend.
2460 2462 return old.node()
2461 2463
2462 2464 if opts.get('secret'):
2463 2465 commitphase = 'secret'
2464 2466 else:
2465 2467 commitphase = old.phase()
2466 2468 overrides = {('phases', 'new-commit'): commitphase}
2467 2469 with ui.configoverride(overrides, 'amend'):
2468 2470 newid = repo.commitctx(new)
2469 2471
2470 2472 # Reroute the working copy parent to the new changeset
2471 2473 repo.setparents(newid, nullid)
2472 2474 mapping = {old.node(): (newid,)}
2473 2475 obsmetadata = None
2474 2476 if opts.get('note'):
2475 2477 obsmetadata = {'note': opts['note']}
2476 2478 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)
2477 2479
2478 2480 # Fixing the dirstate because localrepo.commitctx does not update
2479 2481 # it. This is rather convenient because we did not need to update
2480 2482 # the dirstate for all the files in the new commit which commitctx
2481 2483 # could have done if it updated the dirstate. Now, we can
2482 2484 # selectively update the dirstate only for the amended files.
2483 2485 dirstate = repo.dirstate
2484 2486
2485 2487 # Update the state of the files which were added and
2486 2488 # and modified in the amend to "normal" in the dirstate.
2487 2489 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
2488 2490 for f in normalfiles:
2489 2491 dirstate.normal(f)
2490 2492
2491 2493 # Update the state of files which were removed in the amend
2492 2494 # to "removed" in the dirstate.
2493 2495 removedfiles = set(wctx.removed()) & filestoamend
2494 2496 for f in removedfiles:
2495 2497 dirstate.drop(f)
2496 2498
2497 2499 return newid
2498 2500
def commiteditor(repo, ctx, subs, editform=''):
    """Return the commit message for ctx, invoking an editor if needed.

    A pre-existing non-empty description is returned unchanged;
    otherwise the user is prompted via commitforceeditor(), which
    aborts when the message is left unmodified.
    """
    description = ctx.description()
    if description:
        return description
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2504 2506
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Open the user's editor to obtain a commit message for ctx.

    finishdesc, if given, post-processes the edited text. extramsg is an
    extra 'HG:' hint line shown in the editor. editform selects the most
    specific 'committemplate' config entry. With
    unchangedmessagedetection, abort when the user saved the template
    text unmodified. Raises error.Abort on empty or unchanged messages.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Look up the most specific committemplate config key: e.g. for
    # editform 'commit.amend' try 'changeset.commit.amend', then
    # 'changeset.commit', then 'changeset'.
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        # no template configured: use the default 'HG:'-prefixed skeleton
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    # 'pending' is the repo root only when there is pending transaction
    # data for the editor's hooks/tools to see (falsy otherwise)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path, action='commit')
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop the 'HG:' helper lines that were added for the editor
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2554 2556
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the configured 'committemplate' entry *ref* for ctx.

    Returns the rendered editor text as a string. extramsg is passed to
    the template as the 'extramsg' keyword; subs is currently unused
    here but kept for signature parity with buildcommittext().
    """
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    t = logcmdutil.changesettemplater(ui, repo, spec)
    # make every [committemplate] key available to the template,
    # unquoting values so they can contain template syntax
    t.t.cache.update((k, templater.unquotestring(v))
                     for k, v in repo.ui.configitems('committemplate'))

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    # render into a ui buffer rather than to the terminal
    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2568 2570
def hgprefix(msg):
    """Prefix every non-empty line of msg with 'HG: ', dropping blanks."""
    lines = msg.split("\n")
    return "\n".join("HG: " + line for line in lines if line)
2571 2573
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) editor text for committing ctx.

    The result is the current description (if any) followed by
    'HG:'-prefixed helper lines: instructions, extramsg, user, branch,
    bookmark, subrepos and the file lists. The 'HG:' lines are later
    stripped by commitforceeditor().
    """
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(hgprefix(_("Enter commit message."
                      " Lines beginning with 'HG:' are removed.")))
    edittext.append(hgprefix(extramsg))
    edittext.append("HG: --")
    edittext.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        edittext.append(hgprefix(_("branch merge")))
    if ctx.branch():
        edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
    edittext.extend([hgprefix(_("added %s") % f) for f in added])
    edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
    edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
    if not added and not modified and not removed:
        edittext.append(hgprefix(_("no files changed")))
    edittext.append("")

    return "\n".join(edittext)
2599 2601
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print post-commit status messages for the new changeset *node*.

    Reports 'created new head' when the commit added a branch head (see
    the table below), 'reopening closed branch head' when a closed head
    is being reopened, and the committed changeset id in verbose/debug
    mode. bheads is the list of branch heads before the commit.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r.rev())

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))
2647 2649
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status for pats/opts after a commit."""
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
2650 2652
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore files matched by pats/opts to their state in ctx.

    parents is the (parent, p2) pair of the working directory. The bulk
    of the work is classifying every matched file into one of the action
    sets below (revert/add/remove/drop/forget/undelete/noop/unknown),
    deciding the backup strategy per file, and finally delegating the
    filesystem/dirstate changes to _performrevert(). Subrepositories in
    the match are reverted recursively at the end.
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already collected, subrepos,
                # and directories that contain collected files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1  # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], dsmodifiedbackup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, bytes):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            # legacy hook point: warn extensions still wrapping
            # _revertprefetch instead of using scmutil.fileprefetchhooks
            if _revertprefetch is not _revertprefetchstub:
                ui.deprecwarn("'cmdutil._revertprefetch' is deprecated, "
                              "add a callback to 'scmutil.fileprefetchhooks'",
                              '4.6', stacklevel=1)
                _revertprefetch(repo, ctx,
                                *[actions[name][0] for name in needdata])
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.fileprefetchhooks
            prefetch(repo, ctx, [f for sublist in oplist for f in sublist])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats,
                                         **pycompat.strkwargs(opts))
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
2943 2945
def _revertprefetchstub(repo, ctx, *files):
    """Stub method for detecting extension wrapping of _revertprefetch(), to
    issue a deprecation warning."""

# revert() compares this against the stub above: any extension that has
# wrapped it triggers the deprecation warning and the legacy call.
_revertprefetch = _revertprefetchstub
2949 2951
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """Apply the per-file actions computed by revert().

    This is an independent function so extensions can plug in and react
    to the imminent revert. actions maps action names to
    ([files], message) pairs; tobackup is the set of files to back up
    before interactive modification.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    # files the user declined to touch in interactive mode
    excluded_files = []

    def checkout(f):
        # write f's content from the target revision into the working dir
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # delete f from disk (best-effort) and mark it removed in dirstate
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions['revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'apply'
            reversehunks = False
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy records so reverted renames keep their source
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3085 3087
class command(registrar.command):
    """deprecated: use registrar.command instead"""
    def _doregister(self, func, name, *args, **kwargs):
        func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
        return super(command, self)._doregister(func, name, *args, **kwargs)
3091 3093
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
# Consumed by checkunfinished() and clearunfinished() below.
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3120 3122
def checkunfinished(repo, commit=False):
    '''Abort if an unfinished multistep operation (like graft) is found.

    With commit=True, states whose entry allows committing are ignored.
    It's probably good to check this right before bailifchanged().
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if allowcommit and commit:
            continue
        if repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3131 3133
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    # First pass: abort before deleting anything if an unclearable
    # state file exists.
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # Second pass: remove every clearable state file that is present.
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3142 3144
# (state file, command that continues the interrupted operation) pairs,
# consumed by howtocontinue() below.
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3147 3149
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple. 'msg' is a string (or None when
    nothing is pending) and 'warning' is a boolean.
    '''
    contmsg = _("continue: %s")
    for statefile, command in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return contmsg % command, True
    wctx = repo[None]
    if wctx.dirty(missing=True, merge=False, branch=False):
        return contmsg % _("hg commit"), False
    return None, None
3165 3167
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a matching afterresolvedstates, howtocontinue will yield
    repo.ui.warn as the reporter.

    Otherwise, it will yield repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3180 3182
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
3195 3197
class changeset_printer(logcmdutil.changesetprinter):
    """Deprecated shim for logcmdutil.changesetprinter; warns on use."""

    def __init__(self, ui, *args, **kwargs):
        msg = ("'cmdutil.changeset_printer' is deprecated, "
               "use 'logcmdutil.logcmdutil'")
        ui.deprecwarn(msg, "4.6")
        super(changeset_printer, self).__init__(ui, *args, **kwargs)
3203 3205
def displaygraph(ui, *args, **kwargs):
    """Deprecated shim that warns, then forwards to logcmdutil.displaygraph."""
    ui.deprecwarn("'cmdutil.displaygraph' is deprecated, "
                  "use 'logcmdutil.displaygraph'", "4.6")
    return logcmdutil.displaygraph(ui, *args, **kwargs)
3209 3211
def show_changeset(ui, *args, **kwargs):
    """Deprecated shim that warns, then forwards to
    logcmdutil.changesetdisplayer."""
    ui.deprecwarn("'cmdutil.show_changeset' is deprecated, "
                  "use 'logcmdutil.changesetdisplayer'", "4.6")
    return logcmdutil.changesetdisplayer(ui, *args, **kwargs)
@@ -1,1573 +1,1571 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import itertools
12 12 import struct
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 )
19 19 from . import (
20 20 error,
21 21 mdiff,
22 22 policy,
23 23 revlog,
24 24 util,
25 25 )
26 26
27 27 parsers = policy.importmod(r'parsers')
28 28 propertycache = util.propertycache
29 29
30 30 def _parse(data):
31 31 # This method does a little bit of excessive-looking
32 32 # precondition checking. This is so that the behavior of this
33 33 # class exactly matches its C counterpart to try and help
34 34 # prevent surprise breakage for anyone that develops against
35 35 # the pure version.
36 36 if data and data[-1:] != '\n':
37 37 raise ValueError('Manifest did not end in a newline.')
38 38 prev = None
39 39 for l in data.splitlines():
40 40 if prev is not None and prev > l:
41 41 raise ValueError('Manifest lines not in sorted order.')
42 42 prev = l
43 43 f, n = l.split('\0')
44 44 if len(n) > 40:
45 45 yield f, bin(n[:40]), n[40:]
46 46 else:
47 47 yield f, bin(n), ''
48 48
49 49 def _text(it):
50 50 files = []
51 51 lines = []
52 52 _hex = revlog.hex
53 53 for f, n, fl in it:
54 54 files.append(f)
55 55 # if this is changed to support newlines in filenames,
56 56 # be sure to check the templates/ dir again (especially *-raw.tmpl)
57 57 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
58 58
59 59 _checkforbidden(files)
60 60 return ''.join(lines)
61 61
62 62 class lazymanifestiter(object):
63 63 def __init__(self, lm):
64 64 self.pos = 0
65 65 self.lm = lm
66 66
67 67 def __iter__(self):
68 68 return self
69 69
70 70 def next(self):
71 71 try:
72 72 data, pos = self.lm._get(self.pos)
73 73 except IndexError:
74 74 raise StopIteration
75 75 if pos == -1:
76 76 self.pos += 1
77 77 return data[0]
78 78 self.pos += 1
79 79 zeropos = data.find('\x00', pos)
80 80 return data[pos:zeropos]
81 81
82 82 __next__ = next
83 83
84 84 class lazymanifestiterentries(object):
85 85 def __init__(self, lm):
86 86 self.lm = lm
87 87 self.pos = 0
88 88
89 89 def __iter__(self):
90 90 return self
91 91
92 92 def next(self):
93 93 try:
94 94 data, pos = self.lm._get(self.pos)
95 95 except IndexError:
96 96 raise StopIteration
97 97 if pos == -1:
98 98 self.pos += 1
99 99 return data
100 100 zeropos = data.find('\x00', pos)
101 101 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
102 102 zeropos + 1, 40)
103 103 flags = self.lm._getflags(data, self.pos, zeropos)
104 104 self.pos += 1
105 105 return (data[pos:zeropos], hashval, flags)
106 106
107 107 __next__ = next
108 108
109 109 def unhexlify(data, extra, pos, length):
110 110 s = bin(data[pos:pos + length])
111 111 if extra:
112 112 s += chr(extra & 0xff)
113 113 return s
114 114
115 115 def _cmp(a, b):
116 116 return (a > b) - (a < b)
117 117
118 118 class _lazymanifest(object):
119 119 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
120 120 if positions is None:
121 121 self.positions = self.findlines(data)
122 122 self.extrainfo = [0] * len(self.positions)
123 123 self.data = data
124 124 self.extradata = []
125 125 else:
126 126 self.positions = positions[:]
127 127 self.extrainfo = extrainfo[:]
128 128 self.extradata = extradata[:]
129 129 self.data = data
130 130
131 131 def findlines(self, data):
132 132 if not data:
133 133 return []
134 134 pos = data.find("\n")
135 135 if pos == -1 or data[-1:] != '\n':
136 136 raise ValueError("Manifest did not end in a newline.")
137 137 positions = [0]
138 138 prev = data[:data.find('\x00')]
139 139 while pos < len(data) - 1 and pos != -1:
140 140 positions.append(pos + 1)
141 141 nexts = data[pos + 1:data.find('\x00', pos + 1)]
142 142 if nexts < prev:
143 143 raise ValueError("Manifest lines not in sorted order.")
144 144 prev = nexts
145 145 pos = data.find("\n", pos + 1)
146 146 return positions
147 147
148 148 def _get(self, index):
149 149 # get the position encoded in pos:
150 150 # positive number is an index in 'data'
151 151 # negative number is in extrapieces
152 152 pos = self.positions[index]
153 153 if pos >= 0:
154 154 return self.data, pos
155 155 return self.extradata[-pos - 1], -1
156 156
157 157 def _getkey(self, pos):
158 158 if pos >= 0:
159 159 return self.data[pos:self.data.find('\x00', pos + 1)]
160 160 return self.extradata[-pos - 1][0]
161 161
162 162 def bsearch(self, key):
163 163 first = 0
164 164 last = len(self.positions) - 1
165 165
166 166 while first <= last:
167 167 midpoint = (first + last)//2
168 168 nextpos = self.positions[midpoint]
169 169 candidate = self._getkey(nextpos)
170 170 r = _cmp(key, candidate)
171 171 if r == 0:
172 172 return midpoint
173 173 else:
174 174 if r < 0:
175 175 last = midpoint - 1
176 176 else:
177 177 first = midpoint + 1
178 178 return -1
179 179
180 180 def bsearch2(self, key):
181 181 # same as the above, but will always return the position
182 182 # done for performance reasons
183 183 first = 0
184 184 last = len(self.positions) - 1
185 185
186 186 while first <= last:
187 187 midpoint = (first + last)//2
188 188 nextpos = self.positions[midpoint]
189 189 candidate = self._getkey(nextpos)
190 190 r = _cmp(key, candidate)
191 191 if r == 0:
192 192 return (midpoint, True)
193 193 else:
194 194 if r < 0:
195 195 last = midpoint - 1
196 196 else:
197 197 first = midpoint + 1
198 198 return (first, False)
199 199
200 200 def __contains__(self, key):
201 201 return self.bsearch(key) != -1
202 202
203 203 def _getflags(self, data, needle, pos):
204 204 start = pos + 41
205 205 end = data.find("\n", start)
206 206 if end == -1:
207 207 end = len(data) - 1
208 208 if start == end:
209 209 return ''
210 210 return self.data[start:end]
211 211
212 212 def __getitem__(self, key):
213 213 if not isinstance(key, bytes):
214 214 raise TypeError("getitem: manifest keys must be a bytes.")
215 215 needle = self.bsearch(key)
216 216 if needle == -1:
217 217 raise KeyError
218 218 data, pos = self._get(needle)
219 219 if pos == -1:
220 220 return (data[1], data[2])
221 221 zeropos = data.find('\x00', pos)
222 222 assert 0 <= needle <= len(self.positions)
223 223 assert len(self.extrainfo) == len(self.positions)
224 224 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
225 225 flags = self._getflags(data, needle, zeropos)
226 226 return (hashval, flags)
227 227
228 228 def __delitem__(self, key):
229 229 needle, found = self.bsearch2(key)
230 230 if not found:
231 231 raise KeyError
232 232 cur = self.positions[needle]
233 233 self.positions = self.positions[:needle] + self.positions[needle + 1:]
234 234 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
235 235 if cur >= 0:
236 236 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
237 237
238 238 def __setitem__(self, key, value):
239 239 if not isinstance(key, bytes):
240 240 raise TypeError("setitem: manifest keys must be a byte string.")
241 241 if not isinstance(value, tuple) or len(value) != 2:
242 242 raise TypeError("Manifest values must be a tuple of (node, flags).")
243 243 hashval = value[0]
244 244 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
245 245 raise TypeError("node must be a 20-byte byte string")
246 246 flags = value[1]
247 247 if len(hashval) == 22:
248 248 hashval = hashval[:-1]
249 249 if not isinstance(flags, bytes) or len(flags) > 1:
250 250 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
251 251 needle, found = self.bsearch2(key)
252 252 if found:
253 253 # put the item
254 254 pos = self.positions[needle]
255 255 if pos < 0:
256 256 self.extradata[-pos - 1] = (key, hashval, value[1])
257 257 else:
258 258 # just don't bother
259 259 self.extradata.append((key, hashval, value[1]))
260 260 self.positions[needle] = -len(self.extradata)
261 261 else:
262 262 # not found, put it in with extra positions
263 263 self.extradata.append((key, hashval, value[1]))
264 264 self.positions = (self.positions[:needle] + [-len(self.extradata)]
265 265 + self.positions[needle:])
266 266 self.extrainfo = (self.extrainfo[:needle] + [0] +
267 267 self.extrainfo[needle:])
268 268
269 269 def copy(self):
270 270 # XXX call _compact like in C?
271 271 return _lazymanifest(self.data, self.positions, self.extrainfo,
272 272 self.extradata)
273 273
274 274 def _compact(self):
275 275 # hopefully not called TOO often
276 276 if len(self.extradata) == 0:
277 277 return
278 278 l = []
279 279 last_cut = 0
280 280 i = 0
281 281 offset = 0
282 282 self.extrainfo = [0] * len(self.positions)
283 283 while i < len(self.positions):
284 284 if self.positions[i] >= 0:
285 285 cur = self.positions[i]
286 286 last_cut = cur
287 287 while True:
288 288 self.positions[i] = offset
289 289 i += 1
290 290 if i == len(self.positions) or self.positions[i] < 0:
291 291 break
292 292 offset += self.positions[i] - cur
293 293 cur = self.positions[i]
294 294 end_cut = self.data.find('\n', cur)
295 295 if end_cut != -1:
296 296 end_cut += 1
297 297 offset += end_cut - cur
298 298 l.append(self.data[last_cut:end_cut])
299 299 else:
300 300 while i < len(self.positions) and self.positions[i] < 0:
301 301 cur = self.positions[i]
302 302 t = self.extradata[-cur - 1]
303 303 l.append(self._pack(t))
304 304 self.positions[i] = offset
305 305 if len(t[1]) > 20:
306 306 self.extrainfo[i] = ord(t[1][21])
307 307 offset += len(l[-1])
308 308 i += 1
309 309 self.data = ''.join(l)
310 310 self.extradata = []
311 311
312 312 def _pack(self, d):
313 313 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
314 314
315 315 def text(self):
316 316 self._compact()
317 317 return self.data
318 318
319 319 def diff(self, m2, clean=False):
320 320 '''Finds changes between the current manifest and m2.'''
321 321 # XXX think whether efficiency matters here
322 322 diff = {}
323 323
324 324 for fn, e1, flags in self.iterentries():
325 325 if fn not in m2:
326 326 diff[fn] = (e1, flags), (None, '')
327 327 else:
328 328 e2 = m2[fn]
329 329 if (e1, flags) != e2:
330 330 diff[fn] = (e1, flags), e2
331 331 elif clean:
332 332 diff[fn] = None
333 333
334 334 for fn, e2, flags in m2.iterentries():
335 335 if fn not in self:
336 336 diff[fn] = (None, ''), (e2, flags)
337 337
338 338 return diff
339 339
340 340 def iterentries(self):
341 341 return lazymanifestiterentries(self)
342 342
343 343 def iterkeys(self):
344 344 return lazymanifestiter(self)
345 345
346 346 def __iter__(self):
347 347 return lazymanifestiter(self)
348 348
349 349 def __len__(self):
350 350 return len(self.positions)
351 351
352 352 def filtercopy(self, filterfn):
353 353 # XXX should be optimized
354 354 c = _lazymanifest('')
355 355 for f, n, fl in self.iterentries():
356 356 if filterfn(f):
357 357 c[f] = n, fl
358 358 return c
359 359
360 360 try:
361 361 _lazymanifest = parsers.lazymanifest
362 362 except AttributeError:
363 363 pass
364 364
365 365 class manifestdict(object):
366 366 def __init__(self, data=''):
367 367 self._lm = _lazymanifest(data)
368 368
369 369 def __getitem__(self, key):
370 370 return self._lm[key][0]
371 371
372 372 def find(self, key):
373 373 return self._lm[key]
374 374
375 375 def __len__(self):
376 376 return len(self._lm)
377 377
378 378 def __nonzero__(self):
379 379 # nonzero is covered by the __len__ function, but implementing it here
380 380 # makes it easier for extensions to override.
381 381 return len(self._lm) != 0
382 382
383 383 __bool__ = __nonzero__
384 384
385 385 def __setitem__(self, key, node):
386 386 self._lm[key] = node, self.flags(key, '')
387 387
388 388 def __contains__(self, key):
389 389 if key is None:
390 390 return False
391 391 return key in self._lm
392 392
393 393 def __delitem__(self, key):
394 394 del self._lm[key]
395 395
396 396 def __iter__(self):
397 397 return self._lm.__iter__()
398 398
399 399 def iterkeys(self):
400 400 return self._lm.iterkeys()
401 401
402 402 def keys(self):
403 403 return list(self.iterkeys())
404 404
405 405 def filesnotin(self, m2, match=None):
406 406 '''Set of files in this manifest that are not in the other'''
407 407 if match:
408 408 m1 = self.matches(match)
409 409 m2 = m2.matches(match)
410 410 return m1.filesnotin(m2)
411 411 diff = self.diff(m2)
412 412 files = set(filepath
413 413 for filepath, hashflags in diff.iteritems()
414 414 if hashflags[1][0] is None)
415 415 return files
416 416
417 417 @propertycache
418 418 def _dirs(self):
419 419 return util.dirs(self)
420 420
421 421 def dirs(self):
422 422 return self._dirs
423 423
424 424 def hasdir(self, dir):
425 425 return dir in self._dirs
426 426
427 427 def _filesfastpath(self, match):
428 428 '''Checks whether we can correctly and quickly iterate over matcher
429 429 files instead of over manifest files.'''
430 430 files = match.files()
431 431 return (len(files) < 100 and (match.isexact() or
432 432 (match.prefix() and all(fn in self for fn in files))))
433 433
434 434 def walk(self, match):
435 435 '''Generates matching file names.
436 436
437 437 Equivalent to manifest.matches(match).iterkeys(), but without creating
438 438 an entirely new manifest.
439 439
440 440 It also reports nonexistent files by marking them bad with match.bad().
441 441 '''
442 442 if match.always():
443 443 for f in iter(self):
444 444 yield f
445 445 return
446 446
447 447 fset = set(match.files())
448 448
449 449 # avoid the entire walk if we're only looking for specific files
450 450 if self._filesfastpath(match):
451 451 for fn in sorted(fset):
452 452 yield fn
453 453 return
454 454
455 455 for fn in self:
456 456 if fn in fset:
457 457 # specified pattern is the exact name
458 458 fset.remove(fn)
459 459 if match(fn):
460 460 yield fn
461 461
462 462 # for dirstate.walk, files=['.'] means "walk the whole tree".
463 463 # follow that here, too
464 464 fset.discard('.')
465 465
466 466 for fn in sorted(fset):
467 467 if not self.hasdir(fn):
468 468 match.bad(fn, None)
469 469
470 470 def matches(self, match):
471 471 '''generate a new manifest filtered by the match argument'''
472 472 if match.always():
473 473 return self.copy()
474 474
475 475 if self._filesfastpath(match):
476 476 m = manifestdict()
477 477 lm = self._lm
478 478 for fn in match.files():
479 479 if fn in lm:
480 480 m._lm[fn] = lm[fn]
481 481 return m
482 482
483 483 m = manifestdict()
484 484 m._lm = self._lm.filtercopy(match)
485 485 return m
486 486
487 487 def diff(self, m2, match=None, clean=False):
488 488 '''Finds changes between the current manifest and m2.
489 489
490 490 Args:
491 491 m2: the manifest to which this manifest should be compared.
492 492 clean: if true, include files unchanged between these manifests
493 493 with a None value in the returned dictionary.
494 494
495 495 The result is returned as a dict with filename as key and
496 496 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
497 497 nodeid in the current/other manifest and fl1/fl2 is the flag
498 498 in the current/other manifest. Where the file does not exist,
499 499 the nodeid will be None and the flags will be the empty
500 500 string.
501 501 '''
502 502 if match:
503 503 m1 = self.matches(match)
504 504 m2 = m2.matches(match)
505 505 return m1.diff(m2, clean=clean)
506 506 return self._lm.diff(m2._lm, clean)
507 507
508 508 def setflag(self, key, flag):
509 509 self._lm[key] = self[key], flag
510 510
511 511 def get(self, key, default=None):
512 512 try:
513 513 return self._lm[key][0]
514 514 except KeyError:
515 515 return default
516 516
517 517 def flags(self, key, default=''):
518 518 try:
519 519 return self._lm[key][1]
520 520 except KeyError:
521 521 return default
522 522
523 523 def copy(self):
524 524 c = manifestdict()
525 525 c._lm = self._lm.copy()
526 526 return c
527 527
528 528 def items(self):
529 529 return (x[:2] for x in self._lm.iterentries())
530 530
531 531 iteritems = items
532 532
533 533 def iterentries(self):
534 534 return self._lm.iterentries()
535 535
536 536 def text(self):
537 537 # most likely uses native version
538 538 return self._lm.text()
539 539
540 540 def fastdelta(self, base, changes):
541 541 """Given a base manifest text as a bytearray and a list of changes
542 542 relative to that text, compute a delta that can be used by revlog.
543 543 """
544 544 delta = []
545 545 dstart = None
546 546 dend = None
547 547 dline = [""]
548 548 start = 0
549 549 # zero copy representation of base as a buffer
550 550 addbuf = util.buffer(base)
551 551
552 552 changes = list(changes)
553 553 if len(changes) < 1000:
554 554 # start with a readonly loop that finds the offset of
555 555 # each line and creates the deltas
556 556 for f, todelete in changes:
557 557 # bs will either be the index of the item or the insert point
558 558 start, end = _msearch(addbuf, f, start)
559 559 if not todelete:
560 560 h, fl = self._lm[f]
561 561 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
562 562 else:
563 563 if start == end:
564 564 # item we want to delete was not found, error out
565 565 raise AssertionError(
566 566 _("failed to remove %s from manifest") % f)
567 567 l = ""
568 568 if dstart is not None and dstart <= start and dend >= start:
569 569 if dend < end:
570 570 dend = end
571 571 if l:
572 572 dline.append(l)
573 573 else:
574 574 if dstart is not None:
575 575 delta.append([dstart, dend, "".join(dline)])
576 576 dstart = start
577 577 dend = end
578 578 dline = [l]
579 579
580 580 if dstart is not None:
581 581 delta.append([dstart, dend, "".join(dline)])
582 582 # apply the delta to the base, and get a delta for addrevision
583 583 deltatext, arraytext = _addlistdelta(base, delta)
584 584 else:
585 585 # For large changes, it's much cheaper to just build the text and
586 586 # diff it.
587 587 arraytext = bytearray(self.text())
588 588 deltatext = mdiff.textdiff(
589 589 util.buffer(base), util.buffer(arraytext))
590 590
591 591 return arraytext, deltatext
592 592
593 593 def _msearch(m, s, lo=0, hi=None):
594 594 '''return a tuple (start, end) that says where to find s within m.
595 595
596 596 If the string is found m[start:end] are the line containing
597 597 that string. If start == end the string was not found and
598 598 they indicate the proper sorted insertion point.
599 599
600 600 m should be a buffer, a memoryview or a byte string.
601 601 s is a byte string'''
602 602 def advance(i, c):
603 603 while i < lenm and m[i:i + 1] != c:
604 604 i += 1
605 605 return i
606 606 if not s:
607 607 return (lo, lo)
608 608 lenm = len(m)
609 609 if not hi:
610 610 hi = lenm
611 611 while lo < hi:
612 612 mid = (lo + hi) // 2
613 613 start = mid
614 614 while start > 0 and m[start - 1:start] != '\n':
615 615 start -= 1
616 616 end = advance(start, '\0')
617 617 if bytes(m[start:end]) < s:
618 618 # we know that after the null there are 40 bytes of sha1
619 619 # this translates to the bisect lo = mid + 1
620 620 lo = advance(end + 40, '\n') + 1
621 621 else:
622 622 # this translates to the bisect hi = mid
623 623 hi = start
624 624 end = advance(lo, '\0')
625 625 found = m[lo:end]
626 626 if s == found:
627 627 # we know that after the null there are 40 bytes of sha1
628 628 end = advance(end + 40, '\n')
629 629 return (lo, end + 1)
630 630 else:
631 631 return (lo, lo)
632 632
633 633 def _checkforbidden(l):
634 634 """Check filenames for illegal characters."""
635 635 for f in l:
636 636 if '\n' in f or '\r' in f:
637 637 raise error.RevlogError(
638 638 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
639 639
640 640
641 641 # apply the changes collected during the bisect loop to our addlist
642 642 # return a delta suitable for addrevision
643 643 def _addlistdelta(addlist, x):
644 644 # for large addlist arrays, building a new array is cheaper
645 645 # than repeatedly modifying the existing one
646 646 currentposition = 0
647 647 newaddlist = bytearray()
648 648
649 649 for start, end, content in x:
650 650 newaddlist += addlist[currentposition:start]
651 651 if content:
652 652 newaddlist += bytearray(content)
653 653
654 654 currentposition = end
655 655
656 656 newaddlist += addlist[currentposition:]
657 657
658 658 deltatext = "".join(struct.pack(">lll", start, end, len(content))
659 659 + content for start, end, content in x)
660 660 return deltatext, newaddlist
661 661
662 662 def _splittopdir(f):
663 663 if '/' in f:
664 664 dir, subpath = f.split('/', 1)
665 665 return dir + '/', subpath
666 666 else:
667 667 return '', f
668 668
669 669 _noop = lambda s: None
670 670
671 671 class treemanifest(object):
672 672 def __init__(self, dir='', text=''):
673 673 self._dir = dir
674 674 self._node = revlog.nullid
675 675 self._loadfunc = _noop
676 676 self._copyfunc = _noop
677 677 self._dirty = False
678 678 self._dirs = {}
679 679 # Using _lazymanifest here is a little slower than plain old dicts
680 680 self._files = {}
681 681 self._flags = {}
682 682 if text:
683 683 def readsubtree(subdir, subm):
684 684 raise AssertionError('treemanifest constructor only accepts '
685 685 'flat manifests')
686 686 self.parse(text, readsubtree)
687 687 self._dirty = True # Mark flat manifest dirty after parsing
688 688
689 689 def _subpath(self, path):
690 690 return self._dir + path
691 691
692 692 def __len__(self):
693 693 self._load()
694 694 size = len(self._files)
695 695 for m in self._dirs.values():
696 696 size += m.__len__()
697 697 return size
698 698
699 699 def __nonzero__(self):
700 700 # Faster than "__len() != 0" since it avoids loading sub-manifests
701 701 return not self._isempty()
702 702
703 703 __bool__ = __nonzero__
704 704
705 705 def _isempty(self):
706 706 self._load() # for consistency; already loaded by all callers
707 707 return (not self._files and (not self._dirs or
708 708 all(m._isempty() for m in self._dirs.values())))
709 709
710 710 def __repr__(self):
711 711 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
712 712 (self._dir, revlog.hex(self._node),
713 713 bool(self._loadfunc is _noop),
714 714 self._dirty, id(self)))
715 715
716 716 def dir(self):
717 717 '''The directory that this tree manifest represents, including a
718 718 trailing '/'. Empty string for the repo root directory.'''
719 719 return self._dir
720 720
721 721 def node(self):
722 722 '''This node of this instance. nullid for unsaved instances. Should
723 723 be updated when the instance is read or written from a revlog.
724 724 '''
725 725 assert not self._dirty
726 726 return self._node
727 727
728 728 def setnode(self, node):
729 729 self._node = node
730 730 self._dirty = False
731 731
732 732 def iterentries(self):
733 733 self._load()
734 734 for p, n in sorted(itertools.chain(self._dirs.items(),
735 735 self._files.items())):
736 736 if p in self._files:
737 737 yield self._subpath(p), n, self._flags.get(p, '')
738 738 else:
739 739 for x in n.iterentries():
740 740 yield x
741 741
742 742 def items(self):
743 743 self._load()
744 744 for p, n in sorted(itertools.chain(self._dirs.items(),
745 745 self._files.items())):
746 746 if p in self._files:
747 747 yield self._subpath(p), n
748 748 else:
749 749 for f, sn in n.iteritems():
750 750 yield f, sn
751 751
752 752 iteritems = items
753 753
754 754 def iterkeys(self):
755 755 self._load()
756 756 for p in sorted(itertools.chain(self._dirs, self._files)):
757 757 if p in self._files:
758 758 yield self._subpath(p)
759 759 else:
760 760 for f in self._dirs[p]:
761 761 yield f
762 762
763 763 def keys(self):
764 764 return list(self.iterkeys())
765 765
766 766 def __iter__(self):
767 767 return self.iterkeys()
768 768
769 769 def __contains__(self, f):
770 770 if f is None:
771 771 return False
772 772 self._load()
773 773 dir, subpath = _splittopdir(f)
774 774 if dir:
775 775 if dir not in self._dirs:
776 776 return False
777 777 return self._dirs[dir].__contains__(subpath)
778 778 else:
779 779 return f in self._files
780 780
781 781 def get(self, f, default=None):
782 782 self._load()
783 783 dir, subpath = _splittopdir(f)
784 784 if dir:
785 785 if dir not in self._dirs:
786 786 return default
787 787 return self._dirs[dir].get(subpath, default)
788 788 else:
789 789 return self._files.get(f, default)
790 790
791 791 def __getitem__(self, f):
792 792 self._load()
793 793 dir, subpath = _splittopdir(f)
794 794 if dir:
795 795 return self._dirs[dir].__getitem__(subpath)
796 796 else:
797 797 return self._files[f]
798 798
799 799 def flags(self, f):
800 800 self._load()
801 801 dir, subpath = _splittopdir(f)
802 802 if dir:
803 803 if dir not in self._dirs:
804 804 return ''
805 805 return self._dirs[dir].flags(subpath)
806 806 else:
807 807 if f in self._dirs:
808 808 return ''
809 809 return self._flags.get(f, '')
810 810
811 811 def find(self, f):
812 812 self._load()
813 813 dir, subpath = _splittopdir(f)
814 814 if dir:
815 815 return self._dirs[dir].find(subpath)
816 816 else:
817 817 return self._files[f], self._flags.get(f, '')
818 818
819 819 def __delitem__(self, f):
820 820 self._load()
821 821 dir, subpath = _splittopdir(f)
822 822 if dir:
823 823 self._dirs[dir].__delitem__(subpath)
824 824 # If the directory is now empty, remove it
825 825 if self._dirs[dir]._isempty():
826 826 del self._dirs[dir]
827 827 else:
828 828 del self._files[f]
829 829 if f in self._flags:
830 830 del self._flags[f]
831 831 self._dirty = True
832 832
833 833 def __setitem__(self, f, n):
834 834 assert n is not None
835 835 self._load()
836 836 dir, subpath = _splittopdir(f)
837 837 if dir:
838 838 if dir not in self._dirs:
839 839 self._dirs[dir] = treemanifest(self._subpath(dir))
840 840 self._dirs[dir].__setitem__(subpath, n)
841 841 else:
842 842 self._files[f] = n[:21] # to match manifestdict's behavior
843 843 self._dirty = True
844 844
845 845 def _load(self):
846 846 if self._loadfunc is not _noop:
847 847 lf, self._loadfunc = self._loadfunc, _noop
848 848 lf(self)
849 849 elif self._copyfunc is not _noop:
850 850 cf, self._copyfunc = self._copyfunc, _noop
851 851 cf(self)
852 852
853 853 def setflag(self, f, flags):
854 854 """Set the flags (symlink, executable) for path f."""
855 855 self._load()
856 856 dir, subpath = _splittopdir(f)
857 857 if dir:
858 858 if dir not in self._dirs:
859 859 self._dirs[dir] = treemanifest(self._subpath(dir))
860 860 self._dirs[dir].setflag(subpath, flags)
861 861 else:
862 862 self._flags[f] = flags
863 863 self._dirty = True
864 864
865 865 def copy(self):
866 866 copy = treemanifest(self._dir)
867 867 copy._node = self._node
868 868 copy._dirty = self._dirty
869 869 if self._copyfunc is _noop:
870 870 def _copyfunc(s):
871 871 self._load()
872 872 for d in self._dirs:
873 873 s._dirs[d] = self._dirs[d].copy()
874 874 s._files = dict.copy(self._files)
875 875 s._flags = dict.copy(self._flags)
876 876 if self._loadfunc is _noop:
877 877 _copyfunc(copy)
878 878 else:
879 879 copy._copyfunc = _copyfunc
880 880 else:
881 881 copy._copyfunc = self._copyfunc
882 882 return copy
883 883
884 884 def filesnotin(self, m2, match=None):
885 885 '''Set of files in this manifest that are not in the other'''
886 886 if match:
887 887 m1 = self.matches(match)
888 888 m2 = m2.matches(match)
889 889 return m1.filesnotin(m2)
890 890
891 891 files = set()
892 892 def _filesnotin(t1, t2):
893 893 if t1._node == t2._node and not t1._dirty and not t2._dirty:
894 894 return
895 895 t1._load()
896 896 t2._load()
897 897 for d, m1 in t1._dirs.iteritems():
898 898 if d in t2._dirs:
899 899 m2 = t2._dirs[d]
900 900 _filesnotin(m1, m2)
901 901 else:
902 902 files.update(m1.iterkeys())
903 903
904 904 for fn in t1._files:
905 905 if fn not in t2._files:
906 906 files.add(t1._subpath(fn))
907 907
908 908 _filesnotin(self, m2)
909 909 return files
910 910
911 911 @propertycache
912 912 def _alldirs(self):
913 913 return util.dirs(self)
914 914
915 915 def dirs(self):
916 916 return self._alldirs
917 917
918 918 def hasdir(self, dir):
919 919 self._load()
920 920 topdir, subdir = _splittopdir(dir)
921 921 if topdir:
922 922 if topdir in self._dirs:
923 923 return self._dirs[topdir].hasdir(subdir)
924 924 return False
925 925 return (dir + '/') in self._dirs
926 926
927 927 def walk(self, match):
928 928 '''Generates matching file names.
929 929
930 930 Equivalent to manifest.matches(match).iterkeys(), but without creating
931 931 an entirely new manifest.
932 932
933 933 It also reports nonexistent files by marking them bad with match.bad().
934 934 '''
935 935 if match.always():
936 936 for f in iter(self):
937 937 yield f
938 938 return
939 939
940 940 fset = set(match.files())
941 941
942 942 for fn in self._walk(match):
943 943 if fn in fset:
944 944 # specified pattern is the exact name
945 945 fset.remove(fn)
946 946 yield fn
947 947
948 948 # for dirstate.walk, files=['.'] means "walk the whole tree".
949 949 # follow that here, too
950 950 fset.discard('.')
951 951
952 952 for fn in sorted(fset):
953 953 if not self.hasdir(fn):
954 954 match.bad(fn, None)
955 955
956 956 def _walk(self, match):
957 957 '''Recursively generates matching file names for walk().'''
958 958 if not match.visitdir(self._dir[:-1] or '.'):
959 959 return
960 960
961 961 # yield this dir's files and walk its submanifests
962 962 self._load()
963 963 for p in sorted(list(self._dirs) + list(self._files)):
964 964 if p in self._files:
965 965 fullp = self._subpath(p)
966 966 if match(fullp):
967 967 yield fullp
968 968 else:
969 969 for f in self._dirs[p]._walk(match):
970 970 yield f
971 971
972 972 def matches(self, match):
973 973 '''generate a new manifest filtered by the match argument'''
974 974 if match.always():
975 975 return self.copy()
976 976
977 977 return self._matches(match)
978 978
979 979 def _matches(self, match):
980 980 '''recursively generate a new manifest filtered by the match argument.
981 981 '''
982 982
983 983 visit = match.visitdir(self._dir[:-1] or '.')
984 984 if visit == 'all':
985 985 return self.copy()
986 986 ret = treemanifest(self._dir)
987 987 if not visit:
988 988 return ret
989 989
990 990 self._load()
991 991 for fn in self._files:
992 992 fullp = self._subpath(fn)
993 993 if not match(fullp):
994 994 continue
995 995 ret._files[fn] = self._files[fn]
996 996 if fn in self._flags:
997 997 ret._flags[fn] = self._flags[fn]
998 998
999 999 for dir, subm in self._dirs.iteritems():
1000 1000 m = subm._matches(match)
1001 1001 if not m._isempty():
1002 1002 ret._dirs[dir] = m
1003 1003
1004 1004 if not ret._isempty():
1005 1005 ret._dirty = True
1006 1006 return ret
1007 1007
    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          match: optional matcher; when given, both manifests are first
                 narrowed to the matching files and the diff is computed
                 on the narrowed copies.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            # Prune: identical stored node and neither side modified in
            # memory means nothing below this subtree can differ.
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            # Recurse into every subdirectory present on either side;
            # a side missing the directory is stood in for by the
            # shared empty tree.
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            # Compare files stored directly in this directory.
            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            # Files present only on the other side.
            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result
1058 1058
1059 1059 def unmodifiedsince(self, m2):
1060 1060 return not self._dirty and not m2._dirty and self._node == m2._node
1061 1061
1062 1062 def parse(self, text, readsubtree):
1063 1063 for f, n, fl in _parse(text):
1064 1064 if fl == 't':
1065 1065 f = f + '/'
1066 1066 self._dirs[f] = readsubtree(self._subpath(f), n)
1067 1067 elif '/' in f:
1068 1068 # This is a flat manifest, so use __setitem__ and setflag rather
1069 1069 # than assigning directly to _files and _flags, so we can
1070 1070 # assign a path in a subdirectory, and to mark dirty (compared
1071 1071 # to nullid).
1072 1072 self[f] = n
1073 1073 if fl:
1074 1074 self.setflag(f, fl)
1075 1075 else:
1076 1076 # Assigning to _files and _flags avoids marking as dirty,
1077 1077 # and should be a little faster.
1078 1078 self._files[f] = n
1079 1079 if fl:
1080 1080 self._flags[f] = fl
1081 1081
1082 1082 def text(self):
1083 1083 """Get the full data of this manifest as a bytestring."""
1084 1084 self._load()
1085 1085 return _text(self.iterentries())
1086 1086
1087 1087 def dirtext(self):
1088 1088 """Get the full data of this directory as a bytestring. Make sure that
1089 1089 any submanifests have been written first, so their nodeids are correct.
1090 1090 """
1091 1091 self._load()
1092 1092 flags = self.flags
1093 1093 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1094 1094 files = [(f, self._files[f], flags(f)) for f in self._files]
1095 1095 return _text(sorted(dirs + files))
1096 1096
1097 1097 def read(self, gettext, readsubtree):
1098 1098 def _load_for_read(s):
1099 1099 s.parse(gettext(), readsubtree)
1100 1100 s._dirty = False
1101 1101 self._loadfunc = _load_for_read
1102 1102
1103 1103 def writesubtrees(self, m1, m2, writesubtree):
1104 1104 self._load() # for consistency; should never have any effect here
1105 1105 m1._load()
1106 1106 m2._load()
1107 1107 emptytree = treemanifest()
1108 1108 for d, subm in self._dirs.iteritems():
1109 1109 subp1 = m1._dirs.get(d, emptytree)._node
1110 1110 subp2 = m2._dirs.get(d, emptytree)._node
1111 1111 if subp1 == revlog.nullid:
1112 1112 subp1, subp2 = subp2, subp1
1113 1113 writesubtree(subm, subp1, subp2)
1114 1114
1115 1115 def walksubtrees(self, matcher=None):
1116 1116 """Returns an iterator of the subtrees of this manifest, including this
1117 1117 manifest itself.
1118 1118
1119 1119 If `matcher` is provided, it only returns subtrees that match.
1120 1120 """
1121 1121 if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
1122 1122 return
1123 1123 if not matcher or matcher(self._dir[:-1]):
1124 1124 yield self
1125 1125
1126 1126 self._load()
1127 1127 for d, subm in self._dirs.iteritems():
1128 1128 for subtree in subm.walksubtrees(matcher=matcher):
1129 1129 yield subtree
1130 1130
class manifestrevlog(revlog.revlog):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, dir='', dirlogcache=None, indexfile=None,
                 treemanifest=False):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flatmanifeset and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            optiontreemanifest = opts.get('treemanifest', False)

        # Opener option wins over the constructor argument (see docstring).
        self._treeondisk = optiontreemanifest or treemanifest

        # node -> full manifest text (bytearray), bounded LRU.
        self._fulltextcache = util.lrucachedict(cachesize)

        if dir:
            # Per-directory revlogs only exist for on-disk tree manifests.
            assert self._treeondisk, 'opts is %r' % opts

        if indexfile is None:
            indexfile = '00manifest.i'
            if dir:
                # Directory manifests live under meta/<dir>/00manifest.i.
                indexfile = "meta/" + dir + indexfile

        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        super(manifestrevlog, self).__init__(opener, indexfile,
                                             # only root indexfile is cached
                                             checkambig=not bool(dir),
                                             mmaplargeindex=True)

    @property
    def fulltextcache(self):
        # Public accessor for the node -> fulltext LRU cache.
        return self._fulltextcache

    def clearcaches(self):
        # Drop both the fulltext cache and all cached directory revlogs.
        super(manifestrevlog, self).clearcaches()
        self._fulltextcache.clear()
        self._dirlogcache = {'': self}

    def dirlog(self, d):
        """Return the (cached) manifestrevlog for subdirectory `d`."""
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(self.opener, d,
                                      self._dirlogcache,
                                      treemanifest=self._treeondisk)
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None):
        """Store manifest `m` with parents p1/p2 and return its node.

        `added`/`removed` list the files changed relative to p1 and feed
        the fastdelta path when p1's fulltext is cached. `readtree` must
        be supplied for tree manifest writes.
        """
        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                m1 = readtree(self._dir, p1)
                m2 = readtree(self._dir, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree)
                arraytext = None
            else:
                text = m.text()
                n = self.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            # Seed the cache so the next add() can use the fastdelta path.
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2, readtree):
        """Write tree manifest `m` (children first) and return its node."""
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self._dir != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)):
            return m.node()
        def writesubtree(subm, subp1, subp2):
            # Recurse into the per-directory revlog for each subtree.
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree)
        m.writesubtrees(m1, m2, writesubtree)
        text = m.dirtext()
        n = None
        if self._dir != '':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self.addrevision(text, transaction, link, m1.node(), m2.node())

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n
1264 1262
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo):
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            cachesize = opts.get('manifestcachesize', cachesize)
        # Whether manifests are materialized as tree objects in memory.
        self._treeinmem = usetreemanifest

        self._revlog = repo._constructmanifest()

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[''] = util.lrucachedict(cachesize)

        self.cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, dir, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
                   the revlog
        """
        # Fast path: a ctx was already built for this (dir, node).
        if node in self._dirmancache.get(dir, ()):
            return self._dirmancache[dir][node]

        if dir:
            if self._revlog._treeondisk:
                if verify:
                    dirlog = self._revlog.dirlog(dir)
                    if node not in dirlog.nodemap:
                        raise LookupError(node, dirlog.indexfile,
                                          _('no node'))
                m = treemanifestctx(self, dir, node)
            else:
                # Directory manifests only exist with on-disk trees.
                raise error.Abort(
                    _("cannot ask for manifest directory '%s' in a flat "
                      "manifest") % dir)
        else:
            if verify:
                if node not in self._revlog.nodemap:
                    raise LookupError(node, self._revlog.indexfile,
                                      _('no node'))
            if self._treeinmem:
                m = treemanifestctx(self, '', node)
            else:
                m = manifestctx(self, node)

        # Cache everything except the null manifest, which is trivially
        # recreated on demand.
        if node != revlog.nullid:
            mancache = self._dirmancache.get(dir)
            if not mancache:
                mancache = util.lrucachedict(self.cachesize)
                self._dirmancache[dir] = mancache
            mancache[node] = m
        return m

    def clearcaches(self):
        # Drop per-directory ctx caches and the underlying revlog caches.
        self._dirmancache.clear()
        self._revlog.clearcaches()
1340 1338
class memmanifestctx(object):
    """An in-memory, not-yet-committed revision of a flat manifest."""

    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _revlog(self):
        # Storage lives on the owning manifestlog.
        return self._manifestlog._revlog

    def new(self):
        """Return a fresh, empty in-memory manifest context."""
        return memmanifestctx(self._manifestlog)

    def copy(self):
        """Return an independent in-memory copy of this context."""
        clone = memmanifestctx(self._manifestlog)
        clone._manifestdict = self.read().copy()
        return clone

    def read(self):
        """Return the underlying mutable manifestdict."""
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed):
        """Persist this manifest to the revlog; return the new node."""
        rl = self._revlog()
        return rl.add(self._manifestdict, transaction, link, p1, p2,
                      added, removed)
1363 1361
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        # Lazily-parsed manifestdict; filled in by read().
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def _revlog(self):
        # Storage lives on the owning manifestlog.
        return self._manifestlog._revlog

    def node(self):
        """Return the nodeid of this manifest revision."""
        return self._node

    def new(self):
        """Return an empty in-memory manifest context to build on."""
        return memmanifestctx(self._manifestlog)

    def copy(self):
        """Return a mutable in-memory copy of this manifest."""
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        # (p1, p2) nodeids, computed once then cached on the instance.
        return self._revlog().parents(self._node)

    def read(self):
        """Parse and return the full manifestdict (cached after first call)."""
        if self._data is None:
            if self._node == revlog.nullid:
                self._data = manifestdict()
            else:
                rl = self._revlog()
                text = rl.revision(self._node)
                # Seed the revlog's fulltext cache so a later add() on top
                # of this revision can take the fastdelta path.
                arraytext = bytearray(text)
                rl._fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        rl = self._revlog()
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        # Only the p1/p2 delta base guarantees readdelta is cheap and
        # semantically "changes against a parent".
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        revlog = self._revlog()
        r = revlog.rev(self._node)
        # Patching the stored delta against nothing yields exactly the
        # lines this revision adds relative to its delta base.
        d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        """Return (node, flags) for file `key`; raises if absent."""
        return self.read().find(key)
1439 1437
class memtreemanifestctx(object):
    """An in-memory, not-yet-committed revision of a tree manifest."""

    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _revlog(self):
        # Storage lives on the owning manifestlog.
        return self._manifestlog._revlog

    def new(self, dir=''):
        """Return a fresh, empty in-memory tree manifest context."""
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        """Return an independent in-memory copy of this context."""
        clone = memtreemanifestctx(self._manifestlog, dir=self._dir)
        clone._treemanifest = self._treemanifest.copy()
        return clone

    def read(self):
        """Return the underlying mutable treemanifest."""
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed):
        """Persist this tree manifest (subtrees included); return its node."""
        def readtree(dir, node):
            # Resolve parent subtrees through the manifestlog.
            return self._manifestlog.get(dir, node).read()
        rl = self._revlog()
        return rl.add(self._treemanifest, transaction, link, p1, p2,
                      added, removed, readtree=readtree)
1465 1463
class treemanifestctx(object):
    """A single revision of the tree manifest for directory `dir`."""
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        # Lazily-parsed treemanifest; filled in by read().
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def _revlog(self):
        # Per-directory revlog for this subtree.
        return self._manifestlog._revlog.dirlog(self._dir)

    def read(self):
        """Parse and return the treemanifest (cached after first call).

        On-disk trees are parsed with lazily-loaded subtrees; flat storage
        is parsed from the full revision text instead.
        """
        if self._data is None:
            rl = self._revlog()
            if self._node == revlog.nullid:
                self._data = treemanifest()
            elif rl._treeondisk:
                m = treemanifest(dir=self._dir)
                def gettext():
                    return rl.revision(self._node)
                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                text = rl.revision(self._node)
                # Seed the fulltext cache so later writes can fast-delta.
                arraytext = bytearray(text)
                rl.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        """Return the nodeid of this tree revision."""
        return self._node

    def new(self, dir=''):
        """Return an empty in-memory tree manifest context."""
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        """Return a mutable in-memory copy of this tree manifest."""
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        # (p1, p2) nodeids, computed once then cached on the instance.
        return self._revlog().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        revlog = self._revlog()
        if shallow:
            # Fast path: patch the stored delta against nothing to get
            # only this directory's changed entries.
            r = revlog.rev(self._node)
            d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = revlog.deltaparent(revlog.rev(self._node))
            m0 = self._manifestlog.get(self._dir, revlog.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            # Keep only entries added/changed in this revision (n1 set).
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        rl = self._revlog()
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if (deltaparent != revlog.nullrev and
            deltaparent in rl.parentrevs(r)):
            return self.readdelta(shallow=shallow)

        if shallow:
            # Shallow read of a tree: parse this directory's text flat.
            return manifestdict(rl.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        """Return (node, flags) for file `key`; raises if absent."""
        return self.read().find(key)
General Comments 0
You need to be logged in to leave comments. Login now