dirstate: preserve path components case on renames (issue3402)...
Patrick Mezard
r16542:e596a631 stable
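The five added lines below sit in copyfile() inside the copy/rename helper: before the target is resolved, the directory part of abstarget is folded to the case already recorded in the dirstate, while the last path component is left untouched so that case-only renames (a => A) still work. A minimal standalone sketch of that normalization, using a hypothetical normalizetarget() helper and a normalize callable standing in for repo.dirstate.normalize():

    # Sketch only: 'normalize' stands in for repo.dirstate.normalize(),
    # which folds a directory path to the case stored in the dirstate.
    def normalizetarget(abstarget, normalize):
        if '/' not in abstarget:
            # bare file name: leave its case alone so case-only renames
            # such as a => A remain possible
            return abstarget
        abspath, absname = abstarget.rsplit('/', 1)
        return normalize(abspath) + '/' + absname

    # example with a trivial normalizer:
    #   normalizetarget('Foo/Bar/NEW.txt', str.lower) -> 'foo/bar/NEW.txt'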
@@ -1,1639 +1,1644 @@
 # cmdutil.py - help for command processing in mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from node import hex, nullid, nullrev, short
 from i18n import _
 import os, sys, errno, re, tempfile
 import util, scmutil, templater, patch, error, templatekw, revlog, copies
 import match as matchmod
 import subrepo, context, repair, bookmarks

 def parsealiases(cmd):
     return cmd.lstrip("^").split("|")

 def findpossible(cmd, table, strict=False):
     """
     Return cmd -> (aliases, command table entry)
     for each matching command.
     Return debug commands (or their aliases) only if no normal command matches.
     """
     choice = {}
     debugchoice = {}

     if cmd in table:
         # short-circuit exact matches, "log" alias beats "^log|history"
         keys = [cmd]
     else:
         keys = table.keys()

     for e in keys:
         aliases = parsealiases(e)
         found = None
         if cmd in aliases:
             found = cmd
         elif not strict:
             for a in aliases:
                 if a.startswith(cmd):
                     found = a
                     break
         if found is not None:
             if aliases[0].startswith("debug") or found.startswith("debug"):
                 debugchoice[found] = (aliases, table[e])
             else:
                 choice[found] = (aliases, table[e])

     if not choice and debugchoice:
         choice = debugchoice

     return choice

 def findcmd(cmd, table, strict=True):
     """Return (aliases, command table entry) for command string."""
     choice = findpossible(cmd, table, strict)

     if cmd in choice:
         return choice[cmd]

     if len(choice) > 1:
         clist = choice.keys()
         clist.sort()
         raise error.AmbiguousCommand(cmd, clist)

     if choice:
         return choice.values()[0]

     raise error.UnknownCommand(cmd)

 def findrepo(p):
     while not os.path.isdir(os.path.join(p, ".hg")):
         oldp, p = p, os.path.dirname(p)
         if p == oldp:
             return None

     return p

 def bailifchanged(repo):
     if repo.dirstate.p2() != nullid:
         raise util.Abort(_('outstanding uncommitted merge'))
     modified, added, removed, deleted = repo.status()[:4]
     if modified or added or removed or deleted:
         raise util.Abort(_("outstanding uncommitted changes"))
     ctx = repo[None]
     for s in ctx.substate:
         if ctx.sub(s).dirty():
             raise util.Abort(_("uncommitted changes in subrepo %s") % s)

 def logmessage(ui, opts):
     """ get the log message according to -m and -l option """
     message = opts.get('message')
     logfile = opts.get('logfile')

     if message and logfile:
         raise util.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
     if not message and logfile:
         try:
             if logfile == '-':
                 message = ui.fin.read()
             else:
                 message = '\n'.join(util.readfile(logfile).splitlines())
         except IOError, inst:
             raise util.Abort(_("can't read commit message '%s': %s") %
                              (logfile, inst.strerror))
     return message

 def loglimit(opts):
     """get the log limit according to option -l/--limit"""
     limit = opts.get('limit')
     if limit:
         try:
             limit = int(limit)
         except ValueError:
             raise util.Abort(_('limit must be a positive integer'))
         if limit <= 0:
             raise util.Abort(_('limit must be positive'))
     else:
         limit = None
     return limit

 def makefilename(repo, pat, node, desc=None,
                  total=None, seqno=None, revwidth=None, pathname=None):
     node_expander = {
         'H': lambda: hex(node),
         'R': lambda: str(repo.changelog.rev(node)),
         'h': lambda: short(node),
         'm': lambda: re.sub('[^\w]', '_', str(desc))
         }
     expander = {
         '%': lambda: '%',
         'b': lambda: os.path.basename(repo.root),
         }

     try:
         if node:
             expander.update(node_expander)
         if node:
             expander['r'] = (lambda:
                     str(repo.changelog.rev(node)).zfill(revwidth or 0))
         if total is not None:
             expander['N'] = lambda: str(total)
         if seqno is not None:
             expander['n'] = lambda: str(seqno)
         if total is not None and seqno is not None:
             expander['n'] = lambda: str(seqno).zfill(len(str(total)))
         if pathname is not None:
             expander['s'] = lambda: os.path.basename(pathname)
             expander['d'] = lambda: os.path.dirname(pathname) or '.'
             expander['p'] = lambda: pathname

         newname = []
         patlen = len(pat)
         i = 0
         while i < patlen:
             c = pat[i]
             if c == '%':
                 i += 1
                 c = pat[i]
                 c = expander[c]()
             newname.append(c)
             i += 1
         return ''.join(newname)
     except KeyError, inst:
         raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])

 def makefileobj(repo, pat, node=None, desc=None, total=None,
                 seqno=None, revwidth=None, mode='wb', pathname=None):

     writable = mode not in ('r', 'rb')

     if not pat or pat == '-':
         fp = writable and repo.ui.fout or repo.ui.fin
         if util.safehasattr(fp, 'fileno'):
             return os.fdopen(os.dup(fp.fileno()), mode)
         else:
             # if this fp can't be duped properly, return
             # a dummy object that can be closed
             class wrappedfileobj(object):
                 noop = lambda x: None
                 def __init__(self, f):
                     self.f = f
                 def __getattr__(self, attr):
                     if attr == 'close':
                         return self.noop
                     else:
                         return getattr(self.f, attr)

             return wrappedfileobj(fp)
     if util.safehasattr(pat, 'write') and writable:
         return pat
     if util.safehasattr(pat, 'read') and 'r' in mode:
         return pat
     return open(makefilename(repo, pat, node, desc, total, seqno, revwidth,
                              pathname),
                 mode)

 def openrevlog(repo, cmd, file_, opts):
     """opens the changelog, manifest, a filelog or a given revlog"""
     cl = opts['changelog']
     mf = opts['manifest']
     msg = None
     if cl and mf:
         msg = _('cannot specify --changelog and --manifest at the same time')
     elif cl or mf:
         if file_:
             msg = _('cannot specify filename with --changelog or --manifest')
         elif not repo:
             msg = _('cannot specify --changelog or --manifest '
                     'without a repository')
     if msg:
         raise util.Abort(msg)

     r = None
     if repo:
         if cl:
             r = repo.changelog
         elif mf:
             r = repo.manifest
         elif file_:
             filelog = repo.file(file_)
             if len(filelog):
                 r = filelog
     if not r:
         if not file_:
             raise error.CommandError(cmd, _('invalid arguments'))
         if not os.path.isfile(file_):
             raise util.Abort(_("revlog '%s' not found") % file_)
         r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                           file_[:-2] + ".i")
     return r

 def copy(ui, repo, pats, opts, rename=False):
     # called with the repo lock held
     #
     # hgsep => pathname that uses "/" to separate directories
     # ossep => pathname that uses os.sep to separate directories
     cwd = repo.getcwd()
     targets = {}
     after = opts.get("after")
     dryrun = opts.get("dry_run")
     wctx = repo[None]

     def walkpat(pat):
         srcs = []
         badstates = after and '?' or '?r'
         m = scmutil.match(repo[None], [pat], opts, globbed=True)
         for abs in repo.walk(m):
             state = repo.dirstate[abs]
             rel = m.rel(abs)
             exact = m.exact(abs)
             if state in badstates:
                 if exact and state == '?':
                     ui.warn(_('%s: not copying - file is not managed\n') % rel)
                 if exact and state == 'r':
                     ui.warn(_('%s: not copying - file has been marked for'
                               ' remove\n') % rel)
                 continue
             # abs: hgsep
             # rel: ossep
             srcs.append((abs, rel, exact))
         return srcs

     # abssrc: hgsep
     # relsrc: ossep
     # otarget: ossep
     def copyfile(abssrc, relsrc, otarget, exact):
         abstarget = scmutil.canonpath(repo.root, cwd, otarget)
+        if '/' in abstarget:
+            # We cannot normalize abstarget itself, this would prevent
+            # case only renames, like a => A.
+            abspath, absname = abstarget.rsplit('/', 1)
+            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
         reltarget = repo.pathto(abstarget, cwd)
         target = repo.wjoin(abstarget)
         src = repo.wjoin(abssrc)
         state = repo.dirstate[abstarget]

         scmutil.checkportable(ui, abstarget)

         # check for collisions
         prevsrc = targets.get(abstarget)
         if prevsrc is not None:
             ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                     (reltarget, repo.pathto(abssrc, cwd),
                      repo.pathto(prevsrc, cwd)))
             return

         # check for overwrites
         exists = os.path.lexists(target)
         samefile = False
         if exists and abssrc != abstarget:
             if (repo.dirstate.normalize(abssrc) ==
                 repo.dirstate.normalize(abstarget)):
                 if not rename:
                     ui.warn(_("%s: can't copy - same file\n") % reltarget)
                     return
                 exists = False
                 samefile = True

         if not after and exists or after and state in 'mn':
             if not opts['force']:
                 ui.warn(_('%s: not overwriting - file exists\n') %
                         reltarget)
                 return

         if after:
             if not exists:
                 if rename:
                     ui.warn(_('%s: not recording move - %s does not exist\n') %
                             (relsrc, reltarget))
                 else:
                     ui.warn(_('%s: not recording copy - %s does not exist\n') %
                             (relsrc, reltarget))
                 return
         elif not dryrun:
             try:
                 if exists:
                     os.unlink(target)
                 targetdir = os.path.dirname(target) or '.'
                 if not os.path.isdir(targetdir):
                     os.makedirs(targetdir)
                 if samefile:
                     tmp = target + "~hgrename"
                     os.rename(src, tmp)
                     os.rename(tmp, target)
                 else:
                     util.copyfile(src, target)
                 srcexists = True
             except IOError, inst:
                 if inst.errno == errno.ENOENT:
                     ui.warn(_('%s: deleted in working copy\n') % relsrc)
                     srcexists = False
                 else:
                     ui.warn(_('%s: cannot copy - %s\n') %
                             (relsrc, inst.strerror))
                     return True # report a failure

         if ui.verbose or not exact:
             if rename:
                 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
             else:
                 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

         targets[abstarget] = abssrc

         # fix up dirstate
         scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                              dryrun=dryrun, cwd=cwd)
         if rename and not dryrun:
             if not after and srcexists and not samefile:
                 util.unlinkpath(repo.wjoin(abssrc))
             wctx.forget([abssrc])

     # pat: ossep
     # dest ossep
     # srcs: list of (hgsep, hgsep, ossep, bool)
     # return: function that takes hgsep and returns ossep
     def targetpathfn(pat, dest, srcs):
         if os.path.isdir(pat):
             abspfx = scmutil.canonpath(repo.root, cwd, pat)
             abspfx = util.localpath(abspfx)
             if destdirexists:
                 striplen = len(os.path.split(abspfx)[0])
             else:
                 striplen = len(abspfx)
             if striplen:
                 striplen += len(os.sep)
             res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
         elif destdirexists:
             res = lambda p: os.path.join(dest,
                                          os.path.basename(util.localpath(p)))
         else:
             res = lambda p: dest
         return res

     # pat: ossep
     # dest ossep
     # srcs: list of (hgsep, hgsep, ossep, bool)
     # return: function that takes hgsep and returns ossep
     def targetpathafterfn(pat, dest, srcs):
         if matchmod.patkind(pat):
             # a mercurial pattern
             res = lambda p: os.path.join(dest,
                                          os.path.basename(util.localpath(p)))
         else:
             abspfx = scmutil.canonpath(repo.root, cwd, pat)
             if len(abspfx) < len(srcs[0][0]):
                 # A directory. Either the target path contains the last
                 # component of the source path or it does not.
                 def evalpath(striplen):
                     score = 0
                     for s in srcs:
                         t = os.path.join(dest, util.localpath(s[0])[striplen:])
                         if os.path.lexists(t):
                             score += 1
                     return score

                 abspfx = util.localpath(abspfx)
                 striplen = len(abspfx)
                 if striplen:
                     striplen += len(os.sep)
                 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                     score = evalpath(striplen)
                     striplen1 = len(os.path.split(abspfx)[0])
                     if striplen1:
                         striplen1 += len(os.sep)
                     if evalpath(striplen1) > score:
                         striplen = striplen1
                 res = lambda p: os.path.join(dest,
                                              util.localpath(p)[striplen:])
             else:
                 # a file
                 if destdirexists:
                     res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
                 else:
                     res = lambda p: dest
         return res


     pats = scmutil.expandpats(pats)
     if not pats:
         raise util.Abort(_('no source or destination specified'))
     if len(pats) == 1:
         raise util.Abort(_('no destination specified'))
     dest = pats.pop()
     destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
     if not destdirexists:
         if len(pats) > 1 or matchmod.patkind(pats[0]):
             raise util.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
         if util.endswithsep(dest):
             raise util.Abort(_('destination %s is not a directory') % dest)

     tfn = targetpathfn
     if after:
         tfn = targetpathafterfn
     copylist = []
     for pat in pats:
         srcs = walkpat(pat)
         if not srcs:
             continue
         copylist.append((tfn(pat, dest, srcs), srcs))
     if not copylist:
         raise util.Abort(_('no files to copy'))

     errors = 0
     for targetpath, srcs in copylist:
         for abssrc, relsrc, exact in srcs:
             if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                 errors += 1

     if errors:
         ui.warn(_('(consider using --after)\n'))

     return errors != 0

 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
             runargs=None, appendpid=False):
     '''Run a command as a service.'''

     if opts['daemon'] and not opts['daemon_pipefds']:
         # Signal child process startup with file removal
         lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
         os.close(lockfd)
         try:
             if not runargs:
                 runargs = util.hgcmd() + sys.argv[1:]
             runargs.append('--daemon-pipefds=%s' % lockpath)
             # Don't pass --cwd to the child process, because we've already
             # changed directory.
             for i in xrange(1, len(runargs)):
                 if runargs[i].startswith('--cwd='):
                     del runargs[i]
                     break
                 elif runargs[i].startswith('--cwd'):
                     del runargs[i:i + 2]
                     break
             def condfn():
                 return not os.path.exists(lockpath)
             pid = util.rundetached(runargs, condfn)
             if pid < 0:
                 raise util.Abort(_('child process failed to start'))
         finally:
             try:
                 os.unlink(lockpath)
             except OSError, e:
                 if e.errno != errno.ENOENT:
                     raise
         if parentfn:
             return parentfn(pid)
         else:
             return

     if initfn:
         initfn()

     if opts['pid_file']:
         mode = appendpid and 'a' or 'w'
         fp = open(opts['pid_file'], mode)
         fp.write(str(os.getpid()) + '\n')
         fp.close()

     if opts['daemon_pipefds']:
         lockpath = opts['daemon_pipefds']
         try:
             os.setsid()
         except AttributeError:
             pass
         os.unlink(lockpath)
         util.hidewindow()
         sys.stdout.flush()
         sys.stderr.flush()

         nullfd = os.open(util.nulldev, os.O_RDWR)
         logfilefd = nullfd
         if logfile:
             logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
         os.dup2(nullfd, 0)
         os.dup2(logfilefd, 1)
         os.dup2(logfilefd, 2)
         if nullfd not in (0, 1, 2):
             os.close(nullfd)
         if logfile and logfilefd not in (0, 1, 2):
             os.close(logfilefd)

     if runfn:
         return runfn()

 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
            opts=None):
     '''export changesets as hg patches.'''

     total = len(revs)
     revwidth = max([len(str(rev)) for rev in revs])

     def single(rev, seqno, fp):
         ctx = repo[rev]
         node = ctx.node()
         parents = [p.node() for p in ctx.parents() if p]
         branch = ctx.branch()
         if switch_parent:
             parents.reverse()
         prev = (parents and parents[0]) or nullid

         shouldclose = False
         if not fp:
             desc_lines = ctx.description().rstrip().split('\n')
             desc = desc_lines[0] #Commit always has a first line.
             fp = makefileobj(repo, template, node, desc=desc, total=total,
                              seqno=seqno, revwidth=revwidth, mode='ab')
             if fp != template:
                 shouldclose = True
             if fp != sys.stdout and util.safehasattr(fp, 'name'):
                 repo.ui.note("%s\n" % fp.name)

         fp.write("# HG changeset patch\n")
         fp.write("# User %s\n" % ctx.user())
         fp.write("# Date %d %d\n" % ctx.date())
         if branch and branch != 'default':
             fp.write("# Branch %s\n" % branch)
         fp.write("# Node ID %s\n" % hex(node))
         fp.write("# Parent %s\n" % hex(prev))
         if len(parents) > 1:
             fp.write("# Parent %s\n" % hex(parents[1]))
         fp.write(ctx.description().rstrip())
         fp.write("\n\n")

         for chunk in patch.diff(repo, prev, node, opts=opts):
             fp.write(chunk)

         if shouldclose:
             fp.close()

     for seqno, rev in enumerate(revs):
         single(rev, seqno + 1, fp)

 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                    changes=None, stat=False, fp=None, prefix='',
                    listsubrepos=False):
     '''show diff or diffstat.'''
     if fp is None:
         write = ui.write
     else:
         def write(s, **kw):
             fp.write(s)

     if stat:
         diffopts = diffopts.copy(context=0)
         width = 80
         if not ui.plain():
             width = ui.termwidth()
         chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                             prefix=prefix)
         for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                              width=width,
                                              git=diffopts.git):
             write(chunk, label=label)
     else:
         for chunk, label in patch.diffui(repo, node1, node2, match,
                                          changes, diffopts, prefix=prefix):
             write(chunk, label=label)

     if listsubrepos:
         ctx1 = repo[node1]
         ctx2 = repo[node2]
         for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
             tempnode2 = node2
             try:
                 if node2 is not None:
                     tempnode2 = ctx2.substate[subpath][1]
             except KeyError:
                 # A subrepo that existed in node1 was deleted between node1 and
                 # node2 (inclusive). Thus, ctx2's substate won't contain that
                 # subpath. The best we can do is to ignore it.
                 tempnode2 = None
             submatch = matchmod.narrowmatcher(subpath, match)
             sub.diff(diffopts, tempnode2, submatch, changes=changes,
                      stat=stat, fp=fp, prefix=prefix)

 class changeset_printer(object):
     '''show changeset information when templating not requested.'''

     def __init__(self, ui, repo, patch, diffopts, buffered):
         self.ui = ui
         self.repo = repo
         self.buffered = buffered
         self.patch = patch
         self.diffopts = diffopts
         self.header = {}
         self.hunk = {}
         self.lastheader = None
         self.footer = None

     def flush(self, rev):
         if rev in self.header:
             h = self.header[rev]
             if h != self.lastheader:
                 self.lastheader = h
                 self.ui.write(h)
             del self.header[rev]
         if rev in self.hunk:
             self.ui.write(self.hunk[rev])
             del self.hunk[rev]
             return 1
         return 0

     def close(self):
         if self.footer:
             self.ui.write(self.footer)

     def show(self, ctx, copies=None, matchfn=None, **props):
         if self.buffered:
             self.ui.pushbuffer()
             self._show(ctx, copies, matchfn, props)
             self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
         else:
             self._show(ctx, copies, matchfn, props)

     def _show(self, ctx, copies, matchfn, props):
         '''show a single changeset or file revision'''
         changenode = ctx.node()
         rev = ctx.rev()

         if self.ui.quiet:
             self.ui.write("%d:%s\n" % (rev, short(changenode)),
                           label='log.node')
             return

         log = self.repo.changelog
         date = util.datestr(ctx.date())

         hexfunc = self.ui.debugflag and hex or short

         parents = [(p, hexfunc(log.node(p)))
                    for p in self._meaningful_parentrevs(log, rev)]

         self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
                       label='log.changeset')

         branch = ctx.branch()
         # don't show the default branch name
         if branch != 'default':
             self.ui.write(_("branch: %s\n") % branch,
                           label='log.branch')
         for bookmark in self.repo.nodebookmarks(changenode):
             self.ui.write(_("bookmark: %s\n") % bookmark,
                           label='log.bookmark')
         for tag in self.repo.nodetags(changenode):
             self.ui.write(_("tag: %s\n") % tag,
                           label='log.tag')
         if self.ui.debugflag and ctx.phase():
             self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
                           label='log.phase')
         for parent in parents:
             self.ui.write(_("parent: %d:%s\n") % parent,
                           label='log.parent')

         if self.ui.debugflag:
             mnode = ctx.manifestnode()
             self.ui.write(_("manifest: %d:%s\n") %
                           (self.repo.manifest.rev(mnode), hex(mnode)),
                           label='ui.debug log.manifest')
         self.ui.write(_("user: %s\n") % ctx.user(),
                       label='log.user')
         self.ui.write(_("date: %s\n") % date,
                       label='log.date')

         if self.ui.debugflag:
             files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
             for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                   files):
                 if value:
                     self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                   label='ui.debug log.files')
         elif ctx.files() and self.ui.verbose:
             self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                           label='ui.note log.files')
         if copies and self.ui.verbose:
             copies = ['%s (%s)' % c for c in copies]
             self.ui.write(_("copies: %s\n") % ' '.join(copies),
                           label='ui.note log.copies')

         extra = ctx.extra()
         if extra and self.ui.debugflag:
             for key, value in sorted(extra.items()):
                 self.ui.write(_("extra: %s=%s\n")
                               % (key, value.encode('string_escape')),
                               label='ui.debug log.extra')

         description = ctx.description().strip()
         if description:
             if self.ui.verbose:
                 self.ui.write(_("description:\n"),
                               label='ui.note log.description')
                 self.ui.write(description,
                               label='ui.note log.description')
                 self.ui.write("\n\n")
             else:
                 self.ui.write(_("summary: %s\n") %
                               description.splitlines()[0],
                               label='log.summary')
         self.ui.write("\n")

         self.showpatch(changenode, matchfn)

     def showpatch(self, node, matchfn):
         if not matchfn:
             matchfn = self.patch
         if matchfn:
             stat = self.diffopts.get('stat')
             diff = self.diffopts.get('patch')
             diffopts = patch.diffopts(self.ui, self.diffopts)
             prev = self.repo.changelog.parents(node)[0]
             if stat:
                 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                                match=matchfn, stat=True)
             if diff:
                 if stat:
                     self.ui.write("\n")
                 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                                match=matchfn, stat=False)
             self.ui.write("\n")

     def _meaningful_parentrevs(self, log, rev):
         """Return list of meaningful (or all if debug) parentrevs for rev.

         For merges (two non-nullrev revisions) both parents are meaningful.
         Otherwise the first parent revision is considered meaningful if it
         is not the preceding revision.
         """
         parents = log.parentrevs(rev)
         if not self.ui.debugflag and parents[1] == nullrev:
             if parents[0] >= rev - 1:
                 parents = []
             else:
                 parents = [parents[0]]
         return parents


 class changeset_templater(changeset_printer):
     '''format changeset information.'''

     def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
         changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
         formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
         defaulttempl = {
             'parent': '{rev}:{node|formatnode} ',
             'manifest': '{rev}:{node|formatnode}',
             'file_copy': '{name} ({source})',
             'extra': '{key}={value|stringescape}'
             }
         # filecopy is preserved for compatibility reasons
         defaulttempl['filecopy'] = defaulttempl['file_copy']
         self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                      cache=defaulttempl)
         self.cache = {}

     def use_template(self, t):
         '''set template string to use'''
         self.t.cache['changeset'] = t

     def _meaningful_parentrevs(self, ctx):
         """Return list of meaningful (or all if debug) parentrevs for rev.
         """
         parents = ctx.parents()
         if len(parents) > 1:
             return parents
         if self.ui.debugflag:
             return [parents[0], self.repo['null']]
         if parents[0].rev() >= ctx.rev() - 1:
             return []
         return parents

     def _show(self, ctx, copies, matchfn, props):
         '''show a single changeset or file revision'''

         showlist = templatekw.showlist

         # showparents() behaviour depends on ui trace level which
         # causes unexpected behaviours at templating level and makes
         # it harder to extract it in a standalone function. Its
         # behaviour cannot be changed so leave it here for now.
         def showparents(**args):
             ctx = args['ctx']
             parents = [[('rev', p.rev()), ('node', p.hex())]
                        for p in self._meaningful_parentrevs(ctx)]
             return showlist('parent', parents, **args)

         props = props.copy()
         props.update(templatekw.keywords)
         props['parents'] = showparents
         props['templ'] = self.t
         props['ctx'] = ctx
         props['repo'] = self.repo
         props['revcache'] = {'copies': copies}
         props['cache'] = self.cache

         # find correct templates for current mode

         tmplmodes = [
             (True, None),
             (self.ui.verbose, 'verbose'),
             (self.ui.quiet, 'quiet'),
             (self.ui.debugflag, 'debug'),
         ]

         types = {'header': '', 'footer':'', 'changeset': 'changeset'}
         for mode, postfix in tmplmodes:
             for type in types:
                 cur = postfix and ('%s_%s' % (type, postfix)) or type
                 if mode and cur in self.t:
                     types[type] = cur

         try:

             # write header
             if types['header']:
                 h = templater.stringify(self.t(types['header'], **props))
                 if self.buffered:
                     self.header[ctx.rev()] = h
                 else:
                     if self.lastheader != h:
                         self.lastheader = h
                         self.ui.write(h)

             # write changeset metadata, then patch if requested
             key = types['changeset']
             self.ui.write(templater.stringify(self.t(key, **props)))
             self.showpatch(ctx.node(), matchfn)

             if types['footer']:
                 if not self.footer:
                     self.footer = templater.stringify(self.t(types['footer'],
                                                       **props))

         except KeyError, inst:
             msg = _("%s: no key named '%s'")
             raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
         except SyntaxError, inst:
             raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))

881 def show_changeset(ui, repo, opts, buffered=False):
886 def show_changeset(ui, repo, opts, buffered=False):
882 """show one changeset using template or regular display.
887 """show one changeset using template or regular display.
883
888
884 Display format will be the first non-empty hit of:
889 Display format will be the first non-empty hit of:
885 1. option 'template'
890 1. option 'template'
886 2. option 'style'
891 2. option 'style'
887 3. [ui] setting 'logtemplate'
892 3. [ui] setting 'logtemplate'
888 4. [ui] setting 'style'
893 4. [ui] setting 'style'
889 If all of these values are either the unset or the empty string,
894 If all of these values are either the unset or the empty string,
890 regular display via changeset_printer() is done.
895 regular display via changeset_printer() is done.
891 """
896 """
892 # options
897 # options
893 patch = False
898 patch = False
894 if opts.get('patch') or opts.get('stat'):
899 if opts.get('patch') or opts.get('stat'):
895 patch = scmutil.matchall(repo)
900 patch = scmutil.matchall(repo)
896
901
897 tmpl = opts.get('template')
902 tmpl = opts.get('template')
898 style = None
903 style = None
899 if tmpl:
904 if tmpl:
900 tmpl = templater.parsestring(tmpl, quoted=False)
905 tmpl = templater.parsestring(tmpl, quoted=False)
901 else:
906 else:
902 style = opts.get('style')
907 style = opts.get('style')
903
908
904 # ui settings
909 # ui settings
905 if not (tmpl or style):
910 if not (tmpl or style):
906 tmpl = ui.config('ui', 'logtemplate')
911 tmpl = ui.config('ui', 'logtemplate')
907 if tmpl:
912 if tmpl:
908 tmpl = templater.parsestring(tmpl)
913 tmpl = templater.parsestring(tmpl)
909 else:
914 else:
910 style = util.expandpath(ui.config('ui', 'style', ''))
915 style = util.expandpath(ui.config('ui', 'style', ''))
911
916
912 if not (tmpl or style):
917 if not (tmpl or style):
913 return changeset_printer(ui, repo, patch, opts, buffered)
918 return changeset_printer(ui, repo, patch, opts, buffered)
914
919
915 mapfile = None
920 mapfile = None
916 if style and not tmpl:
921 if style and not tmpl:
917 mapfile = style
922 mapfile = style
918 if not os.path.split(mapfile)[0]:
923 if not os.path.split(mapfile)[0]:
919 mapname = (templater.templatepath('map-cmdline.' + mapfile)
924 mapname = (templater.templatepath('map-cmdline.' + mapfile)
920 or templater.templatepath(mapfile))
925 or templater.templatepath(mapfile))
921 if mapname:
926 if mapname:
922 mapfile = mapname
927 mapfile = mapname
923
928
924 try:
929 try:
925 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
930 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
926 except SyntaxError, inst:
931 except SyntaxError, inst:
927 raise util.Abort(inst.args[0])
932 raise util.Abort(inst.args[0])
928 if tmpl:
933 if tmpl:
929 t.use_template(tmpl)
934 t.use_template(tmpl)
930 return t
935 return t
931
936
932 def finddate(ui, repo, date):
937 def finddate(ui, repo, date):
933 """Find the tipmost changeset that matches the given date spec"""
938 """Find the tipmost changeset that matches the given date spec"""
934
939
935 df = util.matchdate(date)
940 df = util.matchdate(date)
936 m = scmutil.matchall(repo)
941 m = scmutil.matchall(repo)
937 results = {}
942 results = {}
938
943
939 def prep(ctx, fns):
944 def prep(ctx, fns):
940 d = ctx.date()
945 d = ctx.date()
941 if df(d[0]):
946 if df(d[0]):
942 results[ctx.rev()] = d
947 results[ctx.rev()] = d
943
948
944 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
949 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
945 rev = ctx.rev()
950 rev = ctx.rev()
946 if rev in results:
951 if rev in results:
947 ui.status(_("Found revision %s from %s\n") %
952 ui.status(_("Found revision %s from %s\n") %
948 (rev, util.datestr(results[rev])))
953 (rev, util.datestr(results[rev])))
949 return str(rev)
954 return str(rev)
950
955
951 raise util.Abort(_("revision matching date not found"))
956 raise util.Abort(_("revision matching date not found"))
952
957
953 def walkchangerevs(repo, match, opts, prepare):
958 def walkchangerevs(repo, match, opts, prepare):
954 '''Iterate over files and the revs in which they changed.
959 '''Iterate over files and the revs in which they changed.
955
960
956 Callers most commonly need to iterate backwards over the history
961 Callers most commonly need to iterate backwards over the history
957 in which they are interested. Doing so has awful (quadratic-looking)
962 in which they are interested. Doing so has awful (quadratic-looking)
958 performance, so we use iterators in a "windowed" way.
963 performance, so we use iterators in a "windowed" way.
959
964
960 We walk a window of revisions in the desired order. Within the
965 We walk a window of revisions in the desired order. Within the
961 window, we first walk forwards to gather data, then in the desired
966 window, we first walk forwards to gather data, then in the desired
962 order (usually backwards) to display it.
967 order (usually backwards) to display it.
963
968
964 This function returns an iterator yielding contexts. Before
969 This function returns an iterator yielding contexts. Before
965 yielding each context, the iterator will first call the prepare
970 yielding each context, the iterator will first call the prepare
966 function on each context in the window in forward order.'''
971 function on each context in the window in forward order.'''
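# Minimal calling sketch, in the same shape finddate() above uses; the
# callback body is only an example of what prepare(ctx, fns) might do:
#
#   >>> def prep(ctx, fns):                                 # doctest: +SKIP
#   ...     print ctx.rev(), sorted(fns)
#   >>> m = scmutil.matchall(repo)
#   >>> for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
#   ...     pass   # every ctx has already been through prep()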
967
972
968 def increasing_windows(start, end, windowsize=8, sizelimit=512):
973 def increasing_windows(start, end, windowsize=8, sizelimit=512):
969 if start < end:
974 if start < end:
970 while start < end:
975 while start < end:
971 yield start, min(windowsize, end - start)
976 yield start, min(windowsize, end - start)
972 start += windowsize
977 start += windowsize
973 if windowsize < sizelimit:
978 if windowsize < sizelimit:
974 windowsize *= 2
979 windowsize *= 2
975 else:
980 else:
976 while start > end:
981 while start > end:
977 yield start, min(windowsize, start - end - 1)
982 yield start, min(windowsize, start - end - 1)
978 start -= windowsize
983 start -= windowsize
979 if windowsize < sizelimit:
984 if windowsize < sizelimit:
980 windowsize *= 2
985 windowsize *= 2
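# For illustration, a forward walk over 100 revisions with the defaults
# above yields these (start, length) windows:
#
#   >>> list(increasing_windows(0, 100))
#   [(0, 8), (8, 16), (24, 32), (56, 44)]
#
# The window size doubles up to sizelimit, so long histories are covered
# in a small number of geometrically growing passes.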
981
986
982 follow = opts.get('follow') or opts.get('follow_first')
987 follow = opts.get('follow') or opts.get('follow_first')
983
988
984 if not len(repo):
989 if not len(repo):
985 return []
990 return []
986
991
987 if follow:
992 if follow:
988 defrange = '%s:0' % repo['.'].rev()
993 defrange = '%s:0' % repo['.'].rev()
989 else:
994 else:
990 defrange = '-1:0'
995 defrange = '-1:0'
991 revs = scmutil.revrange(repo, opts['rev'] or [defrange])
996 revs = scmutil.revrange(repo, opts['rev'] or [defrange])
992 if not revs:
997 if not revs:
993 return []
998 return []
994 wanted = set()
999 wanted = set()
995 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1000 slowpath = match.anypats() or (match.files() and opts.get('removed'))
996 fncache = {}
1001 fncache = {}
997 change = repo.changectx
1002 change = repo.changectx
998
1003
999 # First step is to fill wanted, the set of revisions that we want to yield.
1004 # First step is to fill wanted, the set of revisions that we want to yield.
1000 # When it does not induce extra cost, we also fill fncache for revisions in
1005 # When it does not induce extra cost, we also fill fncache for revisions in
1001 # wanted: a cache of filenames that were changed (ctx.files()) and that
1006 # wanted: a cache of filenames that were changed (ctx.files()) and that
1002 # match the file filtering conditions.
1007 # match the file filtering conditions.
1003
1008
1004 if not slowpath and not match.files():
1009 if not slowpath and not match.files():
1005 # No files, no patterns. Display all revs.
1010 # No files, no patterns. Display all revs.
1006 wanted = set(revs)
1011 wanted = set(revs)
1007 copies = []
1012 copies = []
1008
1013
1009 if not slowpath and match.files():
1014 if not slowpath and match.files():
1010 # We only have to read through the filelog to find wanted revisions
1015 # We only have to read through the filelog to find wanted revisions
1011
1016
1012 minrev, maxrev = min(revs), max(revs)
1017 minrev, maxrev = min(revs), max(revs)
1013 def filerevgen(filelog, last):
1018 def filerevgen(filelog, last):
1014 """
1019 """
1015 Only files, no patterns. Check the history of each file.
1020 Only files, no patterns. Check the history of each file.
1016
1021
1017 Examines filelog entries within minrev, maxrev linkrev range
1022 Examines filelog entries within minrev, maxrev linkrev range
1018 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1023 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1019 tuples in backwards order
1024 tuples in backwards order
1020 """
1025 """
1021 cl_count = len(repo)
1026 cl_count = len(repo)
1022 revs = []
1027 revs = []
1023 for j in xrange(0, last + 1):
1028 for j in xrange(0, last + 1):
1024 linkrev = filelog.linkrev(j)
1029 linkrev = filelog.linkrev(j)
1025 if linkrev < minrev:
1030 if linkrev < minrev:
1026 continue
1031 continue
1027 # only yield revs for which we have the changelog; the filelog can be
1032 # only yield revs for which we have the changelog; the filelog can be
1028 # ahead of it while an "hg log" runs during a pull or commit
1033 # ahead of it while an "hg log" runs during a pull or commit
1029 if linkrev >= cl_count:
1034 if linkrev >= cl_count:
1030 break
1035 break
1031
1036
1032 parentlinkrevs = []
1037 parentlinkrevs = []
1033 for p in filelog.parentrevs(j):
1038 for p in filelog.parentrevs(j):
1034 if p != nullrev:
1039 if p != nullrev:
1035 parentlinkrevs.append(filelog.linkrev(p))
1040 parentlinkrevs.append(filelog.linkrev(p))
1036 n = filelog.node(j)
1041 n = filelog.node(j)
1037 revs.append((linkrev, parentlinkrevs,
1042 revs.append((linkrev, parentlinkrevs,
1038 follow and filelog.renamed(n)))
1043 follow and filelog.renamed(n)))
1039
1044
1040 return reversed(revs)
1045 return reversed(revs)
1041 def iterfiles():
1046 def iterfiles():
1042 pctx = repo['.']
1047 pctx = repo['.']
1043 for filename in match.files():
1048 for filename in match.files():
1044 if follow:
1049 if follow:
1045 if filename not in pctx:
1050 if filename not in pctx:
1046 raise util.Abort(_('cannot follow file not in parent '
1051 raise util.Abort(_('cannot follow file not in parent '
1047 'revision: "%s"') % filename)
1052 'revision: "%s"') % filename)
1048 yield filename, pctx[filename].filenode()
1053 yield filename, pctx[filename].filenode()
1049 else:
1054 else:
1050 yield filename, None
1055 yield filename, None
1051 for filename_node in copies:
1056 for filename_node in copies:
1052 yield filename_node
1057 yield filename_node
1053 for file_, node in iterfiles():
1058 for file_, node in iterfiles():
1054 filelog = repo.file(file_)
1059 filelog = repo.file(file_)
1055 if not len(filelog):
1060 if not len(filelog):
1056 if node is None:
1061 if node is None:
1057 # A zero count may be a directory or deleted file, so
1062 # A zero count may be a directory or deleted file, so
1058 # try to find matching entries on the slow path.
1063 # try to find matching entries on the slow path.
1059 if follow:
1064 if follow:
1060 raise util.Abort(
1065 raise util.Abort(
1061 _('cannot follow nonexistent file: "%s"') % file_)
1066 _('cannot follow nonexistent file: "%s"') % file_)
1062 slowpath = True
1067 slowpath = True
1063 break
1068 break
1064 else:
1069 else:
1065 continue
1070 continue
1066
1071
1067 if node is None:
1072 if node is None:
1068 last = len(filelog) - 1
1073 last = len(filelog) - 1
1069 else:
1074 else:
1070 last = filelog.rev(node)
1075 last = filelog.rev(node)
1071
1076
1072
1077
1073 # keep track of all ancestors of the file
1078 # keep track of all ancestors of the file
1074 ancestors = set([filelog.linkrev(last)])
1079 ancestors = set([filelog.linkrev(last)])
1075
1080
1076 # iterate from latest to oldest revision
1081 # iterate from latest to oldest revision
1077 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1082 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1078 if not follow:
1083 if not follow:
1079 if rev > maxrev:
1084 if rev > maxrev:
1080 continue
1085 continue
1081 else:
1086 else:
1082 # Note that last might not be the first interesting
1087 # Note that last might not be the first interesting
1083 # rev to us:
1088 # rev to us:
1084 # if the file has been changed after maxrev, we'll
1089 # if the file has been changed after maxrev, we'll
1085 # have linkrev(last) > maxrev, and we still need
1090 # have linkrev(last) > maxrev, and we still need
1086 # to explore the file graph
1091 # to explore the file graph
1087 if rev not in ancestors:
1092 if rev not in ancestors:
1088 continue
1093 continue
1089 # XXX insert 1327 fix here
1094 # XXX insert 1327 fix here
1090 if flparentlinkrevs:
1095 if flparentlinkrevs:
1091 ancestors.update(flparentlinkrevs)
1096 ancestors.update(flparentlinkrevs)
1092
1097
1093 fncache.setdefault(rev, []).append(file_)
1098 fncache.setdefault(rev, []).append(file_)
1094 wanted.add(rev)
1099 wanted.add(rev)
1095 if copied:
1100 if copied:
1096 copies.append(copied)
1101 copies.append(copied)
1097 if slowpath:
1102 if slowpath:
1098 # We have to read the changelog to match filenames against
1103 # We have to read the changelog to match filenames against
1099 # changed files
1104 # changed files
1100
1105
1101 if follow:
1106 if follow:
1102 raise util.Abort(_('can only follow copies/renames for explicit '
1107 raise util.Abort(_('can only follow copies/renames for explicit '
1103 'filenames'))
1108 'filenames'))
1104
1109
1105 # The slow path checks files modified in every changeset.
1110 # The slow path checks files modified in every changeset.
1106 for i in sorted(revs):
1111 for i in sorted(revs):
1107 ctx = change(i)
1112 ctx = change(i)
1108 matches = filter(match, ctx.files())
1113 matches = filter(match, ctx.files())
1109 if matches:
1114 if matches:
1110 fncache[i] = matches
1115 fncache[i] = matches
1111 wanted.add(i)
1116 wanted.add(i)
1112
1117
1113 class followfilter(object):
1118 class followfilter(object):
1114 def __init__(self, onlyfirst=False):
1119 def __init__(self, onlyfirst=False):
1115 self.startrev = nullrev
1120 self.startrev = nullrev
1116 self.roots = set()
1121 self.roots = set()
1117 self.onlyfirst = onlyfirst
1122 self.onlyfirst = onlyfirst
1118
1123
1119 def match(self, rev):
1124 def match(self, rev):
1120 def realparents(rev):
1125 def realparents(rev):
1121 if self.onlyfirst:
1126 if self.onlyfirst:
1122 return repo.changelog.parentrevs(rev)[0:1]
1127 return repo.changelog.parentrevs(rev)[0:1]
1123 else:
1128 else:
1124 return filter(lambda x: x != nullrev,
1129 return filter(lambda x: x != nullrev,
1125 repo.changelog.parentrevs(rev))
1130 repo.changelog.parentrevs(rev))
1126
1131
1127 if self.startrev == nullrev:
1132 if self.startrev == nullrev:
1128 self.startrev = rev
1133 self.startrev = rev
1129 return True
1134 return True
1130
1135
1131 if rev > self.startrev:
1136 if rev > self.startrev:
1132 # forward: all descendants
1137 # forward: all descendants
1133 if not self.roots:
1138 if not self.roots:
1134 self.roots.add(self.startrev)
1139 self.roots.add(self.startrev)
1135 for parent in realparents(rev):
1140 for parent in realparents(rev):
1136 if parent in self.roots:
1141 if parent in self.roots:
1137 self.roots.add(rev)
1142 self.roots.add(rev)
1138 return True
1143 return True
1139 else:
1144 else:
1140 # backwards: all parents
1145 # backwards: all parents
1141 if not self.roots:
1146 if not self.roots:
1142 self.roots.update(realparents(self.startrev))
1147 self.roots.update(realparents(self.startrev))
1143 if rev in self.roots:
1148 if rev in self.roots:
1144 self.roots.remove(rev)
1149 self.roots.remove(rev)
1145 self.roots.update(realparents(rev))
1150 self.roots.update(realparents(rev))
1146 return True
1151 return True
1147
1152
1148 return False
1153 return False
1149
1154
1150 # it might be worthwhile to do this in the iterator if the rev range
1155 # it might be worthwhile to do this in the iterator if the rev range
1151 # is descending and the prune args are all within that range
1156 # is descending and the prune args are all within that range
1152 for rev in opts.get('prune', ()):
1157 for rev in opts.get('prune', ()):
1153 rev = repo[rev].rev()
1158 rev = repo[rev].rev()
1154 ff = followfilter()
1159 ff = followfilter()
1155 stop = min(revs[0], revs[-1])
1160 stop = min(revs[0], revs[-1])
1156 for x in xrange(rev, stop - 1, -1):
1161 for x in xrange(rev, stop - 1, -1):
1157 if ff.match(x):
1162 if ff.match(x):
1158 wanted.discard(x)
1163 wanted.discard(x)
1159
1164
1160 # Now that wanted is correctly initialized, we can iterate over the
1165 # Now that wanted is correctly initialized, we can iterate over the
1161 # revision range, yielding only revisions in wanted.
1166 # revision range, yielding only revisions in wanted.
1162 def iterate():
1167 def iterate():
1163 if follow and not match.files():
1168 if follow and not match.files():
1164 ff = followfilter(onlyfirst=opts.get('follow_first'))
1169 ff = followfilter(onlyfirst=opts.get('follow_first'))
1165 def want(rev):
1170 def want(rev):
1166 return ff.match(rev) and rev in wanted
1171 return ff.match(rev) and rev in wanted
1167 else:
1172 else:
1168 def want(rev):
1173 def want(rev):
1169 return rev in wanted
1174 return rev in wanted
1170
1175
1171 for i, window in increasing_windows(0, len(revs)):
1176 for i, window in increasing_windows(0, len(revs)):
1172 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1177 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1173 for rev in sorted(nrevs):
1178 for rev in sorted(nrevs):
1174 fns = fncache.get(rev)
1179 fns = fncache.get(rev)
1175 ctx = change(rev)
1180 ctx = change(rev)
1176 if not fns:
1181 if not fns:
1177 def fns_generator():
1182 def fns_generator():
1178 for f in ctx.files():
1183 for f in ctx.files():
1179 if match(f):
1184 if match(f):
1180 yield f
1185 yield f
1181 fns = fns_generator()
1186 fns = fns_generator()
1182 prepare(ctx, fns)
1187 prepare(ctx, fns)
1183 for rev in nrevs:
1188 for rev in nrevs:
1184 yield change(rev)
1189 yield change(rev)
1185 return iterate()
1190 return iterate()
1186
1191
1187 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1192 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1188 join = lambda f: os.path.join(prefix, f)
1193 join = lambda f: os.path.join(prefix, f)
1189 bad = []
1194 bad = []
1190 oldbad = match.bad
1195 oldbad = match.bad
1191 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1196 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1192 names = []
1197 names = []
1193 wctx = repo[None]
1198 wctx = repo[None]
1194 cca = None
1199 cca = None
1195 abort, warn = scmutil.checkportabilityalert(ui)
1200 abort, warn = scmutil.checkportabilityalert(ui)
1196 if abort or warn:
1201 if abort or warn:
1197 cca = scmutil.casecollisionauditor(ui, abort, wctx)
1202 cca = scmutil.casecollisionauditor(ui, abort, wctx)
1198 for f in repo.walk(match):
1203 for f in repo.walk(match):
1199 exact = match.exact(f)
1204 exact = match.exact(f)
1200 if exact or (not explicitonly and f not in repo.dirstate):
1205 if exact or (not explicitonly and f not in repo.dirstate):
1201 if cca:
1206 if cca:
1202 cca(f)
1207 cca(f)
1203 names.append(f)
1208 names.append(f)
1204 if ui.verbose or not exact:
1209 if ui.verbose or not exact:
1205 ui.status(_('adding %s\n') % match.rel(join(f)))
1210 ui.status(_('adding %s\n') % match.rel(join(f)))
1206
1211
1207 for subpath in wctx.substate:
1212 for subpath in wctx.substate:
1208 sub = wctx.sub(subpath)
1213 sub = wctx.sub(subpath)
1209 try:
1214 try:
1210 submatch = matchmod.narrowmatcher(subpath, match)
1215 submatch = matchmod.narrowmatcher(subpath, match)
1211 if listsubrepos:
1216 if listsubrepos:
1212 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1217 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1213 False))
1218 False))
1214 else:
1219 else:
1215 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1220 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1216 True))
1221 True))
1217 except error.LookupError:
1222 except error.LookupError:
1218 ui.status(_("skipping missing subrepository: %s\n")
1223 ui.status(_("skipping missing subrepository: %s\n")
1219 % join(subpath))
1224 % join(subpath))
1220
1225
1221 if not dryrun:
1226 if not dryrun:
1222 rejected = wctx.add(names, prefix)
1227 rejected = wctx.add(names, prefix)
1223 bad.extend(f for f in rejected if f in match.files())
1228 bad.extend(f for f in rejected if f in match.files())
1224 return bad
1229 return bad
1225
1230
1226 def forget(ui, repo, match, prefix, explicitonly):
1231 def forget(ui, repo, match, prefix, explicitonly):
1227 join = lambda f: os.path.join(prefix, f)
1232 join = lambda f: os.path.join(prefix, f)
1228 bad = []
1233 bad = []
1229 oldbad = match.bad
1234 oldbad = match.bad
1230 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1235 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1231 wctx = repo[None]
1236 wctx = repo[None]
1232 forgot = []
1237 forgot = []
1233 s = repo.status(match=match, clean=True)
1238 s = repo.status(match=match, clean=True)
1234 forget = sorted(s[0] + s[1] + s[3] + s[6])
1239 forget = sorted(s[0] + s[1] + s[3] + s[6])
1235 if explicitonly:
1240 if explicitonly:
1236 forget = [f for f in forget if match.exact(f)]
1241 forget = [f for f in forget if match.exact(f)]
1237
1242
1238 for subpath in wctx.substate:
1243 for subpath in wctx.substate:
1239 sub = wctx.sub(subpath)
1244 sub = wctx.sub(subpath)
1240 try:
1245 try:
1241 submatch = matchmod.narrowmatcher(subpath, match)
1246 submatch = matchmod.narrowmatcher(subpath, match)
1242 subbad, subforgot = sub.forget(ui, submatch, prefix)
1247 subbad, subforgot = sub.forget(ui, submatch, prefix)
1243 bad.extend([subpath + '/' + f for f in subbad])
1248 bad.extend([subpath + '/' + f for f in subbad])
1244 forgot.extend([subpath + '/' + f for f in subforgot])
1249 forgot.extend([subpath + '/' + f for f in subforgot])
1245 except error.LookupError:
1250 except error.LookupError:
1246 ui.status(_("skipping missing subrepository: %s\n")
1251 ui.status(_("skipping missing subrepository: %s\n")
1247 % join(subpath))
1252 % join(subpath))
1248
1253
1249 if not explicitonly:
1254 if not explicitonly:
1250 for f in match.files():
1255 for f in match.files():
1251 if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
1256 if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
1252 if f not in forgot:
1257 if f not in forgot:
1253 if os.path.exists(match.rel(join(f))):
1258 if os.path.exists(match.rel(join(f))):
1254 ui.warn(_('not removing %s: '
1259 ui.warn(_('not removing %s: '
1255 'file is already untracked\n')
1260 'file is already untracked\n')
1256 % match.rel(join(f)))
1261 % match.rel(join(f)))
1257 bad.append(f)
1262 bad.append(f)
1258
1263
1259 for f in forget:
1264 for f in forget:
1260 if ui.verbose or not match.exact(f):
1265 if ui.verbose or not match.exact(f):
1261 ui.status(_('removing %s\n') % match.rel(join(f)))
1266 ui.status(_('removing %s\n') % match.rel(join(f)))
1262
1267
1263 rejected = wctx.forget(forget, prefix)
1268 rejected = wctx.forget(forget, prefix)
1264 bad.extend(f for f in rejected if f in match.files())
1269 bad.extend(f for f in rejected if f in match.files())
1265 forgot.extend(forget)
1270 forgot.extend(forget)
1266 return bad, forgot
1271 return bad, forgot
1267
1272
1268 def duplicatecopies(repo, rev, p1):
1273 def duplicatecopies(repo, rev, p1):
1269 "Reproduce copies found in the source revision in the dirstate for grafts"
1274 "Reproduce copies found in the source revision in the dirstate for grafts"
1270 for dst, src in copies.pathcopies(repo[p1], repo[rev]).iteritems():
1275 for dst, src in copies.pathcopies(repo[p1], repo[rev]).iteritems():
1271 repo.dirstate.copy(src, dst)
1276 repo.dirstate.copy(src, dst)
1272
1277
1273 def commit(ui, repo, commitfunc, pats, opts):
1278 def commit(ui, repo, commitfunc, pats, opts):
1274 '''commit the specified files or all outstanding changes'''
1279 '''commit the specified files or all outstanding changes'''
1275 date = opts.get('date')
1280 date = opts.get('date')
1276 if date:
1281 if date:
1277 opts['date'] = util.parsedate(date)
1282 opts['date'] = util.parsedate(date)
1278 message = logmessage(ui, opts)
1283 message = logmessage(ui, opts)
1279
1284
1280 # extract addremove carefully -- this function can be called from a command
1285 # extract addremove carefully -- this function can be called from a command
1281 # that doesn't support addremove
1286 # that doesn't support addremove
1282 if opts.get('addremove'):
1287 if opts.get('addremove'):
1283 scmutil.addremove(repo, pats, opts)
1288 scmutil.addremove(repo, pats, opts)
1284
1289
1285 return commitfunc(ui, repo, message,
1290 return commitfunc(ui, repo, message,
1286 scmutil.match(repo[None], pats, opts), opts)
1291 scmutil.match(repo[None], pats, opts), opts)
1287
1292
1288 def amend(ui, repo, commitfunc, old, extra, pats, opts):
1293 def amend(ui, repo, commitfunc, old, extra, pats, opts):
1289 ui.note(_('amending changeset %s\n') % old)
1294 ui.note(_('amending changeset %s\n') % old)
1290 base = old.p1()
1295 base = old.p1()
1291
1296
1292 wlock = repo.wlock()
1297 wlock = repo.wlock()
1293 try:
1298 try:
1294 # Fix up dirstate for copies and renames
1299 # Fix up dirstate for copies and renames
1295 duplicatecopies(repo, None, base.node())
1300 duplicatecopies(repo, None, base.node())
1296
1301
1297 # First, do a regular commit to record all changes in the working
1302 # First, do a regular commit to record all changes in the working
1298 # directory (if there are any)
1303 # directory (if there are any)
1299 node = commit(ui, repo, commitfunc, pats, opts)
1304 node = commit(ui, repo, commitfunc, pats, opts)
1300 ctx = repo[node]
1305 ctx = repo[node]
1301
1306
1302 # Participating changesets:
1307 # Participating changesets:
1303 #
1308 #
1304 # node/ctx o - new (intermediate) commit that contains changes from
1309 # node/ctx o - new (intermediate) commit that contains changes from
1305 # | working dir to go into amending commit (or a workingctx
1310 # | working dir to go into amending commit (or a workingctx
1306 # | if there were no changes)
1311 # | if there were no changes)
1307 # |
1312 # |
1308 # old o - changeset to amend
1313 # old o - changeset to amend
1309 # |
1314 # |
1310 # base o - parent of amending changeset
1315 # base o - parent of amending changeset
1311
1316
1312 files = set(old.files())
1317 files = set(old.files())
1313
1318
1314 # Second, we use either the commit we just did, or if there were no
1319 # Second, we use either the commit we just did, or if there were no
1315 # changes the parent of the working directory as the version of the
1320 # changes the parent of the working directory as the version of the
1316 # files in the final amend commit
1321 # files in the final amend commit
1317 if node:
1322 if node:
1318 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
1323 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
1319
1324
1320 user = ctx.user()
1325 user = ctx.user()
1321 date = ctx.date()
1326 date = ctx.date()
1322 message = ctx.description()
1327 message = ctx.description()
1323 extra = ctx.extra()
1328 extra = ctx.extra()
1324
1329
1325 # Prune files which were reverted by the updates: if old introduced
1330 # Prune files which were reverted by the updates: if old introduced
1326 # file X and our intermediate commit, node, renamed that file, then
1331 # file X and our intermediate commit, node, renamed that file, then
1327 # those two files are the same and we can discard X from our list
1332 # those two files are the same and we can discard X from our list
1328 # of files. Likewise if X was deleted, it's no longer relevant
1333 # of files. Likewise if X was deleted, it's no longer relevant
1329 files.update(ctx.files())
1334 files.update(ctx.files())
1330
1335
1331 def samefile(f):
1336 def samefile(f):
1332 if f in ctx.manifest():
1337 if f in ctx.manifest():
1333 a = ctx.filectx(f)
1338 a = ctx.filectx(f)
1334 if f in base.manifest():
1339 if f in base.manifest():
1335 b = base.filectx(f)
1340 b = base.filectx(f)
1336 return (a.data() == b.data()
1341 return (a.data() == b.data()
1337 and a.flags() == b.flags()
1342 and a.flags() == b.flags()
1338 and a.renamed() == b.renamed())
1343 and a.renamed() == b.renamed())
1339 else:
1344 else:
1340 return False
1345 return False
1341 else:
1346 else:
1342 return f not in base.manifest()
1347 return f not in base.manifest()
1343 files = [f for f in files if not samefile(f)]
1348 files = [f for f in files if not samefile(f)]
1344
1349
1345 def filectxfn(repo, ctx_, path):
1350 def filectxfn(repo, ctx_, path):
1346 try:
1351 try:
1347 return ctx.filectx(path)
1352 return ctx.filectx(path)
1348 except KeyError:
1353 except KeyError:
1349 raise IOError()
1354 raise IOError()
1350 else:
1355 else:
1351 ui.note(_('copying changeset %s to %s\n') % (old, base))
1356 ui.note(_('copying changeset %s to %s\n') % (old, base))
1352
1357
1353 # Use version of files as in the old cset
1358 # Use version of files as in the old cset
1354 def filectxfn(repo, ctx_, path):
1359 def filectxfn(repo, ctx_, path):
1355 try:
1360 try:
1356 return old.filectx(path)
1361 return old.filectx(path)
1357 except KeyError:
1362 except KeyError:
1358 raise IOError()
1363 raise IOError()
1359
1364
1360 # See if we got a message from -m or -l, if not, open the editor
1365 # See if we got a message from -m or -l, if not, open the editor
1361 # with the message of the changeset to amend
1366 # with the message of the changeset to amend
1362 user = opts.get('user') or old.user()
1367 user = opts.get('user') or old.user()
1363 date = opts.get('date') or old.date()
1368 date = opts.get('date') or old.date()
1364 message = logmessage(ui, opts)
1369 message = logmessage(ui, opts)
1365 if not message:
1370 if not message:
1366 cctx = context.workingctx(repo, old.description(), user, date,
1371 cctx = context.workingctx(repo, old.description(), user, date,
1367 extra,
1372 extra,
1368 repo.status(base.node(), old.node()))
1373 repo.status(base.node(), old.node()))
1369 message = commitforceeditor(repo, cctx, [])
1374 message = commitforceeditor(repo, cctx, [])
1370
1375
1371 new = context.memctx(repo,
1376 new = context.memctx(repo,
1372 parents=[base.node(), nullid],
1377 parents=[base.node(), nullid],
1373 text=message,
1378 text=message,
1374 files=files,
1379 files=files,
1375 filectxfn=filectxfn,
1380 filectxfn=filectxfn,
1376 user=user,
1381 user=user,
1377 date=date,
1382 date=date,
1378 extra=extra)
1383 extra=extra)
1379 newid = repo.commitctx(new)
1384 newid = repo.commitctx(new)
1380 if newid != old.node():
1385 if newid != old.node():
1381 # Reroute the working copy parent to the new changeset
1386 # Reroute the working copy parent to the new changeset
1382 repo.dirstate.setparents(newid, nullid)
1387 repo.dirstate.setparents(newid, nullid)
1383
1388
1384 # Move bookmarks from old parent to amend commit
1389 # Move bookmarks from old parent to amend commit
1385 bms = repo.nodebookmarks(old.node())
1390 bms = repo.nodebookmarks(old.node())
1386 if bms:
1391 if bms:
1387 for bm in bms:
1392 for bm in bms:
1388 repo._bookmarks[bm] = newid
1393 repo._bookmarks[bm] = newid
1389 bookmarks.write(repo)
1394 bookmarks.write(repo)
1390
1395
1391 # Strip the intermediate commit (if there was one) and the amended
1396 # Strip the intermediate commit (if there was one) and the amended
1392 # commit
1397 # commit
1393 lock = repo.lock()
1398 lock = repo.lock()
1394 try:
1399 try:
1395 if node:
1400 if node:
1396 ui.note(_('stripping intermediate changeset %s\n') % ctx)
1401 ui.note(_('stripping intermediate changeset %s\n') % ctx)
1397 ui.note(_('stripping amended changeset %s\n') % old)
1402 ui.note(_('stripping amended changeset %s\n') % old)
1398 repair.strip(ui, repo, old.node(), topic='amend-backup')
1403 repair.strip(ui, repo, old.node(), topic='amend-backup')
1399 finally:
1404 finally:
1400 lock.release()
1405 lock.release()
1401 finally:
1406 finally:
1402 wlock.release()
1407 wlock.release()
1403 return newid
1408 return newid
1404
1409
1405 def commiteditor(repo, ctx, subs):
1410 def commiteditor(repo, ctx, subs):
1406 if ctx.description():
1411 if ctx.description():
1407 return ctx.description()
1412 return ctx.description()
1408 return commitforceeditor(repo, ctx, subs)
1413 return commitforceeditor(repo, ctx, subs)
1409
1414
1410 def commitforceeditor(repo, ctx, subs):
1415 def commitforceeditor(repo, ctx, subs):
1411 edittext = []
1416 edittext = []
1412 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1417 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1413 if ctx.description():
1418 if ctx.description():
1414 edittext.append(ctx.description())
1419 edittext.append(ctx.description())
1415 edittext.append("")
1420 edittext.append("")
1416 edittext.append("") # Empty line between message and comments.
1421 edittext.append("") # Empty line between message and comments.
1417 edittext.append(_("HG: Enter commit message."
1422 edittext.append(_("HG: Enter commit message."
1418 " Lines beginning with 'HG:' are removed."))
1423 " Lines beginning with 'HG:' are removed."))
1419 edittext.append(_("HG: Leave message empty to abort commit."))
1424 edittext.append(_("HG: Leave message empty to abort commit."))
1420 edittext.append("HG: --")
1425 edittext.append("HG: --")
1421 edittext.append(_("HG: user: %s") % ctx.user())
1426 edittext.append(_("HG: user: %s") % ctx.user())
1422 if ctx.p2():
1427 if ctx.p2():
1423 edittext.append(_("HG: branch merge"))
1428 edittext.append(_("HG: branch merge"))
1424 if ctx.branch():
1429 if ctx.branch():
1425 edittext.append(_("HG: branch '%s'") % ctx.branch())
1430 edittext.append(_("HG: branch '%s'") % ctx.branch())
1426 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1431 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1427 edittext.extend([_("HG: added %s") % f for f in added])
1432 edittext.extend([_("HG: added %s") % f for f in added])
1428 edittext.extend([_("HG: changed %s") % f for f in modified])
1433 edittext.extend([_("HG: changed %s") % f for f in modified])
1429 edittext.extend([_("HG: removed %s") % f for f in removed])
1434 edittext.extend([_("HG: removed %s") % f for f in removed])
1430 if not added and not modified and not removed:
1435 if not added and not modified and not removed:
1431 edittext.append(_("HG: no files changed"))
1436 edittext.append(_("HG: no files changed"))
1432 edittext.append("")
1437 edittext.append("")
1433 # run editor in the repository root
1438 # run editor in the repository root
1434 olddir = os.getcwd()
1439 olddir = os.getcwd()
1435 os.chdir(repo.root)
1440 os.chdir(repo.root)
1436 text = repo.ui.edit("\n".join(edittext), ctx.user())
1441 text = repo.ui.edit("\n".join(edittext), ctx.user())
1437 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1442 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1438 os.chdir(olddir)
1443 os.chdir(olddir)
1439
1444
1440 if not text.strip():
1445 if not text.strip():
1441 raise util.Abort(_("empty commit message"))
1446 raise util.Abort(_("empty commit message"))
1442
1447
1443 return text
1448 return text
1444
1449
1445 def revert(ui, repo, ctx, parents, *pats, **opts):
1450 def revert(ui, repo, ctx, parents, *pats, **opts):
1446 parent, p2 = parents
1451 parent, p2 = parents
1447 node = ctx.node()
1452 node = ctx.node()
1448
1453
1449 mf = ctx.manifest()
1454 mf = ctx.manifest()
1450 if node == parent:
1455 if node == parent:
1451 pmf = mf
1456 pmf = mf
1452 else:
1457 else:
1453 pmf = None
1458 pmf = None
1454
1459
1455 # need all matching names in dirstate and manifest of target rev,
1460 # need all matching names in dirstate and manifest of target rev,
1456 # so we have to walk both. do not print errors if files exist in one
1461 # so we have to walk both. do not print errors if files exist in one
1457 # but not the other.
1462 # but not the other.
1458
1463
1459 names = {}
1464 names = {}
1460
1465
1461 wlock = repo.wlock()
1466 wlock = repo.wlock()
1462 try:
1467 try:
1463 # walk dirstate.
1468 # walk dirstate.
1464
1469
1465 m = scmutil.match(repo[None], pats, opts)
1470 m = scmutil.match(repo[None], pats, opts)
1466 m.bad = lambda x, y: False
1471 m.bad = lambda x, y: False
1467 for abs in repo.walk(m):
1472 for abs in repo.walk(m):
1468 names[abs] = m.rel(abs), m.exact(abs)
1473 names[abs] = m.rel(abs), m.exact(abs)
1469
1474
1470 # walk target manifest.
1475 # walk target manifest.
1471
1476
1472 def badfn(path, msg):
1477 def badfn(path, msg):
1473 if path in names:
1478 if path in names:
1474 return
1479 return
1475 if path in repo[node].substate:
1480 if path in repo[node].substate:
1476 return
1481 return
1477 path_ = path + '/'
1482 path_ = path + '/'
1478 for f in names:
1483 for f in names:
1479 if f.startswith(path_):
1484 if f.startswith(path_):
1480 return
1485 return
1481 ui.warn("%s: %s\n" % (m.rel(path), msg))
1486 ui.warn("%s: %s\n" % (m.rel(path), msg))
1482
1487
1483 m = scmutil.match(repo[node], pats, opts)
1488 m = scmutil.match(repo[node], pats, opts)
1484 m.bad = badfn
1489 m.bad = badfn
1485 for abs in repo[node].walk(m):
1490 for abs in repo[node].walk(m):
1486 if abs not in names:
1491 if abs not in names:
1487 names[abs] = m.rel(abs), m.exact(abs)
1492 names[abs] = m.rel(abs), m.exact(abs)
1488
1493
1489 # get the list of subrepos that must be reverted
1494 # get the list of subrepos that must be reverted
1490 targetsubs = [s for s in repo[node].substate if m(s)]
1495 targetsubs = [s for s in repo[node].substate if m(s)]
1491 m = scmutil.matchfiles(repo, names)
1496 m = scmutil.matchfiles(repo, names)
1492 changes = repo.status(match=m)[:4]
1497 changes = repo.status(match=m)[:4]
1493 modified, added, removed, deleted = map(set, changes)
1498 modified, added, removed, deleted = map(set, changes)
1494
1499
1495 # if f is a rename, also revert the source
1500 # if f is a rename, also revert the source
1496 cwd = repo.getcwd()
1501 cwd = repo.getcwd()
1497 for f in added:
1502 for f in added:
1498 src = repo.dirstate.copied(f)
1503 src = repo.dirstate.copied(f)
1499 if src and src not in names and repo.dirstate[src] == 'r':
1504 if src and src not in names and repo.dirstate[src] == 'r':
1500 removed.add(src)
1505 removed.add(src)
1501 names[src] = (repo.pathto(src, cwd), True)
1506 names[src] = (repo.pathto(src, cwd), True)
1502
1507
1503 def removeforget(abs):
1508 def removeforget(abs):
1504 if repo.dirstate[abs] == 'a':
1509 if repo.dirstate[abs] == 'a':
1505 return _('forgetting %s\n')
1510 return _('forgetting %s\n')
1506 return _('removing %s\n')
1511 return _('removing %s\n')
1507
1512
1508 revert = ([], _('reverting %s\n'))
1513 revert = ([], _('reverting %s\n'))
1509 add = ([], _('adding %s\n'))
1514 add = ([], _('adding %s\n'))
1510 remove = ([], removeforget)
1515 remove = ([], removeforget)
1511 undelete = ([], _('undeleting %s\n'))
1516 undelete = ([], _('undeleting %s\n'))
1512
1517
1513 disptable = (
1518 disptable = (
1514 # dispatch table:
1519 # dispatch table:
1515 # file state
1520 # file state
1516 # action if in target manifest
1521 # action if in target manifest
1517 # action if not in target manifest
1522 # action if not in target manifest
1518 # make backup if in target manifest
1523 # make backup if in target manifest
1519 # make backup if not in target manifest
1524 # make backup if not in target manifest
1520 (modified, revert, remove, True, True),
1525 (modified, revert, remove, True, True),
1521 (added, revert, remove, True, False),
1526 (added, revert, remove, True, False),
1522 (removed, undelete, None, False, False),
1527 (removed, undelete, None, False, False),
1523 (deleted, revert, remove, False, False),
1528 (deleted, revert, remove, False, False),
1524 )
1529 )
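# Reading the table row by row as a worked example (see handle() below):
#
#   modified file, in target manifest      -> handle(revert, True)   backup
#   modified file, not in target manifest  -> handle(remove, True)   backup
#   added file, not in target manifest     -> handle(remove, False)  no backup
#   removed file, in target manifest       -> handle(undelete, False)
#   removed file, not in target manifest   -> no action (misslist is None)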
1525
1530
1526 for abs, (rel, exact) in sorted(names.items()):
1531 for abs, (rel, exact) in sorted(names.items()):
1527 mfentry = mf.get(abs)
1532 mfentry = mf.get(abs)
1528 target = repo.wjoin(abs)
1533 target = repo.wjoin(abs)
1529 def handle(xlist, dobackup):
1534 def handle(xlist, dobackup):
1530 xlist[0].append(abs)
1535 xlist[0].append(abs)
1531 if (dobackup and not opts.get('no_backup') and
1536 if (dobackup and not opts.get('no_backup') and
1532 os.path.lexists(target)):
1537 os.path.lexists(target)):
1533 bakname = "%s.orig" % rel
1538 bakname = "%s.orig" % rel
1534 ui.note(_('saving current version of %s as %s\n') %
1539 ui.note(_('saving current version of %s as %s\n') %
1535 (rel, bakname))
1540 (rel, bakname))
1536 if not opts.get('dry_run'):
1541 if not opts.get('dry_run'):
1537 util.rename(target, bakname)
1542 util.rename(target, bakname)
1538 if ui.verbose or not exact:
1543 if ui.verbose or not exact:
1539 msg = xlist[1]
1544 msg = xlist[1]
1540 if not isinstance(msg, basestring):
1545 if not isinstance(msg, basestring):
1541 msg = msg(abs)
1546 msg = msg(abs)
1542 ui.status(msg % rel)
1547 ui.status(msg % rel)
1543 for table, hitlist, misslist, backuphit, backupmiss in disptable:
1548 for table, hitlist, misslist, backuphit, backupmiss in disptable:
1544 if abs not in table:
1549 if abs not in table:
1545 continue
1550 continue
1546 # file has changed in dirstate
1551 # file has changed in dirstate
1547 if mfentry:
1552 if mfentry:
1548 handle(hitlist, backuphit)
1553 handle(hitlist, backuphit)
1549 elif misslist is not None:
1554 elif misslist is not None:
1550 handle(misslist, backupmiss)
1555 handle(misslist, backupmiss)
1551 break
1556 break
1552 else:
1557 else:
1553 if abs not in repo.dirstate:
1558 if abs not in repo.dirstate:
1554 if mfentry:
1559 if mfentry:
1555 handle(add, True)
1560 handle(add, True)
1556 elif exact:
1561 elif exact:
1557 ui.warn(_('file not managed: %s\n') % rel)
1562 ui.warn(_('file not managed: %s\n') % rel)
1558 continue
1563 continue
1559 # file has not changed in dirstate
1564 # file has not changed in dirstate
1560 if node == parent:
1565 if node == parent:
1561 if exact:
1566 if exact:
1562 ui.warn(_('no changes needed to %s\n') % rel)
1567 ui.warn(_('no changes needed to %s\n') % rel)
1563 continue
1568 continue
1564 if pmf is None:
1569 if pmf is None:
1565 # only need parent manifest in this unlikely case,
1570 # only need parent manifest in this unlikely case,
1566 # so do not read by default
1571 # so do not read by default
1567 pmf = repo[parent].manifest()
1572 pmf = repo[parent].manifest()
1568 if abs in pmf and mfentry:
1573 if abs in pmf and mfentry:
1569 # if version of file is same in parent and target
1574 # if version of file is same in parent and target
1570 # manifests, do nothing
1575 # manifests, do nothing
1571 if (pmf[abs] != mfentry or
1576 if (pmf[abs] != mfentry or
1572 pmf.flags(abs) != mf.flags(abs)):
1577 pmf.flags(abs) != mf.flags(abs)):
1573 handle(revert, False)
1578 handle(revert, False)
1574 else:
1579 else:
1575 handle(remove, False)
1580 handle(remove, False)
1576
1581
1577 if not opts.get('dry_run'):
1582 if not opts.get('dry_run'):
1578 def checkout(f):
1583 def checkout(f):
1579 fc = ctx[f]
1584 fc = ctx[f]
1580 repo.wwrite(f, fc.data(), fc.flags())
1585 repo.wwrite(f, fc.data(), fc.flags())
1581
1586
1582 audit_path = scmutil.pathauditor(repo.root)
1587 audit_path = scmutil.pathauditor(repo.root)
1583 for f in remove[0]:
1588 for f in remove[0]:
1584 if repo.dirstate[f] == 'a':
1589 if repo.dirstate[f] == 'a':
1585 repo.dirstate.drop(f)
1590 repo.dirstate.drop(f)
1586 continue
1591 continue
1587 audit_path(f)
1592 audit_path(f)
1588 try:
1593 try:
1589 util.unlinkpath(repo.wjoin(f))
1594 util.unlinkpath(repo.wjoin(f))
1590 except OSError:
1595 except OSError:
1591 pass
1596 pass
1592 repo.dirstate.remove(f)
1597 repo.dirstate.remove(f)
1593
1598
1594 normal = None
1599 normal = None
1595 if node == parent:
1600 if node == parent:
1596 # We're reverting to our parent. If possible, we'd like status
1601 # We're reverting to our parent. If possible, we'd like status
1597 # to report the file as clean. We have to use normallookup for
1602 # to report the file as clean. We have to use normallookup for
1598 # merges to avoid losing information about merged/dirty files.
1603 # merges to avoid losing information about merged/dirty files.
1599 if p2 != nullid:
1604 if p2 != nullid:
1600 normal = repo.dirstate.normallookup
1605 normal = repo.dirstate.normallookup
1601 else:
1606 else:
1602 normal = repo.dirstate.normal
1607 normal = repo.dirstate.normal
1603 for f in revert[0]:
1608 for f in revert[0]:
1604 checkout(f)
1609 checkout(f)
1605 if normal:
1610 if normal:
1606 normal(f)
1611 normal(f)
1607
1612
1608 for f in add[0]:
1613 for f in add[0]:
1609 checkout(f)
1614 checkout(f)
1610 repo.dirstate.add(f)
1615 repo.dirstate.add(f)
1611
1616
1612 normal = repo.dirstate.normallookup
1617 normal = repo.dirstate.normallookup
1613 if node == parent and p2 == nullid:
1618 if node == parent and p2 == nullid:
1614 normal = repo.dirstate.normal
1619 normal = repo.dirstate.normal
1615 for f in undelete[0]:
1620 for f in undelete[0]:
1616 checkout(f)
1621 checkout(f)
1617 normal(f)
1622 normal(f)
1618
1623
1619 if targetsubs:
1624 if targetsubs:
1620 # Revert the subrepos on the revert list
1625 # Revert the subrepos on the revert list
1621 for sub in targetsubs:
1626 for sub in targetsubs:
1622 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
1627 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
1623 finally:
1628 finally:
1624 wlock.release()
1629 wlock.release()
1625
1630
1626 def command(table):
1631 def command(table):
1627 '''returns a function object bound to table which can be used as
1632 '''returns a function object bound to table which can be used as
1628 a decorator for populating table as a command table'''
1633 a decorator for populating table as a command table'''
1629
1634
1630 def cmd(name, options, synopsis=None):
1635 def cmd(name, options, synopsis=None):
1631 def decorator(func):
1636 def decorator(func):
1632 if synopsis:
1637 if synopsis:
1633 table[name] = func, options[:], synopsis
1638 table[name] = func, options[:], synopsis
1634 else:
1639 else:
1635 table[name] = func, options[:]
1640 table[name] = func, options[:]
1636 return func
1641 return func
1637 return decorator
1642 return decorator
1638
1643
1639 return cmd
1644 return cmd
@@ -1,767 +1,783 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 import errno
7 import errno
8
8
9 from node import nullid
9 from node import nullid
10 from i18n import _
10 from i18n import _
11 import scmutil, util, ignore, osutil, parsers, encoding
11 import scmutil, util, ignore, osutil, parsers, encoding
12 import struct, os, stat
12 import struct, os, stat
13 import cStringIO
13 import cStringIO
14
14
15 _format = ">cllll"
15 _format = ">cllll"
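# This appears to be the on-disk dirstate record layout: one state byte
# followed by four big-endian 32-bit integers (mode, size, mtime, and the
# length of the file name that follows the header). A hypothetical entry:
#
#   >>> import struct
#   >>> struct.pack(_format, 'n', 0100644, 12, 1335000000, len('a/b')) + 'a/b'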
16 propertycache = util.propertycache
16 propertycache = util.propertycache
17 filecache = scmutil.filecache
17 filecache = scmutil.filecache
18
18
19 class repocache(filecache):
19 class repocache(filecache):
20 """filecache for files in .hg/"""
20 """filecache for files in .hg/"""
21 def join(self, obj, fname):
21 def join(self, obj, fname):
22 return obj._opener.join(fname)
22 return obj._opener.join(fname)
23
23
24 class rootcache(filecache):
24 class rootcache(filecache):
25 """filecache for files in the repository root"""
25 """filecache for files in the repository root"""
26 def join(self, obj, fname):
26 def join(self, obj, fname):
27 return obj._join(fname)
27 return obj._join(fname)
28
28
29 def _finddirs(path):
29 def _finddirs(path):
30 pos = path.rfind('/')
30 pos = path.rfind('/')
31 while pos != -1:
31 while pos != -1:
32 yield path[:pos]
32 yield path[:pos]
33 pos = path.rfind('/', 0, pos)
33 pos = path.rfind('/', 0, pos)
34
34
35 def _incdirs(dirs, path):
35 def _incdirs(dirs, path):
36 for base in _finddirs(path):
36 for base in _finddirs(path):
37 if base in dirs:
37 if base in dirs:
38 dirs[base] += 1
38 dirs[base] += 1
39 return
39 return
40 dirs[base] = 1
40 dirs[base] = 1
41
41
42 def _decdirs(dirs, path):
42 def _decdirs(dirs, path):
43 for base in _finddirs(path):
43 for base in _finddirs(path):
44 if dirs[base] > 1:
44 if dirs[base] > 1:
45 dirs[base] -= 1
45 dirs[base] -= 1
46 return
46 return
47 del dirs[base]
47 del dirs[base]
48
48
49 class dirstate(object):
49 class dirstate(object):
50
50
51 def __init__(self, opener, ui, root, validate):
51 def __init__(self, opener, ui, root, validate):
52 '''Create a new dirstate object.
52 '''Create a new dirstate object.
53
53
54 opener is an open()-like callable that can be used to open the
54 opener is an open()-like callable that can be used to open the
55 dirstate file; root is the root of the directory tracked by
55 dirstate file; root is the root of the directory tracked by
56 the dirstate.
56 the dirstate.
57 '''
57 '''
58 self._opener = opener
58 self._opener = opener
59 self._validate = validate
59 self._validate = validate
60 self._root = root
60 self._root = root
61 self._rootdir = os.path.join(root, '')
61 self._rootdir = os.path.join(root, '')
62 self._dirty = False
62 self._dirty = False
63 self._dirtypl = False
63 self._dirtypl = False
64 self._lastnormaltime = 0
64 self._lastnormaltime = 0
65 self._ui = ui
65 self._ui = ui
66 self._filecache = {}
66 self._filecache = {}
67
67
68 @propertycache
68 @propertycache
69 def _map(self):
69 def _map(self):
70 '''Return the dirstate contents as a map from filename to
70 '''Return the dirstate contents as a map from filename to
71 (state, mode, size, time).'''
71 (state, mode, size, time).'''
72 self._read()
72 self._read()
73 return self._map
73 return self._map
74
74
75 @propertycache
75 @propertycache
76 def _copymap(self):
76 def _copymap(self):
77 self._read()
77 self._read()
78 return self._copymap
78 return self._copymap
79
79
80 @propertycache
80 @propertycache
81 def _foldmap(self):
81 def _foldmap(self):
82 f = {}
82 f = {}
83 for name in self._map:
83 for name in self._map:
84 f[util.normcase(name)] = name
84 f[util.normcase(name)] = name
85 for name in self._dirs:
85 for name in self._dirs:
86 f[util.normcase(name)] = name
86 f[util.normcase(name)] = name
87 f['.'] = '.' # prevents useless util.fspath() invocation
87 f['.'] = '.' # prevents useless util.fspath() invocation
88 return f
88 return f
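# _foldmap maps the case-normalized spelling of every tracked file and
# directory to the spelling recorded in the dirstate, e.g. (assumed
# entries, on a case-insensitive filesystem):
#
#   {'foo': 'Foo', 'foo/bar.txt': 'Foo/BAR.txt', '.': '.'}
#
# so a lookup for a differently-cased path can recover the canonical form.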
89
89
90 @repocache('branch')
90 @repocache('branch')
91 def _branch(self):
91 def _branch(self):
92 try:
92 try:
93 return self._opener.read("branch").strip() or "default"
93 return self._opener.read("branch").strip() or "default"
94 except IOError, inst:
94 except IOError, inst:
95 if inst.errno != errno.ENOENT:
95 if inst.errno != errno.ENOENT:
96 raise
96 raise
97 return "default"
97 return "default"
98
98
99 @propertycache
99 @propertycache
100 def _pl(self):
100 def _pl(self):
101 try:
101 try:
102 fp = self._opener("dirstate")
102 fp = self._opener("dirstate")
103 st = fp.read(40)
103 st = fp.read(40)
104 fp.close()
104 fp.close()
105 l = len(st)
105 l = len(st)
106 if l == 40:
106 if l == 40:
107 return st[:20], st[20:40]
107 return st[:20], st[20:40]
108 elif l > 0 and l < 40:
108 elif l > 0 and l < 40:
109 raise util.Abort(_('working directory state appears damaged!'))
109 raise util.Abort(_('working directory state appears damaged!'))
110 except IOError, err:
110 except IOError, err:
111 if err.errno != errno.ENOENT:
111 if err.errno != errno.ENOENT:
112 raise
112 raise
113 return [nullid, nullid]
113 return [nullid, nullid]
114
114
115 @propertycache
115 @propertycache
116 def _dirs(self):
116 def _dirs(self):
117 dirs = {}
117 dirs = {}
118 for f, s in self._map.iteritems():
118 for f, s in self._map.iteritems():
119 if s[0] != 'r':
119 if s[0] != 'r':
120 _incdirs(dirs, f)
120 _incdirs(dirs, f)
121 return dirs
121 return dirs
122
122
123 def dirs(self):
123 def dirs(self):
124 return self._dirs
124 return self._dirs
125
125
126 @rootcache('.hgignore')
126 @rootcache('.hgignore')
127 def _ignore(self):
127 def _ignore(self):
128 files = [self._join('.hgignore')]
128 files = [self._join('.hgignore')]
129 for name, path in self._ui.configitems("ui"):
129 for name, path in self._ui.configitems("ui"):
130 if name == 'ignore' or name.startswith('ignore.'):
130 if name == 'ignore' or name.startswith('ignore.'):
131 files.append(util.expandpath(path))
131 files.append(util.expandpath(path))
132 return ignore.ignore(self._root, files, self._ui.warn)
132 return ignore.ignore(self._root, files, self._ui.warn)
133
133
134 @propertycache
134 @propertycache
135 def _slash(self):
135 def _slash(self):
136 return self._ui.configbool('ui', 'slash') and os.sep != '/'
136 return self._ui.configbool('ui', 'slash') and os.sep != '/'
137
137
138 @propertycache
138 @propertycache
139 def _checklink(self):
139 def _checklink(self):
140 return util.checklink(self._root)
140 return util.checklink(self._root)
141
141
142 @propertycache
142 @propertycache
143 def _checkexec(self):
143 def _checkexec(self):
144 return util.checkexec(self._root)
144 return util.checkexec(self._root)
145
145
146 @propertycache
146 @propertycache
147 def _checkcase(self):
147 def _checkcase(self):
148 return not util.checkcase(self._join('.hg'))
148 return not util.checkcase(self._join('.hg'))
149
149
150 def _join(self, f):
150 def _join(self, f):
151 # much faster than os.path.join()
151 # much faster than os.path.join()
152 # it's safe because f is always a relative path
152 # it's safe because f is always a relative path
153 return self._rootdir + f
153 return self._rootdir + f
154
154
155 def flagfunc(self, buildfallback):
155 def flagfunc(self, buildfallback):
156 if self._checklink and self._checkexec:
156 if self._checklink and self._checkexec:
157 def f(x):
157 def f(x):
158 p = self._join(x)
158 p = self._join(x)
159 if os.path.islink(p):
159 if os.path.islink(p):
160 return 'l'
160 return 'l'
161 if util.isexec(p):
161 if util.isexec(p):
162 return 'x'
162 return 'x'
163 return ''
163 return ''
164 return f
164 return f
165
165
166 fallback = buildfallback()
166 fallback = buildfallback()
167 if self._checklink:
167 if self._checklink:
168 def f(x):
168 def f(x):
169 if os.path.islink(self._join(x)):
169 if os.path.islink(self._join(x)):
170 return 'l'
170 return 'l'
171 if 'x' in fallback(x):
171 if 'x' in fallback(x):
172 return 'x'
172 return 'x'
173 return ''
173 return ''
174 return f
174 return f
175 if self._checkexec:
175 if self._checkexec:
176 def f(x):
176 def f(x):
177 if 'l' in fallback(x):
177 if 'l' in fallback(x):
178 return 'l'
178 return 'l'
179 if util.isexec(self._join(x)):
179 if util.isexec(self._join(x)):
180 return 'x'
180 return 'x'
181 return ''
181 return ''
182 return f
182 return f
183 else:
183 else:
184 return fallback
184 return fallback
185
185
186 def getcwd(self):
186 def getcwd(self):
187 cwd = os.getcwd()
187 cwd = os.getcwd()
188 if cwd == self._root:
188 if cwd == self._root:
189 return ''
189 return ''
190 # self._root ends with a path separator if self._root is '/' or 'C:\'
190 # self._root ends with a path separator if self._root is '/' or 'C:\'
191 rootsep = self._root
191 rootsep = self._root
192 if not util.endswithsep(rootsep):
192 if not util.endswithsep(rootsep):
193 rootsep += os.sep
193 rootsep += os.sep
194 if cwd.startswith(rootsep):
194 if cwd.startswith(rootsep):
195 return cwd[len(rootsep):]
195 return cwd[len(rootsep):]
196 else:
196 else:
197 # we're outside the repo. return an absolute path.
197 # we're outside the repo. return an absolute path.
198 return cwd
198 return cwd
199
199
200 def pathto(self, f, cwd=None):
200 def pathto(self, f, cwd=None):
201 if cwd is None:
201 if cwd is None:
202 cwd = self.getcwd()
202 cwd = self.getcwd()
203 path = util.pathto(self._root, cwd, f)
203 path = util.pathto(self._root, cwd, f)
204 if self._slash:
204 if self._slash:
205 return util.normpath(path)
205 return util.normpath(path)
206 return path
206 return path
207
207
208 def __getitem__(self, key):
208 def __getitem__(self, key):
209 '''Return the current state of key (a filename) in the dirstate.
209 '''Return the current state of key (a filename) in the dirstate.
210
210
211 States are:
211 States are:
212 n normal
212 n normal
213 m needs merging
213 m needs merging
214 r marked for removal
214 r marked for removal
215 a marked for addition
215 a marked for addition
216 ? not tracked
216 ? not tracked
217 '''
217 '''
218 return self._map.get(key, ("?",))[0]
218 return self._map.get(key, ("?",))[0]
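# Usage sketch, using the states listed in the docstring above (paths are
# hypothetical):
#
#   >>> repo.dirstate['tracked/file.py']    # doctest: +SKIP
#   'n'
#   >>> repo.dirstate['not/tracked']        # doctest: +SKIP
#   '?'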
219
219
220 def __contains__(self, key):
220 def __contains__(self, key):
221 return key in self._map
221 return key in self._map
222
222
223 def __iter__(self):
223 def __iter__(self):
224 for x in sorted(self._map):
224 for x in sorted(self._map):
225 yield x
225 yield x
226
226
227 def parents(self):
227 def parents(self):
228 return [self._validate(p) for p in self._pl]
228 return [self._validate(p) for p in self._pl]
229
229
230 def p1(self):
230 def p1(self):
231 return self._validate(self._pl[0])
231 return self._validate(self._pl[0])
232
232
233 def p2(self):
233 def p2(self):
234 return self._validate(self._pl[1])
234 return self._validate(self._pl[1])
235
235
236 def branch(self):
236 def branch(self):
237 return encoding.tolocal(self._branch)
237 return encoding.tolocal(self._branch)
238
238
239 def setparents(self, p1, p2=nullid):
239 def setparents(self, p1, p2=nullid):
240 self._dirty = self._dirtypl = True
240 self._dirty = self._dirtypl = True
241 oldp2 = self._pl[1]
241 oldp2 = self._pl[1]
242 self._pl = p1, p2
242 self._pl = p1, p2
243 if oldp2 != nullid and p2 == nullid:
243 if oldp2 != nullid and p2 == nullid:
244 # Discard 'm' markers when moving away from a merge state
244 # Discard 'm' markers when moving away from a merge state
245 for f, s in self._map.iteritems():
245 for f, s in self._map.iteritems():
246 if s[0] == 'm':
246 if s[0] == 'm':
247 self.normallookup(f)
247 self.normallookup(f)
248
248
249 def setbranch(self, branch):
249 def setbranch(self, branch):
250 if branch in ['tip', '.', 'null']:
250 if branch in ['tip', '.', 'null']:
251 raise util.Abort(_('the name \'%s\' is reserved') % branch)
251 raise util.Abort(_('the name \'%s\' is reserved') % branch)
252 self._branch = encoding.fromlocal(branch)
252 self._branch = encoding.fromlocal(branch)
253 f = self._opener('branch', 'w', atomictemp=True)
253 f = self._opener('branch', 'w', atomictemp=True)
254 try:
254 try:
255 f.write(self._branch + '\n')
255 f.write(self._branch + '\n')
256 finally:
256 finally:
257 f.close()
257 f.close()
258
258
259 def _read(self):
259 def _read(self):
260 self._map = {}
260 self._map = {}
261 self._copymap = {}
261 self._copymap = {}
262 try:
262 try:
263 st = self._opener.read("dirstate")
263 st = self._opener.read("dirstate")
264 except IOError, err:
264 except IOError, err:
265 if err.errno != errno.ENOENT:
265 if err.errno != errno.ENOENT:
266 raise
266 raise
267 return
267 return
268 if not st:
268 if not st:
269 return
269 return
270
270
271 p = parsers.parse_dirstate(self._map, self._copymap, st)
271 p = parsers.parse_dirstate(self._map, self._copymap, st)
272 if not self._dirtypl:
272 if not self._dirtypl:
273 self._pl = p
273 self._pl = p
274
274
275 def invalidate(self):
275 def invalidate(self):
276 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
276 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
277 "_ignore"):
277 "_ignore"):
278 if a in self.__dict__:
278 if a in self.__dict__:
279 delattr(self, a)
279 delattr(self, a)
280 self._lastnormaltime = 0
280 self._lastnormaltime = 0
281 self._dirty = False
281 self._dirty = False
282
282
283 def copy(self, source, dest):
283 def copy(self, source, dest):
284 """Mark dest as a copy of source. Unmark dest if source is None."""
284 """Mark dest as a copy of source. Unmark dest if source is None."""
285 if source == dest:
285 if source == dest:
286 return
286 return
287 self._dirty = True
287 self._dirty = True
288 if source is not None:
288 if source is not None:
289 self._copymap[dest] = source
289 self._copymap[dest] = source
290 elif dest in self._copymap:
290 elif dest in self._copymap:
291 del self._copymap[dest]
291 del self._copymap[dest]
292
292
293 def copied(self, file):
293 def copied(self, file):
294 return self._copymap.get(file, None)
294 return self._copymap.get(file, None)
295
295
296 def copies(self):
296 def copies(self):
297 return self._copymap
297 return self._copymap
298
298
299 def _droppath(self, f):
299 def _droppath(self, f):
300 if self[f] not in "?r" and "_dirs" in self.__dict__:
300 if self[f] not in "?r" and "_dirs" in self.__dict__:
301 _decdirs(self._dirs, f)
301 _decdirs(self._dirs, f)
302
302
303 def _addpath(self, f, check=False):
303 def _addpath(self, f, check=False):
304 oldstate = self[f]
304 oldstate = self[f]
305 if check or oldstate == "r":
305 if check or oldstate == "r":
306 scmutil.checkfilename(f)
306 scmutil.checkfilename(f)
307 if f in self._dirs:
307 if f in self._dirs:
308 raise util.Abort(_('directory %r already in dirstate') % f)
308 raise util.Abort(_('directory %r already in dirstate') % f)
309 # shadows
309 # shadows
310 for d in _finddirs(f):
310 for d in _finddirs(f):
311 if d in self._dirs:
311 if d in self._dirs:
312 break
312 break
313 if d in self._map and self[d] != 'r':
313 if d in self._map and self[d] != 'r':
314 raise util.Abort(
314 raise util.Abort(
315 _('file %r in dirstate clashes with %r') % (d, f))
315 _('file %r in dirstate clashes with %r') % (d, f))
316 if oldstate in "?r" and "_dirs" in self.__dict__:
316 if oldstate in "?r" and "_dirs" in self.__dict__:
317 _incdirs(self._dirs, f)
317 _incdirs(self._dirs, f)
318
318
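The shadow check above rejects adding a path whose leading directory is already tracked as a file (or vice versa). A standalone sketch, with finddirs as an assumed stand-in for the module-level _finddirs helper:

    # A standalone sketch mirroring the shadow check above; finddirs here is an
    # assumed stand-in for dirstate's _finddirs helper.
    def finddirs(path):
        pos = path.rfind('/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind('/', 0, pos)

    tracked = {'a': 'n'}        # pretend file 'a' is already tracked and not removed
    candidate = 'a/b'
    for d in finddirs(candidate):
        if d in tracked and tracked[d] != 'r':
            print "file %r in dirstate clashes with %r" % (d, candidate)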
319 def normal(self, f):
319 def normal(self, f):
320 '''Mark a file normal and clean.'''
320 '''Mark a file normal and clean.'''
321 self._dirty = True
321 self._dirty = True
322 self._addpath(f)
322 self._addpath(f)
323 s = os.lstat(self._join(f))
323 s = os.lstat(self._join(f))
324 mtime = int(s.st_mtime)
324 mtime = int(s.st_mtime)
325 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
325 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
326 if f in self._copymap:
326 if f in self._copymap:
327 del self._copymap[f]
327 del self._copymap[f]
328 if mtime > self._lastnormaltime:
328 if mtime > self._lastnormaltime:
329 # Remember the most recent modification timeslot for status(),
329 # Remember the most recent modification timeslot for status(),
330 # to make sure we won't miss future size-preserving file content
330 # to make sure we won't miss future size-preserving file content
331 # modifications that happen within the same timeslot.
331 # modifications that happen within the same timeslot.
332 self._lastnormaltime = mtime
332 self._lastnormaltime = mtime
333
333
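The _lastnormaltime bookkeeping above exists because filesystem mtimes are only second-granular: a file can change content without changing size inside the very slot dirstate just recorded. A standalone sketch (plain Python, not Mercurial code) of that ambiguity:

    import os, tempfile

    # Two same-size writes landing in the same whole-second mtime slot are
    # indistinguishable by (size, mtime) alone, so status() must fall back to
    # comparing contents for files recorded in that slot.
    fd, path = tempfile.mkstemp()
    os.close(fd)
    f = open(path, 'w')
    f.write('aaaa')
    f.close()
    st1 = os.lstat(path)
    f = open(path, 'w')
    f.write('bbbb')            # same size, usually the same int(st_mtime)
    f.close()
    st2 = os.lstat(path)
    same = (st1.st_size, int(st1.st_mtime)) == (st2.st_size, int(st2.st_mtime))
    print same                 # often True: stat data alone cannot tell them apart
    os.unlink(path)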
334 def normallookup(self, f):
334 def normallookup(self, f):
335 '''Mark a file normal, but possibly dirty.'''
335 '''Mark a file normal, but possibly dirty.'''
336 if self._pl[1] != nullid and f in self._map:
336 if self._pl[1] != nullid and f in self._map:
337 # if there is a merge going on and the file was either
337 # if there is a merge going on and the file was either
338 # in state 'm' (-1) or coming from other parent (-2) before
338 # in state 'm' (-1) or coming from other parent (-2) before
339 # being removed, restore that state.
339 # being removed, restore that state.
340 entry = self._map[f]
340 entry = self._map[f]
341 if entry[0] == 'r' and entry[2] in (-1, -2):
341 if entry[0] == 'r' and entry[2] in (-1, -2):
342 source = self._copymap.get(f)
342 source = self._copymap.get(f)
343 if entry[2] == -1:
343 if entry[2] == -1:
344 self.merge(f)
344 self.merge(f)
345 elif entry[2] == -2:
345 elif entry[2] == -2:
346 self.otherparent(f)
346 self.otherparent(f)
347 if source:
347 if source:
348 self.copy(source, f)
348 self.copy(source, f)
349 return
349 return
350 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
350 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
351 return
351 return
352 self._dirty = True
352 self._dirty = True
353 self._addpath(f)
353 self._addpath(f)
354 self._map[f] = ('n', 0, -1, -1)
354 self._map[f] = ('n', 0, -1, -1)
355 if f in self._copymap:
355 if f in self._copymap:
356 del self._copymap[f]
356 del self._copymap[f]
357
357
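Following up on the comment above: during a merge, a removed entry keeps -1 or -2 in its size field, and normallookup() uses that value to restore the 'm' or other-parent state. A plain-dict sketch (not the real dirstate) of that encoding:

    # How the size field of a removed entry encodes its pre-removal merge state.
    MERGESTATE = {-1: "was in state 'm'", -2: 'came from the other parent'}

    dmap = {'foo': ('r', 0, -1, 0),     # removed while merged
            'bar': ('r', 0, -2, 0),     # removed, taken from the other parent
            'baz': ('r', 0, 0, 0)}      # ordinary removal

    for f, (state, mode, size, mtime) in sorted(dmap.items()):
        if state == 'r' and size in MERGESTATE:
            print '%s: restore entry that %s' % (f, MERGESTATE[size])
        else:
            print '%s: plain removed file' % f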
358 def otherparent(self, f):
358 def otherparent(self, f):
359 '''Mark as coming from the other parent, always dirty.'''
359 '''Mark as coming from the other parent, always dirty.'''
360 if self._pl[1] == nullid:
360 if self._pl[1] == nullid:
361 raise util.Abort(_("setting %r to other parent "
361 raise util.Abort(_("setting %r to other parent "
362 "only allowed in merges") % f)
362 "only allowed in merges") % f)
363 self._dirty = True
363 self._dirty = True
364 self._addpath(f)
364 self._addpath(f)
365 self._map[f] = ('n', 0, -2, -1)
365 self._map[f] = ('n', 0, -2, -1)
366 if f in self._copymap:
366 if f in self._copymap:
367 del self._copymap[f]
367 del self._copymap[f]
368
368
369 def add(self, f):
369 def add(self, f):
370 '''Mark a file added.'''
370 '''Mark a file added.'''
371 self._dirty = True
371 self._dirty = True
372 self._addpath(f, True)
372 self._addpath(f, True)
373 self._map[f] = ('a', 0, -1, -1)
373 self._map[f] = ('a', 0, -1, -1)
374 if f in self._copymap:
374 if f in self._copymap:
375 del self._copymap[f]
375 del self._copymap[f]
376
376
377 def remove(self, f):
377 def remove(self, f):
378 '''Mark a file removed.'''
378 '''Mark a file removed.'''
379 self._dirty = True
379 self._dirty = True
380 self._droppath(f)
380 self._droppath(f)
381 size = 0
381 size = 0
382 if self._pl[1] != nullid and f in self._map:
382 if self._pl[1] != nullid and f in self._map:
383 # backup the previous state
383 # backup the previous state
384 entry = self._map[f]
384 entry = self._map[f]
385 if entry[0] == 'm': # merge
385 if entry[0] == 'm': # merge
386 size = -1
386 size = -1
387 elif entry[0] == 'n' and entry[2] == -2: # other parent
387 elif entry[0] == 'n' and entry[2] == -2: # other parent
388 size = -2
388 size = -2
389 self._map[f] = ('r', 0, size, 0)
389 self._map[f] = ('r', 0, size, 0)
390 if size == 0 and f in self._copymap:
390 if size == 0 and f in self._copymap:
391 del self._copymap[f]
391 del self._copymap[f]
392
392
393 def merge(self, f):
393 def merge(self, f):
394 '''Mark a file merged.'''
394 '''Mark a file merged.'''
395 if self._pl[1] == nullid:
395 if self._pl[1] == nullid:
396 return self.normallookup(f)
396 return self.normallookup(f)
397 self._dirty = True
397 self._dirty = True
398 s = os.lstat(self._join(f))
398 s = os.lstat(self._join(f))
399 self._addpath(f)
399 self._addpath(f)
400 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
400 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
401 if f in self._copymap:
401 if f in self._copymap:
402 del self._copymap[f]
402 del self._copymap[f]
403
403
404 def drop(self, f):
404 def drop(self, f):
405 '''Drop a file from the dirstate'''
405 '''Drop a file from the dirstate'''
406 if f in self._map:
406 if f in self._map:
407 self._dirty = True
407 self._dirty = True
408 self._droppath(f)
408 self._droppath(f)
409 del self._map[f]
409 del self._map[f]
410
410
411 def _normalize(self, path, isknown):
411 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
412 normed = util.normcase(path)
412 normed = util.normcase(path)
413 folded = self._foldmap.get(normed, None)
413 folded = self._foldmap.get(normed, None)
414 if folded is None:
414 if folded is None:
415 if isknown or not os.path.lexists(os.path.join(self._root, path)):
415 if isknown:
416 folded = path
417 else:
418 if exists is None:
419 exists = os.path.lexists(os.path.join(self._root, path))
420 if not exists:
421 # Maybe a path component exists
422 if not ignoremissing and '/' in path:
423 d, f = path.rsplit('/', 1)
424 d = self._normalize(d, isknown, ignoremissing, None)
425 folded = d + "/" + f
426 else:
427 # No path components, preserve original case
416 folded = path
428 folded = path
417 else:
429 else:
418 # recursively normalize leading directory components
430 # recursively normalize leading directory components
419 # against dirstate
431 # against dirstate
420 if '/' in normed:
432 if '/' in normed:
421 d, f = normed.rsplit('/', 1)
433 d, f = normed.rsplit('/', 1)
422 d = self._normalize(d, isknown)
434 d = self._normalize(d, isknown, ignoremissing, True)
423 r = self._root + "/" + d
435 r = self._root + "/" + d
424 folded = d + "/" + util.fspath(f, r)
436 folded = d + "/" + util.fspath(f, r)
425 else:
437 else:
426 folded = util.fspath(normed, self._root)
438 folded = util.fspath(normed, self._root)
427 self._foldmap[normed] = folded
439 self._foldmap[normed] = folded
428
440
429 return folded
441 return folded
430
442
431 def normalize(self, path, isknown=False):
443 def normalize(self, path, isknown=False, ignoremissing=False):
432 '''
444 '''
433 normalize the case of a pathname when on a casefolding filesystem
445 normalize the case of a pathname when on a casefolding filesystem
434
446
435 isknown specifies whether the filename came from walking the
447 isknown specifies whether the filename came from walking the
436 disk, to avoid extra filesystem access
448 disk, to avoid extra filesystem access.
449
450 If ignoremissing is True, missing paths are returned
451 unchanged. Otherwise, we try harder to normalize path
452 components that might exist on disk.
437
453
438 The normalized case is determined based on the following precedence:
454 The normalized case is determined based on the following precedence:
439
455
440 - version of name already stored in the dirstate
456 - version of name already stored in the dirstate
441 - version of name stored on disk
457 - version of name stored on disk
442 - version provided via command arguments
458 - version provided via command arguments
443 '''
459 '''
444
460
445 if self._checkcase:
461 if self._checkcase:
446 return self._normalize(path, isknown)
462 return self._normalize(path, isknown, ignoremissing)
447 return path
463 return path
448
464
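A minimal standalone sketch of the precedence described in the docstring above, assuming os.path.normcase as a stand-in for util.normcase and a simple directory scan in place of util.fspath; a missing path keeps the caller's case, which is what the issue3402 fix relies on:

    import os

    def normalizecase(path, foldmap, root):
        normed = os.path.normcase(path)
        if normed in foldmap:
            return foldmap[normed]                  # 1. spelling already in the dirstate
        full = os.path.join(root, path)
        if os.path.lexists(full):
            d, f = os.path.split(full)
            for name in os.listdir(d):
                if os.path.normcase(name) == os.path.normcase(f):
                    return os.path.join(os.path.dirname(path), name)   # 2. on-disk spelling
        return path                                 # 3. missing: preserve the given case

    print normalizecase('FOO/Bar', {}, '.')         # prints 'FOO/Bar' when it does not exist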
449 def clear(self):
465 def clear(self):
450 self._map = {}
466 self._map = {}
451 if "_dirs" in self.__dict__:
467 if "_dirs" in self.__dict__:
452 delattr(self, "_dirs")
468 delattr(self, "_dirs")
453 self._copymap = {}
469 self._copymap = {}
454 self._pl = [nullid, nullid]
470 self._pl = [nullid, nullid]
455 self._lastnormaltime = 0
471 self._lastnormaltime = 0
456 self._dirty = True
472 self._dirty = True
457
473
458 def rebuild(self, parent, files):
474 def rebuild(self, parent, files):
459 self.clear()
475 self.clear()
460 for f in files:
476 for f in files:
461 if 'x' in files.flags(f):
477 if 'x' in files.flags(f):
462 self._map[f] = ('n', 0777, -1, 0)
478 self._map[f] = ('n', 0777, -1, 0)
463 else:
479 else:
464 self._map[f] = ('n', 0666, -1, 0)
480 self._map[f] = ('n', 0666, -1, 0)
465 self._pl = (parent, nullid)
481 self._pl = (parent, nullid)
466 self._dirty = True
482 self._dirty = True
467
483
468 def write(self):
484 def write(self):
469 if not self._dirty:
485 if not self._dirty:
470 return
486 return
471 st = self._opener("dirstate", "w", atomictemp=True)
487 st = self._opener("dirstate", "w", atomictemp=True)
472
488
473 # use the modification time of the newly created temporary file as the
489 # use the modification time of the newly created temporary file as the
474 # filesystem's notion of 'now'
490 # filesystem's notion of 'now'
475 now = int(util.fstat(st).st_mtime)
491 now = int(util.fstat(st).st_mtime)
476
492
477 cs = cStringIO.StringIO()
493 cs = cStringIO.StringIO()
478 copymap = self._copymap
494 copymap = self._copymap
479 pack = struct.pack
495 pack = struct.pack
480 write = cs.write
496 write = cs.write
481 write("".join(self._pl))
497 write("".join(self._pl))
482 for f, e in self._map.iteritems():
498 for f, e in self._map.iteritems():
483 if e[0] == 'n' and e[3] == now:
499 if e[0] == 'n' and e[3] == now:
484 # The file was last modified "simultaneously" with the current
500 # The file was last modified "simultaneously" with the current
485 # write to dirstate (i.e. within the same second for file-
501 # write to dirstate (i.e. within the same second for file-
486 # systems with a granularity of 1 sec). This commonly happens
502 # systems with a granularity of 1 sec). This commonly happens
487 # for at least a couple of files on 'update'.
503 # for at least a couple of files on 'update'.
488 # The user could change the file without changing its size
504 # The user could change the file without changing its size
489 # within the same second. Invalidate the file's stat data in
505 # within the same second. Invalidate the file's stat data in
490 # dirstate, forcing future 'status' calls to compare the
506 # dirstate, forcing future 'status' calls to compare the
491 # contents of the file. This prevents mistakenly treating such
507 # contents of the file. This prevents mistakenly treating such
492 # files as clean.
508 # files as clean.
493 e = (e[0], 0, -1, -1) # mark entry as 'unset'
509 e = (e[0], 0, -1, -1) # mark entry as 'unset'
494 self._map[f] = e
510 self._map[f] = e
495
511
496 if f in copymap:
512 if f in copymap:
497 f = "%s\0%s" % (f, copymap[f])
513 f = "%s\0%s" % (f, copymap[f])
498 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
514 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
499 write(e)
515 write(e)
500 write(f)
516 write(f)
501 st.write(cs.getvalue())
517 st.write(cs.getvalue())
502 st.close()
518 st.close()
503 self._lastnormaltime = 0
519 self._lastnormaltime = 0
504 self._dirty = self._dirtypl = False
520 self._dirty = self._dirtypl = False
505
521
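A standalone sketch of the 'unset' trick above: any normal entry stamped with the same second as the dirstate file itself has its stat data discarded, forcing a content comparison on the next status:

    import time

    # Entries whose recorded mtime equals the dirstate file's own mtime cannot be
    # trusted, because the file could still change within that second without
    # changing size.
    def scrub(dmap, now):
        for f, e in dmap.items():
            if e[0] == 'n' and e[3] == now:
                dmap[f] = (e[0], 0, -1, -1)     # mark entry as 'unset'
        return dmap

    now = int(time.time())
    dmap = {'racy': ('n', 0644, 4, now),        # written in the same second as the dirstate
            'settled': ('n', 0644, 4, now - 10)}
    print scrub(dmap, now)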
506 def _dirignore(self, f):
522 def _dirignore(self, f):
507 if f == '.':
523 if f == '.':
508 return False
524 return False
509 if self._ignore(f):
525 if self._ignore(f):
510 return True
526 return True
511 for p in _finddirs(f):
527 for p in _finddirs(f):
512 if self._ignore(p):
528 if self._ignore(p):
513 return True
529 return True
514 return False
530 return False
515
531
516 def walk(self, match, subrepos, unknown, ignored):
532 def walk(self, match, subrepos, unknown, ignored):
517 '''
533 '''
518 Walk recursively through the directory tree, finding all files
534 Walk recursively through the directory tree, finding all files
519 matched by match.
535 matched by match.
520
536
521 Return a dict mapping filename to stat-like object (either
537 Return a dict mapping filename to stat-like object (either
522 mercurial.osutil.stat instance or return value of os.stat()).
538 mercurial.osutil.stat instance or return value of os.stat()).
523 '''
539 '''
524
540
525 def fwarn(f, msg):
541 def fwarn(f, msg):
526 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
542 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
527 return False
543 return False
528
544
529 def badtype(mode):
545 def badtype(mode):
530 kind = _('unknown')
546 kind = _('unknown')
531 if stat.S_ISCHR(mode):
547 if stat.S_ISCHR(mode):
532 kind = _('character device')
548 kind = _('character device')
533 elif stat.S_ISBLK(mode):
549 elif stat.S_ISBLK(mode):
534 kind = _('block device')
550 kind = _('block device')
535 elif stat.S_ISFIFO(mode):
551 elif stat.S_ISFIFO(mode):
536 kind = _('fifo')
552 kind = _('fifo')
537 elif stat.S_ISSOCK(mode):
553 elif stat.S_ISSOCK(mode):
538 kind = _('socket')
554 kind = _('socket')
539 elif stat.S_ISDIR(mode):
555 elif stat.S_ISDIR(mode):
540 kind = _('directory')
556 kind = _('directory')
541 return _('unsupported file type (type is %s)') % kind
557 return _('unsupported file type (type is %s)') % kind
542
558
543 ignore = self._ignore
559 ignore = self._ignore
544 dirignore = self._dirignore
560 dirignore = self._dirignore
545 if ignored:
561 if ignored:
546 ignore = util.never
562 ignore = util.never
547 dirignore = util.never
563 dirignore = util.never
548 elif not unknown:
564 elif not unknown:
549 # if unknown and ignored are False, skip step 2
565 # if unknown and ignored are False, skip step 2
550 ignore = util.always
566 ignore = util.always
551 dirignore = util.always
567 dirignore = util.always
552
568
553 matchfn = match.matchfn
569 matchfn = match.matchfn
554 badfn = match.bad
570 badfn = match.bad
555 dmap = self._map
571 dmap = self._map
556 normpath = util.normpath
572 normpath = util.normpath
557 listdir = osutil.listdir
573 listdir = osutil.listdir
558 lstat = os.lstat
574 lstat = os.lstat
559 getkind = stat.S_IFMT
575 getkind = stat.S_IFMT
560 dirkind = stat.S_IFDIR
576 dirkind = stat.S_IFDIR
561 regkind = stat.S_IFREG
577 regkind = stat.S_IFREG
562 lnkkind = stat.S_IFLNK
578 lnkkind = stat.S_IFLNK
563 join = self._join
579 join = self._join
564 work = []
580 work = []
565 wadd = work.append
581 wadd = work.append
566
582
567 exact = skipstep3 = False
583 exact = skipstep3 = False
568 if matchfn == match.exact: # match.exact
584 if matchfn == match.exact: # match.exact
569 exact = True
585 exact = True
570 dirignore = util.always # skip step 2
586 dirignore = util.always # skip step 2
571 elif match.files() and not match.anypats(): # match.match, no patterns
587 elif match.files() and not match.anypats(): # match.match, no patterns
572 skipstep3 = True
588 skipstep3 = True
573
589
574 if not exact and self._checkcase:
590 if not exact and self._checkcase:
575 normalize = self._normalize
591 normalize = self._normalize
576 skipstep3 = False
592 skipstep3 = False
577 else:
593 else:
578 normalize = lambda x, y: x
594 normalize = lambda x, y, z: x
579
595
580 files = sorted(match.files())
596 files = sorted(match.files())
581 subrepos.sort()
597 subrepos.sort()
582 i, j = 0, 0
598 i, j = 0, 0
583 while i < len(files) and j < len(subrepos):
599 while i < len(files) and j < len(subrepos):
584 subpath = subrepos[j] + "/"
600 subpath = subrepos[j] + "/"
585 if files[i] < subpath:
601 if files[i] < subpath:
586 i += 1
602 i += 1
587 continue
603 continue
588 while i < len(files) and files[i].startswith(subpath):
604 while i < len(files) and files[i].startswith(subpath):
589 del files[i]
605 del files[i]
590 j += 1
606 j += 1
591
607
592 if not files or '.' in files:
608 if not files or '.' in files:
593 files = ['']
609 files = ['']
594 results = dict.fromkeys(subrepos)
610 results = dict.fromkeys(subrepos)
595 results['.hg'] = None
611 results['.hg'] = None
596
612
597 # step 1: find all explicit files
613 # step 1: find all explicit files
598 for ff in files:
614 for ff in files:
599 nf = normalize(normpath(ff), False)
615 nf = normalize(normpath(ff), False, True)
600 if nf in results:
616 if nf in results:
601 continue
617 continue
602
618
603 try:
619 try:
604 st = lstat(join(nf))
620 st = lstat(join(nf))
605 kind = getkind(st.st_mode)
621 kind = getkind(st.st_mode)
606 if kind == dirkind:
622 if kind == dirkind:
607 skipstep3 = False
623 skipstep3 = False
608 if nf in dmap:
624 if nf in dmap:
609 #file deleted on disk but still in dirstate
625 #file deleted on disk but still in dirstate
610 results[nf] = None
626 results[nf] = None
611 match.dir(nf)
627 match.dir(nf)
612 if not dirignore(nf):
628 if not dirignore(nf):
613 wadd(nf)
629 wadd(nf)
614 elif kind == regkind or kind == lnkkind:
630 elif kind == regkind or kind == lnkkind:
615 results[nf] = st
631 results[nf] = st
616 else:
632 else:
617 badfn(ff, badtype(kind))
633 badfn(ff, badtype(kind))
618 if nf in dmap:
634 if nf in dmap:
619 results[nf] = None
635 results[nf] = None
620 except OSError, inst:
636 except OSError, inst:
621 if nf in dmap: # does it exactly match a file?
637 if nf in dmap: # does it exactly match a file?
622 results[nf] = None
638 results[nf] = None
623 else: # does it match a directory?
639 else: # does it match a directory?
624 prefix = nf + "/"
640 prefix = nf + "/"
625 for fn in dmap:
641 for fn in dmap:
626 if fn.startswith(prefix):
642 if fn.startswith(prefix):
627 match.dir(nf)
643 match.dir(nf)
628 skipstep3 = False
644 skipstep3 = False
629 break
645 break
630 else:
646 else:
631 badfn(ff, inst.strerror)
647 badfn(ff, inst.strerror)
632
648
633 # step 2: visit subdirectories
649 # step 2: visit subdirectories
634 while work:
650 while work:
635 nd = work.pop()
651 nd = work.pop()
636 skip = None
652 skip = None
637 if nd == '.':
653 if nd == '.':
638 nd = ''
654 nd = ''
639 else:
655 else:
640 skip = '.hg'
656 skip = '.hg'
641 try:
657 try:
642 entries = listdir(join(nd), stat=True, skip=skip)
658 entries = listdir(join(nd), stat=True, skip=skip)
643 except OSError, inst:
659 except OSError, inst:
644 if inst.errno == errno.EACCES:
660 if inst.errno == errno.EACCES:
645 fwarn(nd, inst.strerror)
661 fwarn(nd, inst.strerror)
646 continue
662 continue
647 raise
663 raise
648 for f, kind, st in entries:
664 for f, kind, st in entries:
649 nf = normalize(nd and (nd + "/" + f) or f, True)
665 nf = normalize(nd and (nd + "/" + f) or f, True, True)
650 if nf not in results:
666 if nf not in results:
651 if kind == dirkind:
667 if kind == dirkind:
652 if not ignore(nf):
668 if not ignore(nf):
653 match.dir(nf)
669 match.dir(nf)
654 wadd(nf)
670 wadd(nf)
655 if nf in dmap and matchfn(nf):
671 if nf in dmap and matchfn(nf):
656 results[nf] = None
672 results[nf] = None
657 elif kind == regkind or kind == lnkkind:
673 elif kind == regkind or kind == lnkkind:
658 if nf in dmap:
674 if nf in dmap:
659 if matchfn(nf):
675 if matchfn(nf):
660 results[nf] = st
676 results[nf] = st
661 elif matchfn(nf) and not ignore(nf):
677 elif matchfn(nf) and not ignore(nf):
662 results[nf] = st
678 results[nf] = st
663 elif nf in dmap and matchfn(nf):
679 elif nf in dmap and matchfn(nf):
664 results[nf] = None
680 results[nf] = None
665
681
666 # step 3: report unseen items in the dmap hash
682 # step 3: report unseen items in the dmap hash
667 if not skipstep3 and not exact:
683 if not skipstep3 and not exact:
668 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
684 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
669 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
685 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
670 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
686 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
671 st = None
687 st = None
672 results[nf] = st
688 results[nf] = st
673 for s in subrepos:
689 for s in subrepos:
674 del results[s]
690 del results[s]
675 del results['.hg']
691 del results['.hg']
676 return results
692 return results
677
693
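A hedged usage sketch of walk(): the module layout and the always() matcher are assumed from this Mercurial series and may differ in other versions. walk() produces the mapping that status() below consumes:

    from mercurial import hg, ui as uimod, match as matchmod

    repo = hg.repository(uimod.ui(), '.')               # assumes '.' is a repository
    m = matchmod.always(repo.root, repo.getcwd())
    results = repo.dirstate.walk(m, [], True, False)    # unknown=True, ignored=False
    for fn, st in sorted(results.iteritems()):
        if st is None:
            print fn, '(tracked but missing on disk)'
        else:
            print fn, '(present)'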
678 def status(self, match, subrepos, ignored, clean, unknown):
694 def status(self, match, subrepos, ignored, clean, unknown):
679 '''Determine the status of the working copy relative to the
695 '''Determine the status of the working copy relative to the
680 dirstate and return a tuple of lists (unsure, modified, added,
696 dirstate and return a tuple of lists (unsure, modified, added,
681 removed, deleted, unknown, ignored, clean), where:
697 removed, deleted, unknown, ignored, clean), where:
682
698
683 unsure:
699 unsure:
684 files that might have been modified since the dirstate was
700 files that might have been modified since the dirstate was
685 written, but need to be read to be sure (size is the same
701 written, but need to be read to be sure (size is the same
686 but mtime differs)
702 but mtime differs)
687 modified:
703 modified:
688 files that have definitely been modified since the dirstate
704 files that have definitely been modified since the dirstate
689 was written (different size or mode)
705 was written (different size or mode)
690 added:
706 added:
691 files that have been explicitly added with hg add
707 files that have been explicitly added with hg add
692 removed:
708 removed:
693 files that have been explicitly removed with hg remove
709 files that have been explicitly removed with hg remove
694 deleted:
710 deleted:
695 files that have been deleted through other means ("missing")
711 files that have been deleted through other means ("missing")
696 unknown:
712 unknown:
697 files not in the dirstate that are not ignored
713 files not in the dirstate that are not ignored
698 ignored:
714 ignored:
699 files not in the dirstate that are ignored
715 files not in the dirstate that are ignored
700 (by _dirignore())
716 (by _dirignore())
701 clean:
717 clean:
702 files that have definitely not been modified since the
718 files that have definitely not been modified since the
703 dirstate was written
719 dirstate was written
704 '''
720 '''
705 listignored, listclean, listunknown = ignored, clean, unknown
721 listignored, listclean, listunknown = ignored, clean, unknown
706 lookup, modified, added, unknown, ignored = [], [], [], [], []
722 lookup, modified, added, unknown, ignored = [], [], [], [], []
707 removed, deleted, clean = [], [], []
723 removed, deleted, clean = [], [], []
708
724
709 dmap = self._map
725 dmap = self._map
710 ladd = lookup.append # aka "unsure"
726 ladd = lookup.append # aka "unsure"
711 madd = modified.append
727 madd = modified.append
712 aadd = added.append
728 aadd = added.append
713 uadd = unknown.append
729 uadd = unknown.append
714 iadd = ignored.append
730 iadd = ignored.append
715 radd = removed.append
731 radd = removed.append
716 dadd = deleted.append
732 dadd = deleted.append
717 cadd = clean.append
733 cadd = clean.append
718
734
719 lnkkind = stat.S_IFLNK
735 lnkkind = stat.S_IFLNK
720
736
721 for fn, st in self.walk(match, subrepos, listunknown,
737 for fn, st in self.walk(match, subrepos, listunknown,
722 listignored).iteritems():
738 listignored).iteritems():
723 if fn not in dmap:
739 if fn not in dmap:
724 if (listignored or match.exact(fn)) and self._dirignore(fn):
740 if (listignored or match.exact(fn)) and self._dirignore(fn):
725 if listignored:
741 if listignored:
726 iadd(fn)
742 iadd(fn)
727 elif listunknown:
743 elif listunknown:
728 uadd(fn)
744 uadd(fn)
729 continue
745 continue
730
746
731 state, mode, size, time = dmap[fn]
747 state, mode, size, time = dmap[fn]
732
748
733 if not st and state in "nma":
749 if not st and state in "nma":
734 dadd(fn)
750 dadd(fn)
735 elif state == 'n':
751 elif state == 'n':
736 # The "mode & lnkkind != lnkkind or self._checklink"
752 # The "mode & lnkkind != lnkkind or self._checklink"
737 # lines are an expansion of "islink => checklink"
753 # lines are an expansion of "islink => checklink"
738 # where islink means "is this a link?" and checklink
754 # where islink means "is this a link?" and checklink
739 # means "can we check links?".
755 # means "can we check links?".
740 mtime = int(st.st_mtime)
756 mtime = int(st.st_mtime)
741 if (size >= 0 and
757 if (size >= 0 and
742 (size != st.st_size
758 (size != st.st_size
743 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
759 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
744 and (mode & lnkkind != lnkkind or self._checklink)
760 and (mode & lnkkind != lnkkind or self._checklink)
745 or size == -2 # other parent
761 or size == -2 # other parent
746 or fn in self._copymap):
762 or fn in self._copymap):
747 madd(fn)
763 madd(fn)
748 elif (mtime != time
764 elif (mtime != time
749 and (mode & lnkkind != lnkkind or self._checklink)):
765 and (mode & lnkkind != lnkkind or self._checklink)):
750 ladd(fn)
766 ladd(fn)
751 elif mtime == self._lastnormaltime:
767 elif mtime == self._lastnormaltime:
752 # fn may have been changed in the same timeslot without
768 # fn may have been changed in the same timeslot without
753 # changing its size. This can happen if we quickly do
769 # changing its size. This can happen if we quickly do
754 # multiple commits in a single transaction.
770 # multiple commits in a single transaction.
755 # Force lookup, so we don't miss such a racy file change.
771 # Force lookup, so we don't miss such a racy file change.
756 ladd(fn)
772 ladd(fn)
757 elif listclean:
773 elif listclean:
758 cadd(fn)
774 cadd(fn)
759 elif state == 'm':
775 elif state == 'm':
760 madd(fn)
776 madd(fn)
761 elif state == 'a':
777 elif state == 'a':
762 aadd(fn)
778 aadd(fn)
763 elif state == 'r':
779 elif state == 'r':
764 radd(fn)
780 radd(fn)
765
781
766 return (lookup, modified, added, removed, deleted, unknown, ignored,
782 return (lookup, modified, added, removed, deleted, unknown, ignored,
767 clean)
783 clean)
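A hedged usage sketch of status() as documented above; the positional arguments follow the signature in this series, and 'hg status -A' is the command-line equivalent:

    from mercurial import hg, ui as uimod, match as matchmod

    repo = hg.repository(uimod.ui(), '.')               # assumes '.' is a repository
    m = matchmod.always(repo.root, repo.getcwd())
    st = repo.dirstate.status(m, [], False, True, True) # ignored, clean, unknown
    names = 'unsure modified added removed deleted unknown ignored clean'.split()
    for name, files in zip(names, st):
        print '%-8s %s' % (name, ', '.join(files) or '(none)')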
@@ -1,127 +1,163 @@
1 $ "$TESTDIR/hghave" icasefs || exit 80
1 $ "$TESTDIR/hghave" icasefs || exit 80
2
2
3 $ hg debugfs | grep 'case-sensitive:'
3 $ hg debugfs | grep 'case-sensitive:'
4 case-sensitive: no
4 case-sensitive: no
5
5
6 test file addition with bad case
6 test file addition with bad case
7
7
8 $ hg init repo1
8 $ hg init repo1
9 $ cd repo1
9 $ cd repo1
10 $ echo a > a
10 $ echo a > a
11 $ hg add A
11 $ hg add A
12 adding a
12 adding a
13 $ hg st
13 $ hg st
14 A a
14 A a
15 $ hg ci -m adda
15 $ hg ci -m adda
16 $ hg manifest
16 $ hg manifest
17 a
17 a
18 $ cd ..
18 $ cd ..
19
19
20 test case collision on rename (issue750)
20 test case collision on rename (issue750)
21
21
22 $ hg init repo2
22 $ hg init repo2
23 $ cd repo2
23 $ cd repo2
24 $ echo a > a
24 $ echo a > a
25 $ hg --debug ci -Am adda
25 $ hg --debug ci -Am adda
26 adding a
26 adding a
27 a
27 a
28 committed changeset 0:07f4944404050f47db2e5c5071e0e84e7a27bba9
28 committed changeset 0:07f4944404050f47db2e5c5071e0e84e7a27bba9
29
29
30 Case-changing renames should work:
30 Case-changing renames should work:
31
31
32 $ hg mv a A
32 $ hg mv a A
33 $ hg mv A a
33 $ hg mv A a
34 $ hg st
34 $ hg st
35
36 test changing case of path components
37
38 $ mkdir D
39 $ echo b > D/b
40 $ hg ci -Am addb D/b
41 $ hg mv D/b d/b
42 D/b: not overwriting - file exists
43 $ hg mv D/b d/c
44 $ hg st
45 A D/c
46 R D/b
47 $ mv D temp
48 $ mv temp d
49 $ hg st
50 A D/c
51 R D/b
52 $ hg revert -aq
53 $ rm d/c
54 $ echo c > D/c
55 $ hg add D/c
56 $ hg st
57 A D/c
58 $ hg ci -m addc D/c
59 $ hg mv d/b d/e
60 moving D/b to D/e
61 $ hg st
62 A D/e
63 R D/b
64 $ hg revert -aq
65 $ rm d/e
66 $ hg mv d/b D/B
67 moving D/b to D/B
68 $ hg st
69 A D/B
70 R D/b
35 $ cd ..
71 $ cd ..
36
72
37 test case collision between revisions (issue912)
73 test case collision between revisions (issue912)
38
74
39 $ hg init repo3
75 $ hg init repo3
40 $ cd repo3
76 $ cd repo3
41 $ echo a > a
77 $ echo a > a
42 $ hg ci -Am adda
78 $ hg ci -Am adda
43 adding a
79 adding a
44 $ hg rm a
80 $ hg rm a
45 $ hg ci -Am removea
81 $ hg ci -Am removea
46 $ echo A > A
82 $ echo A > A
47
83
48 on Linux, HFS keeps the old case stored, so force the case change
84 on Linux, HFS keeps the old case stored, so force the case change
49
85
50 $ mv a aa
86 $ mv a aa
51 $ mv aa A
87 $ mv aa A
52 $ hg ci -Am addA
88 $ hg ci -Am addA
53 adding A
89 adding A
54
90
55 this used to fail on case-insensitive filesystems
91 this used to fail on case-insensitive filesystems
56
92
57 $ hg up -C 0
93 $ hg up -C 0
58 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
94 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 $ hg up -C
95 $ hg up -C
60 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
96 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
61
97
62 no clobbering of untracked files with wrong casing
98 no clobbering of untracked files with wrong casing
63
99
64 $ hg up -r null
100 $ hg up -r null
65 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
101 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
66 $ echo gold > a
102 $ echo gold > a
67 $ hg up
103 $ hg up
68 A: untracked file differs
104 A: untracked file differs
69 abort: untracked files in working directory differ from files in requested revision
105 abort: untracked files in working directory differ from files in requested revision
70 [255]
106 [255]
71 $ cat a
107 $ cat a
72 gold
108 gold
73
109
74 $ cd ..
110 $ cd ..
75
111
76 issue 3342: file in nested directory causes unexpected abort
112 issue 3342: file in nested directory causes unexpected abort
77
113
78 $ hg init issue3342
114 $ hg init issue3342
79 $ cd issue3342
115 $ cd issue3342
80
116
81 $ mkdir -p a/B/c/D
117 $ mkdir -p a/B/c/D
82 $ echo e > a/B/c/D/e
118 $ echo e > a/B/c/D/e
83 $ hg add a/B/c/D/e
119 $ hg add a/B/c/D/e
84
120
85 $ cd ..
121 $ cd ..
86
122
87 issue 3340: mq does not handle case changes correctly
123 issue 3340: mq does not handle case changes correctly
88
124
89 in addition to the reported case, 'hg qrefresh' is also tested against
125 in addition to the reported case, 'hg qrefresh' is also tested against
90 case changes.
126 case changes.
91
127
92 $ echo "[extensions]" >> $HGRCPATH
128 $ echo "[extensions]" >> $HGRCPATH
93 $ echo "mq=" >> $HGRCPATH
129 $ echo "mq=" >> $HGRCPATH
94
130
95 $ hg init issue3340
131 $ hg init issue3340
96 $ cd issue3340
132 $ cd issue3340
97
133
98 $ echo a > mIxEdCaSe
134 $ echo a > mIxEdCaSe
99 $ hg add mIxEdCaSe
135 $ hg add mIxEdCaSe
100 $ hg commit -m '#0'
136 $ hg commit -m '#0'
101 $ hg rename mIxEdCaSe tmp
137 $ hg rename mIxEdCaSe tmp
102 $ hg rename tmp MiXeDcAsE
138 $ hg rename tmp MiXeDcAsE
103 $ hg status -A
139 $ hg status -A
104 A MiXeDcAsE
140 A MiXeDcAsE
105 mIxEdCaSe
141 mIxEdCaSe
106 R mIxEdCaSe
142 R mIxEdCaSe
107 $ hg qnew changecase
143 $ hg qnew changecase
108 $ hg status -A
144 $ hg status -A
109 C MiXeDcAsE
145 C MiXeDcAsE
110
146
111 $ hg qpop -a
147 $ hg qpop -a
112 popping changecase
148 popping changecase
113 patch queue now empty
149 patch queue now empty
114 $ hg qnew refresh-casechange
150 $ hg qnew refresh-casechange
115 $ hg status -A
151 $ hg status -A
116 C mIxEdCaSe
152 C mIxEdCaSe
117 $ hg rename mIxEdCaSe tmp
153 $ hg rename mIxEdCaSe tmp
118 $ hg rename tmp MiXeDcAsE
154 $ hg rename tmp MiXeDcAsE
119 $ hg status -A
155 $ hg status -A
120 A MiXeDcAsE
156 A MiXeDcAsE
121 mIxEdCaSe
157 mIxEdCaSe
122 R mIxEdCaSe
158 R mIxEdCaSe
123 $ hg qrefresh
159 $ hg qrefresh
124 $ hg status -A
160 $ hg status -A
125 C MiXeDcAsE
161 C MiXeDcAsE
126
162
127 $ cd ..
163 $ cd ..