##// END OF EJS Templates
itersubrepos: move to scmutil to break a direct import cycle
Augie Fackler -
r20392:d4f804ca default
parent child Browse files
Show More
@@ -1,2167 +1,2167 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile
10 import os, sys, errno, re, tempfile
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import subrepo, context, repair, graphmod, revset, phases, obsolete, pathutil
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import lock as lockmod
16 import lock as lockmod
17
17
def parsealiases(cmd):
    """Split a command table key into its list of alias names.

    Keys look like "^log|history"; the leading "^" marker is dropped
    before splitting on "|".
    """
    names = cmd.lstrip("^")
    return names.split("|")
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    normal = {}
    debug = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        candidates = [cmd]
    else:
        candidates = table.keys()

    for key in candidates:
        # split the table key into its alias names (same as parsealiases)
        aliases = key.lstrip("^").split("|")
        match = None
        if cmd in aliases:
            match = cmd
        elif not strict:
            # take the first alias that cmd is a prefix of
            prefixed = [a for a in aliases if a.startswith(cmd)]
            if prefixed:
                match = prefixed[0]
        if match is None:
            continue
        if aliases[0].startswith("debug") or match.startswith("debug"):
            debug[match] = (aliases, table[key])
        else:
            normal[match] = (aliases, table[key])

    # debug commands only surface when nothing else matched
    if not normal and debug:
        normal = debug

    return normal
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    matches = findpossible(cmd, table, strict)

    # an exact hit wins regardless of how many prefixes matched
    if cmd in matches:
        return matches[cmd]

    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, sorted(matches))

    if matches:
        return list(matches.values())[0]

    raise error.UnknownCommand(cmd)
def findrepo(p):
    """Walk upward from p looking for a directory that contains ".hg".

    Returns the repository root path, or None once the filesystem root
    is reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point only at the root
            return None
        p = parent
    return p
def bailifchanged(repo):
    """Abort if the working directory has any uncommitted state.

    Checks, in order: an in-progress merge (second parent set),
    modified/added/removed/deleted files, and dirty subrepositories.
    """
    if repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    # first four status fields: modified, added, removed, deleted
    if any(repo.status()[:4]):
        raise util.Abort(_('uncommitted changes'))
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if wctx.sub(subpath).dirty():
            raise util.Abort(_("uncommitted changes in subrepo %s") % subpath)
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    # -m given, or neither option: the message (possibly None) stands
    if message or not logfile:
        return message
    try:
        if logfile == '-':
            return ui.fin.read()
        # normalize line endings while reading the file
        return '\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise util.Abort(_("can't read commit message '%s': %s") %
                         (logfile, inst.strerror))
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        # absent, empty, or zero: no limit
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output filename pattern.

    Escapes available unconditionally: %% (literal '%') and %b (basename
    of the repo root).  With node: %H/%h/%R/%r node identifiers and %m
    (description with non-word chars replaced by '_').  With total/seqno:
    %N and %n (zero-padded when both are given).  With pathname: %s, %d
    and %p path components.  Aborts on an unknown escape.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }
    if node:
        expander.update({
            'H': lambda: hex(node),
            'R': lambda: str(repo.changelog.rev(node)),
            'h': lambda: short(node),
            'm': lambda: re.sub('[^\w]', '_', str(desc)),
            'r': lambda: str(repo.changelog.rev(node)).zfill(revwidth or 0),
            })
    if total is not None:
        expander['N'] = lambda: str(total)
    if seqno is not None:
        expander['n'] = lambda: str(seqno)
    if total is not None and seqno is not None:
        # pad the sequence number to the width of the total
        expander['n'] = lambda: str(seqno).zfill(len(str(total)))
    if pathname is not None:
        expander['s'] = lambda: os.path.basename(pathname)
        expander['d'] = lambda: os.path.dirname(pathname) or '.'
        expander['p'] = lambda: pathname

    try:
        pieces = []
        i, end = 0, len(pat)
        while i < end:
            ch = pat[i]
            if ch == '%':
                i += 1
                ch = expander[pat[i]]()
            pieces.append(ch)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file-like object for an output filename pattern.

    An empty pattern or "-" maps to the ui's stdout (stdin for read
    modes), duplicated so the caller may close it; a pattern object that
    is already file-like is handed back unchanged; anything else is
    expanded with makefilename() and opened.  modemap, when given, makes
    repeat opens of the same expanded name append instead of truncate.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = writable and repo.ui.fout or repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            return os.fdopen(os.dup(fp.fileno()), mode)
        # this fp can't be duped properly; hand back a proxy whose
        # close() is a no-op so callers can close it safely
        class wrappedfileobj(object):
            noop = lambda x: None
            def __init__(self, f):
                self.f = f
            def __getattr__(self, attr):
                if attr == 'close':
                    return self.noop
                return getattr(self.f, attr)
        return wrappedfileobj(fp)

    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first write truncates; later writes to the same file append
            modemap[fn] = 'ab'
    return open(fn, mode)
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']

    # validate the option combination before touching anything
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.changelog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if r:
        return r

    # fall back to opening the index file directly
    if not file_:
        raise error.CommandError(cmd, _('invalid arguments'))
    if not os.path.isfile(file_):
        raise util.Abort(_("revlog '%s' not found") % file_)
    return revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                         file_[:-2] + ".i")
def copy(ui, repo, pats, opts, rename=False):
    '''copy (or, with rename=True, move) files matched by pats to dest

    The last element of pats is the destination.  Returns True when at
    least one file could not be copied, so callers can report failure.
    '''
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, for collision detection
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into a list of (abs, rel, exact) files
        srcs = []
        # with --after only unknown ('?') files are skipped; otherwise
        # removed ('r') files are skipped as well
        badstates = after and '?' or '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                # only complain about files the user named explicitly
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy one file; returns True on failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records the copy/move; the target must exist
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # two renames via a temp name change only the case
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many candidate targets already exist for
                    # this strip length; the higher score wins below
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    # --after has to reconstruct target paths from what is already on disk
    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    In --daemon mode the current process re-executes itself detached
    (passing --daemon-pipefds so the child knows it is the daemon half),
    waits for the child to signal startup by removing a lock file, then
    returns via parentfn.  The daemon half detaches from the session,
    redirects stdio to logfile (or devnull) and calls runfn.
    '''

    def writepid(pid):
        # record the pid in --pid-file; append when appendpid is set
        if opts['pid_file']:
            mode = appendpid and 'a' or 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    # parent half: spawn the detached child and wait for it to start
    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # separate-argument form: drop the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # startup is complete once the child removed the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                # already removed by the child is the expected case
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(os.getpid())

    # daemon half: detach, release the waiting parent, redirect stdio
    if opts['daemon_pipefds']:
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        # close the originals unless they already occupy a stdio slot
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    One patch is emitted per revision in ``revs``.  Output goes to ``fp``
    when given, otherwise to a per-revision file named from ``template``
    (via makefileobj), or to the ui when the template is empty.

    switch_parent: diff merges against their second parent instead of the
    first.  opts: diff options forwarded to patch.diffui().
    '''

    total = len(revs)
    # widest revision number, used by makefileobj for %r zero-padding
    revwidth = max([len(str(rev)) for rev in revs])
    # shared across single() calls so makefileobj can reuse open modes
    # when several revisions are appended to the same file
    filemode = {}

    def single(rev, seqno, fp):
        # Write one revision as a patch; seqno is 1-based for %n expansion.
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0]  # Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            # makefileobj may hand the template string back (e.g. '-');
            # only close genuine file objects we opened ourselves
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            # adapt fp.write to the ui.write signature (drops label= kwarg)
            def write(s, **kw):
                fp.write(s)


        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent  %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent  %s\n" % hex(parents[1]))
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
602
602
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.

    Writes a diff (or, with stat=True, a diffstat summary) between node1
    and node2 for files selected by ``match`` to ``fp`` when given,
    otherwise to the ui.  With listsubrepos=True, the diff of every
    subrepo present in both contexts is appended as well.
    '''
    if fp is None:
        write = ui.write
    else:
        # adapt fp.write to the ui.write signature (drops label= kwarg)
        def write(s, **kw):
            fp.write(s)

    if stat:
        # diffstat only needs hunk headers, so drop context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
645
645
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        # patch: matcher selecting files whose diff/diffstat to show,
        # or None when no patch output was requested
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.diffopts = diffopts
        # when buffered, rendered output is parked per revision in
        # self.header / self.hunk until flush(rev) releases it
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        # Emit any buffered header/body for rev; returns 1 if a body
        # hunk was written, 0 otherwise.
        if rev in self.header:
            h = self.header[rev]
            # suppress a header identical to the previous one
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # write the footer, if a template subclass accumulated one
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        if self.buffered:
            # capture labeled output so flush() can replay it later
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        # full 40-char hashes in debug mode, 12-char otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset:   %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset changeset.%s' % ctx.phasestr())

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch:      %s\n") % branch,
                          label='log.branch')
        for bookmark in self.repo.nodebookmarks(changenode):
            # i18n: column positioning for "hg log"
            self.ui.write(_("bookmark:    %s\n") % bookmark,
                          label='log.bookmark')
        for tag in self.repo.nodetags(changenode):
            # i18n: column positioning for "hg log"
            self.ui.write(_("tag:         %s\n") % tag,
                          label='log.tag')
        if self.ui.debugflag and ctx.phase():
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase:       %s\n") % _(ctx.phasestr()),
                          label='log.phase')
        for parent in parents:
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent:      %d:%s\n") % parent,
                          label='log.parent changeset.%s' % ctx.phasestr())

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest:    %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user:        %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date:        %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified, added, removed relative to the first parent
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies:      %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra:       %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary:     %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        # Show diffstat and/or diff against the first parent, depending
        # on the --stat/--patch options recorded in self.diffopts.
        if not matchfn:
            matchfn = self.patch
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
820 return parents
821
821
822
822
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        # full hashes in debug mode, 12-char short form otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        # per-printer cache handed to template keywords via props['cache']
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        # hide the parent when it is simply the preceding revision
        if parents[0].rev() >= ctx.rev() - 1:
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        # later entries override earlier ones, so debug wins over quiet
        # wins over verbose wins over the plain template
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                # the footer is rendered once and written by close()
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                     **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
924
924
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    # --patch/--stat imply diff output for all files of each changeset
    patch = None
    if opts.get('patch') or opts.get('stat'):
        patch = scmutil.matchall(repo)

    tmpl = opts.get('template')
    style = None
    if not tmpl:
        style = opts.get('style')

    # ui settings
    if not (tmpl or style):
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                tmpl = templater.parsestring(tmpl)
            except SyntaxError:
                # not a quoted string: take the config value literally
                tmpl = templater.parsestring(tmpl, quoted=False)
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not (tmpl or style):
        return changeset_printer(ui, repo, patch, opts, buffered)

    mapfile = None
    if style and not tmpl:
        mapfile = style
        # a bare style name resolves against the bundled template path,
        # first as "map-cmdline.<name>", then as-is
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname

    try:
        t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    if tmpl:
        t.use_template(tmpl)
    return t
976
976
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}

    # Invoked by walkchangerevs during its preparation pass: record the
    # commit date of every changeset satisfying the date spec.
    def collect(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    for ctx in walkchangerevs(repo, matcher, {'rev': None}, collect):
        rev = ctx.rev()
        if rev not in matched:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(matched[rev])))
        return str(rev)

    raise util.Abort(_("revision matching date not found"))
997
997
def increasingwindows(start, end, windowsize=8, sizelimit=512):
    """Yield (position, width) windows walking from start toward end.

    The window width starts at ``windowsize`` and doubles after each
    step until it has reached at least ``sizelimit``.  When start < end
    the walk ascends and each width is clamped to the remaining span;
    when walking downward it is clamped to ``position - end - 1``.
    """
    pos, step = start, windowsize
    ascending = start < end
    while (pos < end) if ascending else (pos > end):
        if ascending:
            yield pos, min(step, end - pos)
            pos += step
        else:
            yield pos, min(step, pos - end - 1)
            pos -= step
        # grow the window geometrically, but stop doubling once the
        # configured limit has been reached
        if step < sizelimit:
            step *= 2
1011
1011
class FileWalkError(Exception):
    """Raised when a file history cannot be walked via filelogs alone
    (callers fall back to the slow changelog walk)."""
    pass
1014
1014
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    As a side effect, fncache is populated with rev -> [files changed
    there] entries for the revisions returned.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    # (filename, filenode) pairs discovered via renames while following
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) for every matched file,
        # then any rename sources accumulated in copies
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)


        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1112
1112
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')

    # Resolve the revisions to visit: an explicit --rev range, the
    # ancestors of the working directory parent for --follow, or the
    # whole repository newest-first.
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts.get('rev'))
    elif follow:
        revs = repo.revs('reverse(:.)')
    else:
        revs = revset.baseset(repo)
        revs.reverse()
    if not revs:
        return []
    wanted = set()
    # Slow path: pattern matching or --removed forces reading each
    # changeset's file list instead of walking filelogs.
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx  # bound method, hoisted for the hot loop below
    revs = revset.baseset(revs)

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if not slowpath and not match.files():
        # No files, no patterns.  Display all revs.
        wanted = set(revs)

    if not slowpath and match.files():
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # Each candidate rev is examined at most once, then
                    # moved from self.revs into self.set on a match.
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # followfilter incrementally tracks which revisions are connected to
    # the starting revision (descendants when walking forward, ancestors
    # when walking backwards).
    class followfilter(object):
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = set()
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                # first call defines the reference revision
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted.discard(x)

    # Choose a small initial window if we will probably only visit a
    # few commits.
    limit = loglimit(opts)
    windowsize = 8
    if limit:
        windowsize = min(limit, windowsize)

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasingwindows(0, len(revs), windowsize):
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            # forward pass: let the caller gather data for the window...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ...then yield the window in the requested order
            for rev in nrevs:
                yield change(rev)
    return iterate()
1289
1289
def _makegraphfilematcher(repo, pats, followfirst):
    """Return a rev -> matcher callable for --patch/--stat with --follow.

    The returned callable lazily maps linkrevs to the names of the
    ancestors of the files in *pats* (see comment below).
    """
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # one-element list used as a mutable flag so the nested function can
    # rebind it (no 'nonlocal' in Python 2)
    fcacheready = [False]
    pctx = repo['.']
    wctx = repo[None]

    def populate():
        # walk each file and all of its ancestors, recording which paths
        # belong to which linkrev
        for fn in pats:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.match(wctx, fcache.get(rev, []), default='path')

    return filematcher
1317
1317
def _makegraphlogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Maps a log option to (revset template, join operator). A None join
    # operator means the option's value is not a list.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    opts = dict(opts)  # local copy: synthetic '_xxx' keys are added below
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    followfirst = opts.get('follow_first') and 1 or 0
    # --follow with FILE behaviour depends on revs...
    startrev = revs[0]
    followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    pctx = repo[None]
    match, pats = scmutil.matchandpats(pctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in pctx:
                raise util.Abort(_('cannot follow file not in parent '
                                   'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    if slowpath:
        # See walkchangerevs() slow path.
        #
        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
    else:
        if follow:
            fpats = ('_patsfollow', '_patsfollowfirst')
            fnopats = (('_ancestors', '_fancestors'),
                       ('_descendants', '_fdescendants'))
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                opts[fnopats[followdescendants][followfirst]] = str(startrev)
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        if follow:
            filematcher = _makegraphfilematcher(repo, pats, followfirst)
        else:
            # without --follow a single matcher serves every revision
            filematcher = lambda rev: match

    # Translate every set option through opt2revset and AND the pieces
    # together into one revset expression.
    expr = []
    for op, val in opts.iteritems():
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
1450
1450
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    if not len(repo):
        # empty repository: nothing to show
        return [], None, None
    limit = loglimit(opts)
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    possiblyunsorted = False # whether revs might need sorting
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
        # Don't sort here because _makegraphlogrevset might depend on the
        # order of revs
        possiblyunsorted = True
    else:
        if follow and len(repo) > 0:
            revs = repo.revs('reverse(:.)')
        else:
            revs = revset.baseset(repo.changelog)
            revs.reverse()
    if not revs:
        return [], None, None
    revs = revset.baseset(revs)
    expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
    if possiblyunsorted:
        # graph display wants newest first
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # apply --limit only after filtering and sorting
        revs = revs[:limit]

    return revs, expr, filematcher
1497
1497
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    # Render the (rev, type, ctx, parents) tuples yielded by *dag* as an
    # ASCII graph, formatting each changeset with *displayer* (which must
    # be buffered: output is pulled back out of displayer.hunk below).
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # node glyph: '@' for a working-directory parent (node listed in
        # showparents), 'x' for an obsolete changeset, 'o' otherwise
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        copies = None
        if getrenamed and ctx.rev():
            # collect (destination, source) pairs for renamed files;
            # getrenamed is assumed to return a tuple whose first element
            # is the rename source, or a falsy value -- see callers
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # format the changeset, then retrieve the buffered text so it can
        # be laid out next to the graph edges
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            # drop the empty string left by a trailing newline
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
1526
1526
def graphlog(ui, repo, *pats, **opts):
    """Show revision history alongside an ASCII revision graph.

    Parameters are identical to log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    renamedfn = None
    if opts.get('copies'):
        stoprev = None
        if opts.get('rev'):
            stoprev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
        renamedfn = templatekw.getrenamedfn(repo, endrev=stoprev)

    displayer = show_changeset(ui, repo, opts, buffered=True)
    wdirparents = [p.node() for p in repo[None].parents()]
    displaygraph(ui, revdag, displayer, wdirparents,
                 graphmod.asciiedges, renamedfn, filematcher)
1542
1542
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph was supplied.

    Only truthy option values trigger the abort; a flag that is present
    but falsy (e.g. ``newest_first=False``) is accepted.
    """
    for op in ["newest_first"]:
        if op in opts and opts[op]:
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % op.replace("_", "-"))
def graphrevs(repo, nodes, opts):
    """Return a graphmod DAG walk over *nodes*, newest first.

    Reverses *nodes* in place and truncates to the --limit option
    (via loglimit) before building the walker.
    """
    limit = loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)
def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
    """Schedule files matched by *match* for addition, recursing into subrepos.

    Returns the list of files that could not be added. *prefix* is the
    path prefix used when reporting names relative to a parent repo;
    with *explicitonly*, only exactly-named files are considered.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # collect names the matcher rejects while still reporting them
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in repo.walk(match):
        exact = match.exact(f)
        if exact or not explicitonly and f not in repo.dirstate:
            if cca:
                # check for Windows/case-folding portability problems
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(join(f)))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if listsubrepos:
                bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
                                   False))
            else:
                # without --subrepos, only explicitly named files are added
                bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
                                   True))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not dryrun:
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by *match*, recursing into subrepos.

    Returns a ``(bad, forgot)`` pair: files that could not be forgotten
    and files that were. *prefix* is used to report names relative to a
    parent repo; with *explicitonly*, only exactly-named files are
    considered.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # collect names the matcher rejects while still reporting them
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    # modified + added + deleted + clean, i.e. everything tracked
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(ui, submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
                if f not in forgot:
                    if os.path.exists(match.rel(join(f))):
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(join(f)))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(join(f)))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(forget)
    return bad, forgot
def duplicatecopies(repo, rev, fromrev):
    '''reproduce copies from fromrev to rev in the dirstate'''
    for dst, src in copies.pathcopies(repo[fromrev], repo[rev]).iteritems():
        # copies.pathcopies returns backward renames, so dst might not
        # actually be in the dirstate; only record copies for files the
        # dirstate knows about (normal/modified/added states)
        if repo.dirstate[dst] in "nma":
            repo.dirstate.copy(src, dst)
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    Normalizes --date, resolves the message from -m/-l, optionally runs
    addremove, then delegates to *commitfunc* with a matcher built from
    *pats*/*opts*. Returns whatever *commitfunc* returns.
    '''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        scmutil.addremove(repo, pats, opts)

    return commitfunc(ui, repo, message,
                      scmutil.match(repo[None], pats, opts), opts)
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset *old*, folding in working-directory changes.

    Commits pending working-dir changes as a temporary intermediate
    changeset, builds a memctx combining *old* and that intermediate
    state on top of old's first parent, then either obsoletes or strips
    the superseded changesets. Returns the node of the new changeset
    (or old's node when nothing changed).
    """
    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            currentbookmark = repo._bookmarkcurrent
            try:
                # detach the active bookmark so the temporary commit does
                # not move it
                repo._bookmarkcurrent = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarkcurrent = currentbookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    # True if f is identical (content and flags) in ctx and
                    # base, or absent from both
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    # serve file content from the intermediate commit;
                    # IOError tells memctx the file was removed
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        raise IOError
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        raise IOError

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editmsg = False
            if not message:
                editmsg = True
                message = old.description()

            # keep a copy without amend_source so we can detect a no-op below
            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra)
            if editmsg:
                new._text = commitforceeditor(repo, new, [])

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                # preserve the phase of the changeset being amended
                repo.ui.setconfig('phases', 'new-commit', old.phase())
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph)
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        marks[bm] = newid
                    marks.write()
            #commit the whole amend process
            if obsolete._enabled and newid != old.node():
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
        if (not obsolete._enabled) and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        if newid is None:
            # the amend did not complete; discard any stale dirstate
            repo.dirstate.invalidate()
        lockmod.release(lock, wlock)
    return newid
def commiteditor(repo, ctx, subs):
    """Return ctx's description, launching the editor only if it is empty."""
    if ctx.description():
        return ctx.description()
    return commitforceeditor(repo, ctx, subs)
def commitforceeditor(repo, ctx, subs):
    """Always open the editor to get a commit message for *ctx*.

    Builds the 'HG:' comment scaffold (user, branch, bookmark, subrepos,
    file lists), runs the user's editor from the repository root, strips
    the HG: lines, and returns the result. Aborts on an empty message.
    """
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(_("HG: Enter commit message."
                      " Lines beginning with 'HG:' are removed."))
    edittext.append(_("HG: Leave message empty to abort commit."))
    edittext.append("HG: --")
    edittext.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        edittext.append(_("HG: branch merge"))
    if ctx.branch():
        edittext.append(_("HG: branch '%s'") % ctx.branch())
    if bookmarks.iscurrent(repo):
        edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
    edittext.extend([_("HG: subrepo %s") % s for s in subs])
    edittext.extend([_("HG: added %s") % f for f in added])
    edittext.extend([_("HG: changed %s") % f for f in modified])
    edittext.extend([_("HG: removed %s") % f for f in removed])
    if not added and not modified and not removed:
        edittext.append(_("HG: no files changed"))
    edittext.append("")
    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    text = repo.ui.edit("\n".join(edittext), ctx.user())
    # drop every line starting with 'HG:' from the edited result
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
def commitstatus(repo, node, branch, bheads=None, opts={}):
    """Report post-commit status messages for changeset *node*.

    Prints 'created new head' when appropriate (see table below),
    'reopening closed branch head' when committing onto a closed head,
    and the committed changeset id in verbose/debug mode. *bheads* is
    the list of branch heads before the commit.
    """
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
1930 def revert(ui, repo, ctx, parents, *pats, **opts):
1930 def revert(ui, repo, ctx, parents, *pats, **opts):
1931 parent, p2 = parents
1931 parent, p2 = parents
1932 node = ctx.node()
1932 node = ctx.node()
1933
1933
1934 mf = ctx.manifest()
1934 mf = ctx.manifest()
1935 if node == parent:
1935 if node == parent:
1936 pmf = mf
1936 pmf = mf
1937 else:
1937 else:
1938 pmf = None
1938 pmf = None
1939
1939
1940 # need all matching names in dirstate and manifest of target rev,
1940 # need all matching names in dirstate and manifest of target rev,
1941 # so have to walk both. do not print errors if files exist in one
1941 # so have to walk both. do not print errors if files exist in one
1942 # but not other.
1942 # but not other.
1943
1943
1944 names = {}
1944 names = {}
1945
1945
1946 wlock = repo.wlock()
1946 wlock = repo.wlock()
1947 try:
1947 try:
1948 # walk dirstate.
1948 # walk dirstate.
1949
1949
1950 m = scmutil.match(repo[None], pats, opts)
1950 m = scmutil.match(repo[None], pats, opts)
1951 m.bad = lambda x, y: False
1951 m.bad = lambda x, y: False
1952 for abs in repo.walk(m):
1952 for abs in repo.walk(m):
1953 names[abs] = m.rel(abs), m.exact(abs)
1953 names[abs] = m.rel(abs), m.exact(abs)
1954
1954
1955 # walk target manifest.
1955 # walk target manifest.
1956
1956
1957 def badfn(path, msg):
1957 def badfn(path, msg):
1958 if path in names:
1958 if path in names:
1959 return
1959 return
1960 if path in ctx.substate:
1960 if path in ctx.substate:
1961 return
1961 return
1962 path_ = path + '/'
1962 path_ = path + '/'
1963 for f in names:
1963 for f in names:
1964 if f.startswith(path_):
1964 if f.startswith(path_):
1965 return
1965 return
1966 ui.warn("%s: %s\n" % (m.rel(path), msg))
1966 ui.warn("%s: %s\n" % (m.rel(path), msg))
1967
1967
1968 m = scmutil.match(ctx, pats, opts)
1968 m = scmutil.match(ctx, pats, opts)
1969 m.bad = badfn
1969 m.bad = badfn
1970 for abs in ctx.walk(m):
1970 for abs in ctx.walk(m):
1971 if abs not in names:
1971 if abs not in names:
1972 names[abs] = m.rel(abs), m.exact(abs)
1972 names[abs] = m.rel(abs), m.exact(abs)
1973
1973
1974 # get the list of subrepos that must be reverted
1974 # get the list of subrepos that must be reverted
1975 targetsubs = sorted(s for s in ctx.substate if m(s))
1975 targetsubs = sorted(s for s in ctx.substate if m(s))
1976 m = scmutil.matchfiles(repo, names)
1976 m = scmutil.matchfiles(repo, names)
1977 changes = repo.status(match=m)[:4]
1977 changes = repo.status(match=m)[:4]
1978 modified, added, removed, deleted = map(set, changes)
1978 modified, added, removed, deleted = map(set, changes)
1979
1979
1980 # if f is a rename, also revert the source
1980 # if f is a rename, also revert the source
1981 cwd = repo.getcwd()
1981 cwd = repo.getcwd()
1982 for f in added:
1982 for f in added:
1983 src = repo.dirstate.copied(f)
1983 src = repo.dirstate.copied(f)
1984 if src and src not in names and repo.dirstate[src] == 'r':
1984 if src and src not in names and repo.dirstate[src] == 'r':
1985 removed.add(src)
1985 removed.add(src)
1986 names[src] = (repo.pathto(src, cwd), True)
1986 names[src] = (repo.pathto(src, cwd), True)
1987
1987
1988 def removeforget(abs):
1988 def removeforget(abs):
1989 if repo.dirstate[abs] == 'a':
1989 if repo.dirstate[abs] == 'a':
1990 return _('forgetting %s\n')
1990 return _('forgetting %s\n')
1991 return _('removing %s\n')
1991 return _('removing %s\n')
1992
1992
1993 revert = ([], _('reverting %s\n'))
1993 revert = ([], _('reverting %s\n'))
1994 add = ([], _('adding %s\n'))
1994 add = ([], _('adding %s\n'))
1995 remove = ([], removeforget)
1995 remove = ([], removeforget)
1996 undelete = ([], _('undeleting %s\n'))
1996 undelete = ([], _('undeleting %s\n'))
1997
1997
1998 disptable = (
1998 disptable = (
1999 # dispatch table:
1999 # dispatch table:
2000 # file state
2000 # file state
2001 # action if in target manifest
2001 # action if in target manifest
2002 # action if not in target manifest
2002 # action if not in target manifest
2003 # make backup if in target manifest
2003 # make backup if in target manifest
2004 # make backup if not in target manifest
2004 # make backup if not in target manifest
2005 (modified, revert, remove, True, True),
2005 (modified, revert, remove, True, True),
2006 (added, revert, remove, True, False),
2006 (added, revert, remove, True, False),
2007 (removed, undelete, None, True, False),
2007 (removed, undelete, None, True, False),
2008 (deleted, revert, remove, False, False),
2008 (deleted, revert, remove, False, False),
2009 )
2009 )
2010
2010
2011 for abs, (rel, exact) in sorted(names.items()):
2011 for abs, (rel, exact) in sorted(names.items()):
2012 mfentry = mf.get(abs)
2012 mfentry = mf.get(abs)
2013 target = repo.wjoin(abs)
2013 target = repo.wjoin(abs)
2014 def handle(xlist, dobackup):
2014 def handle(xlist, dobackup):
2015 xlist[0].append(abs)
2015 xlist[0].append(abs)
2016 if (dobackup and not opts.get('no_backup') and
2016 if (dobackup and not opts.get('no_backup') and
2017 os.path.lexists(target) and
2017 os.path.lexists(target) and
2018 abs in ctx and repo[None][abs].cmp(ctx[abs])):
2018 abs in ctx and repo[None][abs].cmp(ctx[abs])):
2019 bakname = "%s.orig" % rel
2019 bakname = "%s.orig" % rel
2020 ui.note(_('saving current version of %s as %s\n') %
2020 ui.note(_('saving current version of %s as %s\n') %
2021 (rel, bakname))
2021 (rel, bakname))
2022 if not opts.get('dry_run'):
2022 if not opts.get('dry_run'):
2023 util.rename(target, bakname)
2023 util.rename(target, bakname)
2024 if ui.verbose or not exact:
2024 if ui.verbose or not exact:
2025 msg = xlist[1]
2025 msg = xlist[1]
2026 if not isinstance(msg, basestring):
2026 if not isinstance(msg, basestring):
2027 msg = msg(abs)
2027 msg = msg(abs)
2028 ui.status(msg % rel)
2028 ui.status(msg % rel)
2029 for table, hitlist, misslist, backuphit, backupmiss in disptable:
2029 for table, hitlist, misslist, backuphit, backupmiss in disptable:
2030 if abs not in table:
2030 if abs not in table:
2031 continue
2031 continue
2032 # file has changed in dirstate
2032 # file has changed in dirstate
2033 if mfentry:
2033 if mfentry:
2034 handle(hitlist, backuphit)
2034 handle(hitlist, backuphit)
2035 elif misslist is not None:
2035 elif misslist is not None:
2036 handle(misslist, backupmiss)
2036 handle(misslist, backupmiss)
2037 break
2037 break
2038 else:
2038 else:
2039 if abs not in repo.dirstate:
2039 if abs not in repo.dirstate:
2040 if mfentry:
2040 if mfentry:
2041 handle(add, True)
2041 handle(add, True)
2042 elif exact:
2042 elif exact:
2043 ui.warn(_('file not managed: %s\n') % rel)
2043 ui.warn(_('file not managed: %s\n') % rel)
2044 continue
2044 continue
2045 # file has not changed in dirstate
2045 # file has not changed in dirstate
2046 if node == parent:
2046 if node == parent:
2047 if exact:
2047 if exact:
2048 ui.warn(_('no changes needed to %s\n') % rel)
2048 ui.warn(_('no changes needed to %s\n') % rel)
2049 continue
2049 continue
2050 if pmf is None:
2050 if pmf is None:
2051 # only need parent manifest in this unlikely case,
2051 # only need parent manifest in this unlikely case,
2052 # so do not read by default
2052 # so do not read by default
2053 pmf = repo[parent].manifest()
2053 pmf = repo[parent].manifest()
2054 if abs in pmf and mfentry:
2054 if abs in pmf and mfentry:
2055 # if version of file is same in parent and target
2055 # if version of file is same in parent and target
2056 # manifests, do nothing
2056 # manifests, do nothing
2057 if (pmf[abs] != mfentry or
2057 if (pmf[abs] != mfentry or
2058 pmf.flags(abs) != mf.flags(abs)):
2058 pmf.flags(abs) != mf.flags(abs)):
2059 handle(revert, False)
2059 handle(revert, False)
2060 else:
2060 else:
2061 handle(remove, False)
2061 handle(remove, False)
2062
2062
2063 if not opts.get('dry_run'):
2063 if not opts.get('dry_run'):
2064 def checkout(f):
2064 def checkout(f):
2065 fc = ctx[f]
2065 fc = ctx[f]
2066 repo.wwrite(f, fc.data(), fc.flags())
2066 repo.wwrite(f, fc.data(), fc.flags())
2067
2067
2068 audit_path = pathutil.pathauditor(repo.root)
2068 audit_path = pathutil.pathauditor(repo.root)
2069 for f in remove[0]:
2069 for f in remove[0]:
2070 if repo.dirstate[f] == 'a':
2070 if repo.dirstate[f] == 'a':
2071 repo.dirstate.drop(f)
2071 repo.dirstate.drop(f)
2072 continue
2072 continue
2073 audit_path(f)
2073 audit_path(f)
2074 try:
2074 try:
2075 util.unlinkpath(repo.wjoin(f))
2075 util.unlinkpath(repo.wjoin(f))
2076 except OSError:
2076 except OSError:
2077 pass
2077 pass
2078 repo.dirstate.remove(f)
2078 repo.dirstate.remove(f)
2079
2079
2080 normal = None
2080 normal = None
2081 if node == parent:
2081 if node == parent:
2082 # We're reverting to our parent. If possible, we'd like status
2082 # We're reverting to our parent. If possible, we'd like status
2083 # to report the file as clean. We have to use normallookup for
2083 # to report the file as clean. We have to use normallookup for
2084 # merges to avoid losing information about merged/dirty files.
2084 # merges to avoid losing information about merged/dirty files.
2085 if p2 != nullid:
2085 if p2 != nullid:
2086 normal = repo.dirstate.normallookup
2086 normal = repo.dirstate.normallookup
2087 else:
2087 else:
2088 normal = repo.dirstate.normal
2088 normal = repo.dirstate.normal
2089 for f in revert[0]:
2089 for f in revert[0]:
2090 checkout(f)
2090 checkout(f)
2091 if normal:
2091 if normal:
2092 normal(f)
2092 normal(f)
2093
2093
2094 for f in add[0]:
2094 for f in add[0]:
2095 checkout(f)
2095 checkout(f)
2096 repo.dirstate.add(f)
2096 repo.dirstate.add(f)
2097
2097
2098 normal = repo.dirstate.normallookup
2098 normal = repo.dirstate.normallookup
2099 if node == parent and p2 == nullid:
2099 if node == parent and p2 == nullid:
2100 normal = repo.dirstate.normal
2100 normal = repo.dirstate.normal
2101 for f in undelete[0]:
2101 for f in undelete[0]:
2102 checkout(f)
2102 checkout(f)
2103 normal(f)
2103 normal(f)
2104
2104
2105 copied = copies.pathcopies(repo[parent], ctx)
2105 copied = copies.pathcopies(repo[parent], ctx)
2106
2106
2107 for f in add[0] + undelete[0] + revert[0]:
2107 for f in add[0] + undelete[0] + revert[0]:
2108 if f in copied:
2108 if f in copied:
2109 repo.dirstate.copy(copied[f], f)
2109 repo.dirstate.copy(copied[f], f)
2110
2110
2111 if targetsubs:
2111 if targetsubs:
2112 # Revert the subrepos on the revert list
2112 # Revert the subrepos on the revert list
2113 for sub in targetsubs:
2113 for sub in targetsubs:
2114 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
2114 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
2115 finally:
2115 finally:
2116 wlock.release()
2116 wlock.release()
2117
2117
2118 def command(table):
2118 def command(table):
2119 '''returns a function object bound to table which can be used as
2119 '''returns a function object bound to table which can be used as
2120 a decorator for populating table as a command table'''
2120 a decorator for populating table as a command table'''
2121
2121
2122 def cmd(name, options=(), synopsis=None):
2122 def cmd(name, options=(), synopsis=None):
2123 def decorator(func):
2123 def decorator(func):
2124 if synopsis:
2124 if synopsis:
2125 table[name] = func, list(options), synopsis
2125 table[name] = func, list(options), synopsis
2126 else:
2126 else:
2127 table[name] = func, list(options)
2127 table[name] = func, list(options)
2128 return func
2128 return func
2129 return decorator
2129 return decorator
2130
2130
2131 return cmd
2131 return cmd
2132
2132
2133 # a list of (ui, repo) functions called by commands.summary
2133 # a list of (ui, repo) functions called by commands.summary
2134 summaryhooks = util.hooks()
2134 summaryhooks = util.hooks()
2135
2135
2136 # A list of state files kept by multistep operations like graft.
2136 # A list of state files kept by multistep operations like graft.
2137 # Since graft cannot be aborted, it is considered 'clearable' by update.
2137 # Since graft cannot be aborted, it is considered 'clearable' by update.
2138 # note: bisect is intentionally excluded
2138 # note: bisect is intentionally excluded
2139 # (state file, clearable, allowcommit, error, hint)
2139 # (state file, clearable, allowcommit, error, hint)
2140 unfinishedstates = [
2140 unfinishedstates = [
2141 ('graftstate', True, False, _('graft in progress'),
2141 ('graftstate', True, False, _('graft in progress'),
2142 _("use 'hg graft --continue' or 'hg update' to abort")),
2142 _("use 'hg graft --continue' or 'hg update' to abort")),
2143 ('updatestate', True, False, _('last update was interrupted'),
2143 ('updatestate', True, False, _('last update was interrupted'),
2144 _("use 'hg update' to get a consistent checkout"))
2144 _("use 'hg update' to get a consistent checkout"))
2145 ]
2145 ]
2146
2146
2147 def checkunfinished(repo, commit=False):
2147 def checkunfinished(repo, commit=False):
2148 '''Look for an unfinished multistep operation, like graft, and abort
2148 '''Look for an unfinished multistep operation, like graft, and abort
2149 if found. It's probably good to check this right before
2149 if found. It's probably good to check this right before
2150 bailifchanged().
2150 bailifchanged().
2151 '''
2151 '''
2152 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2152 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2153 if commit and allowcommit:
2153 if commit and allowcommit:
2154 continue
2154 continue
2155 if repo.vfs.exists(f):
2155 if repo.vfs.exists(f):
2156 raise util.Abort(msg, hint=hint)
2156 raise util.Abort(msg, hint=hint)
2157
2157
2158 def clearunfinished(repo):
2158 def clearunfinished(repo):
2159 '''Check for unfinished operations (as above), and clear the ones
2159 '''Check for unfinished operations (as above), and clear the ones
2160 that are clearable.
2160 that are clearable.
2161 '''
2161 '''
2162 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2162 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2163 if not clearable and repo.vfs.exists(f):
2163 if not clearable and repo.vfs.exists(f):
2164 raise util.Abort(msg, hint=hint)
2164 raise util.Abort(msg, hint=hint)
2165 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2165 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2166 if clearable and repo.vfs.exists(f):
2166 if clearable and repo.vfs.exists(f):
2167 util.unlink(repo.join(f))
2167 util.unlink(repo.join(f))
@@ -1,2237 +1,2237 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock as lockmod
11 import lock as lockmod
12 import transaction, store, encoding, exchange
12 import transaction, store, encoding, exchange
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 import branchmap, pathutil
19 import branchmap, pathutil
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21 filecache = scmutil.filecache
21 filecache = scmutil.filecache
22
22
23 class repofilecache(filecache):
23 class repofilecache(filecache):
24 """All filecache usage on repo are done for logic that should be unfiltered
24 """All filecache usage on repo are done for logic that should be unfiltered
25 """
25 """
26
26
27 def __get__(self, repo, type=None):
27 def __get__(self, repo, type=None):
28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 def __set__(self, repo, value):
29 def __set__(self, repo, value):
30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 def __delete__(self, repo):
31 def __delete__(self, repo):
32 return super(repofilecache, self).__delete__(repo.unfiltered())
32 return super(repofilecache, self).__delete__(repo.unfiltered())
33
33
34 class storecache(repofilecache):
34 class storecache(repofilecache):
35 """filecache for files in the store"""
35 """filecache for files in the store"""
36 def join(self, obj, fname):
36 def join(self, obj, fname):
37 return obj.sjoin(fname)
37 return obj.sjoin(fname)
38
38
39 class unfilteredpropertycache(propertycache):
39 class unfilteredpropertycache(propertycache):
40 """propertycache that apply to unfiltered repo only"""
40 """propertycache that apply to unfiltered repo only"""
41
41
42 def __get__(self, repo, type=None):
42 def __get__(self, repo, type=None):
43 unfi = repo.unfiltered()
43 unfi = repo.unfiltered()
44 if unfi is repo:
44 if unfi is repo:
45 return super(unfilteredpropertycache, self).__get__(unfi)
45 return super(unfilteredpropertycache, self).__get__(unfi)
46 return getattr(unfi, self.name)
46 return getattr(unfi, self.name)
47
47
48 class filteredpropertycache(propertycache):
48 class filteredpropertycache(propertycache):
49 """propertycache that must take filtering in account"""
49 """propertycache that must take filtering in account"""
50
50
51 def cachevalue(self, obj, value):
51 def cachevalue(self, obj, value):
52 object.__setattr__(obj, self.name, value)
52 object.__setattr__(obj, self.name, value)
53
53
54
54
55 def hasunfilteredcache(repo, name):
55 def hasunfilteredcache(repo, name):
56 """check if a repo has an unfilteredpropertycache value for <name>"""
56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 return name in vars(repo.unfiltered())
57 return name in vars(repo.unfiltered())
58
58
59 def unfilteredmethod(orig):
59 def unfilteredmethod(orig):
60 """decorate method that always need to be run on unfiltered version"""
60 """decorate method that always need to be run on unfiltered version"""
61 def wrapper(repo, *args, **kwargs):
61 def wrapper(repo, *args, **kwargs):
62 return orig(repo.unfiltered(), *args, **kwargs)
62 return orig(repo.unfiltered(), *args, **kwargs)
63 return wrapper
63 return wrapper
64
64
65 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
65 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
66 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
66 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
67
67
68 class localpeer(peer.peerrepository):
68 class localpeer(peer.peerrepository):
69 '''peer for a local repo; reflects only the most recent API'''
69 '''peer for a local repo; reflects only the most recent API'''
70
70
71 def __init__(self, repo, caps=MODERNCAPS):
71 def __init__(self, repo, caps=MODERNCAPS):
72 peer.peerrepository.__init__(self)
72 peer.peerrepository.__init__(self)
73 self._repo = repo.filtered('served')
73 self._repo = repo.filtered('served')
74 self.ui = repo.ui
74 self.ui = repo.ui
75 self._caps = repo._restrictcapabilities(caps)
75 self._caps = repo._restrictcapabilities(caps)
76 self.requirements = repo.requirements
76 self.requirements = repo.requirements
77 self.supportedformats = repo.supportedformats
77 self.supportedformats = repo.supportedformats
78
78
79 def close(self):
79 def close(self):
80 self._repo.close()
80 self._repo.close()
81
81
82 def _capabilities(self):
82 def _capabilities(self):
83 return self._caps
83 return self._caps
84
84
85 def local(self):
85 def local(self):
86 return self._repo
86 return self._repo
87
87
88 def canpush(self):
88 def canpush(self):
89 return True
89 return True
90
90
91 def url(self):
91 def url(self):
92 return self._repo.url()
92 return self._repo.url()
93
93
94 def lookup(self, key):
94 def lookup(self, key):
95 return self._repo.lookup(key)
95 return self._repo.lookup(key)
96
96
97 def branchmap(self):
97 def branchmap(self):
98 return self._repo.branchmap()
98 return self._repo.branchmap()
99
99
100 def heads(self):
100 def heads(self):
101 return self._repo.heads()
101 return self._repo.heads()
102
102
103 def known(self, nodes):
103 def known(self, nodes):
104 return self._repo.known(nodes)
104 return self._repo.known(nodes)
105
105
106 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
106 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
107 return self._repo.getbundle(source, heads=heads, common=common,
107 return self._repo.getbundle(source, heads=heads, common=common,
108 bundlecaps=None)
108 bundlecaps=None)
109
109
110 # TODO We might want to move the next two calls into legacypeer and add
110 # TODO We might want to move the next two calls into legacypeer and add
111 # unbundle instead.
111 # unbundle instead.
112
112
113 def lock(self):
113 def lock(self):
114 return self._repo.lock()
114 return self._repo.lock()
115
115
116 def addchangegroup(self, cg, source, url):
116 def addchangegroup(self, cg, source, url):
117 return self._repo.addchangegroup(cg, source, url)
117 return self._repo.addchangegroup(cg, source, url)
118
118
119 def pushkey(self, namespace, key, old, new):
119 def pushkey(self, namespace, key, old, new):
120 return self._repo.pushkey(namespace, key, old, new)
120 return self._repo.pushkey(namespace, key, old, new)
121
121
122 def listkeys(self, namespace):
122 def listkeys(self, namespace):
123 return self._repo.listkeys(namespace)
123 return self._repo.listkeys(namespace)
124
124
125 def debugwireargs(self, one, two, three=None, four=None, five=None):
125 def debugwireargs(self, one, two, three=None, four=None, five=None):
126 '''used to test argument passing over the wire'''
126 '''used to test argument passing over the wire'''
127 return "%s %s %s %s %s" % (one, two, three, four, five)
127 return "%s %s %s %s %s" % (one, two, three, four, five)
128
128
129 class locallegacypeer(localpeer):
129 class locallegacypeer(localpeer):
130 '''peer extension which implements legacy methods too; used for tests with
130 '''peer extension which implements legacy methods too; used for tests with
131 restricted capabilities'''
131 restricted capabilities'''
132
132
133 def __init__(self, repo):
133 def __init__(self, repo):
134 localpeer.__init__(self, repo, caps=LEGACYCAPS)
134 localpeer.__init__(self, repo, caps=LEGACYCAPS)
135
135
136 def branches(self, nodes):
136 def branches(self, nodes):
137 return self._repo.branches(nodes)
137 return self._repo.branches(nodes)
138
138
139 def between(self, pairs):
139 def between(self, pairs):
140 return self._repo.between(pairs)
140 return self._repo.between(pairs)
141
141
142 def changegroup(self, basenodes, source):
142 def changegroup(self, basenodes, source):
143 return self._repo.changegroup(basenodes, source)
143 return self._repo.changegroup(basenodes, source)
144
144
145 def changegroupsubset(self, bases, heads, source):
145 def changegroupsubset(self, bases, heads, source):
146 return self._repo.changegroupsubset(bases, heads, source)
146 return self._repo.changegroupsubset(bases, heads, source)
147
147
148 class localrepository(object):
148 class localrepository(object):
149
149
150 supportedformats = set(('revlogv1', 'generaldelta'))
150 supportedformats = set(('revlogv1', 'generaldelta'))
151 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
151 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
152 'dotencode'))
152 'dotencode'))
153 openerreqs = set(('revlogv1', 'generaldelta'))
153 openerreqs = set(('revlogv1', 'generaldelta'))
154 requirements = ['revlogv1']
154 requirements = ['revlogv1']
155 filtername = None
155 filtername = None
156
156
157 # a list of (ui, featureset) functions.
157 # a list of (ui, featureset) functions.
158 # only functions defined in module of enabled extensions are invoked
158 # only functions defined in module of enabled extensions are invoked
159 featuresetupfuncs = set()
159 featuresetupfuncs = set()
160
160
161 def _baserequirements(self, create):
161 def _baserequirements(self, create):
162 return self.requirements[:]
162 return self.requirements[:]
163
163
164 def __init__(self, baseui, path=None, create=False):
164 def __init__(self, baseui, path=None, create=False):
165 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
165 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
166 self.wopener = self.wvfs
166 self.wopener = self.wvfs
167 self.root = self.wvfs.base
167 self.root = self.wvfs.base
168 self.path = self.wvfs.join(".hg")
168 self.path = self.wvfs.join(".hg")
169 self.origroot = path
169 self.origroot = path
170 self.auditor = pathutil.pathauditor(self.root, self._checknested)
170 self.auditor = pathutil.pathauditor(self.root, self._checknested)
171 self.vfs = scmutil.vfs(self.path)
171 self.vfs = scmutil.vfs(self.path)
172 self.opener = self.vfs
172 self.opener = self.vfs
173 self.baseui = baseui
173 self.baseui = baseui
174 self.ui = baseui.copy()
174 self.ui = baseui.copy()
175 self.ui.copy = baseui.copy # prevent copying repo configuration
175 self.ui.copy = baseui.copy # prevent copying repo configuration
176 # A list of callback to shape the phase if no data were found.
176 # A list of callback to shape the phase if no data were found.
177 # Callback are in the form: func(repo, roots) --> processed root.
177 # Callback are in the form: func(repo, roots) --> processed root.
178 # This list it to be filled by extension during repo setup
178 # This list it to be filled by extension during repo setup
179 self._phasedefaults = []
179 self._phasedefaults = []
180 try:
180 try:
181 self.ui.readconfig(self.join("hgrc"), self.root)
181 self.ui.readconfig(self.join("hgrc"), self.root)
182 extensions.loadall(self.ui)
182 extensions.loadall(self.ui)
183 except IOError:
183 except IOError:
184 pass
184 pass
185
185
186 if self.featuresetupfuncs:
186 if self.featuresetupfuncs:
187 self.supported = set(self._basesupported) # use private copy
187 self.supported = set(self._basesupported) # use private copy
188 extmods = set(m.__name__ for n, m
188 extmods = set(m.__name__ for n, m
189 in extensions.extensions(self.ui))
189 in extensions.extensions(self.ui))
190 for setupfunc in self.featuresetupfuncs:
190 for setupfunc in self.featuresetupfuncs:
191 if setupfunc.__module__ in extmods:
191 if setupfunc.__module__ in extmods:
192 setupfunc(self.ui, self.supported)
192 setupfunc(self.ui, self.supported)
193 else:
193 else:
194 self.supported = self._basesupported
194 self.supported = self._basesupported
195
195
196 if not self.vfs.isdir():
196 if not self.vfs.isdir():
197 if create:
197 if create:
198 if not self.wvfs.exists():
198 if not self.wvfs.exists():
199 self.wvfs.makedirs()
199 self.wvfs.makedirs()
200 self.vfs.makedir(notindexed=True)
200 self.vfs.makedir(notindexed=True)
201 requirements = self._baserequirements(create)
201 requirements = self._baserequirements(create)
202 if self.ui.configbool('format', 'usestore', True):
202 if self.ui.configbool('format', 'usestore', True):
203 self.vfs.mkdir("store")
203 self.vfs.mkdir("store")
204 requirements.append("store")
204 requirements.append("store")
205 if self.ui.configbool('format', 'usefncache', True):
205 if self.ui.configbool('format', 'usefncache', True):
206 requirements.append("fncache")
206 requirements.append("fncache")
207 if self.ui.configbool('format', 'dotencode', True):
207 if self.ui.configbool('format', 'dotencode', True):
208 requirements.append('dotencode')
208 requirements.append('dotencode')
209 # create an invalid changelog
209 # create an invalid changelog
210 self.vfs.append(
210 self.vfs.append(
211 "00changelog.i",
211 "00changelog.i",
212 '\0\0\0\2' # represents revlogv2
212 '\0\0\0\2' # represents revlogv2
213 ' dummy changelog to prevent using the old repo layout'
213 ' dummy changelog to prevent using the old repo layout'
214 )
214 )
215 if self.ui.configbool('format', 'generaldelta', False):
215 if self.ui.configbool('format', 'generaldelta', False):
216 requirements.append("generaldelta")
216 requirements.append("generaldelta")
217 requirements = set(requirements)
217 requirements = set(requirements)
218 else:
218 else:
219 raise error.RepoError(_("repository %s not found") % path)
219 raise error.RepoError(_("repository %s not found") % path)
220 elif create:
220 elif create:
221 raise error.RepoError(_("repository %s already exists") % path)
221 raise error.RepoError(_("repository %s already exists") % path)
222 else:
222 else:
223 try:
223 try:
224 requirements = scmutil.readrequires(self.vfs, self.supported)
224 requirements = scmutil.readrequires(self.vfs, self.supported)
225 except IOError, inst:
225 except IOError, inst:
226 if inst.errno != errno.ENOENT:
226 if inst.errno != errno.ENOENT:
227 raise
227 raise
228 requirements = set()
228 requirements = set()
229
229
230 self.sharedpath = self.path
230 self.sharedpath = self.path
231 try:
231 try:
232 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
232 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
233 realpath=True)
233 realpath=True)
234 s = vfs.base
234 s = vfs.base
235 if not vfs.exists():
235 if not vfs.exists():
236 raise error.RepoError(
236 raise error.RepoError(
237 _('.hg/sharedpath points to nonexistent directory %s') % s)
237 _('.hg/sharedpath points to nonexistent directory %s') % s)
238 self.sharedpath = s
238 self.sharedpath = s
239 except IOError, inst:
239 except IOError, inst:
240 if inst.errno != errno.ENOENT:
240 if inst.errno != errno.ENOENT:
241 raise
241 raise
242
242
243 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
243 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
244 self.spath = self.store.path
244 self.spath = self.store.path
245 self.svfs = self.store.vfs
245 self.svfs = self.store.vfs
246 self.sopener = self.svfs
246 self.sopener = self.svfs
247 self.sjoin = self.store.join
247 self.sjoin = self.store.join
248 self.vfs.createmode = self.store.createmode
248 self.vfs.createmode = self.store.createmode
249 self._applyrequirements(requirements)
249 self._applyrequirements(requirements)
250 if create:
250 if create:
251 self._writerequirements()
251 self._writerequirements()
252
252
253
253
254 self._branchcaches = {}
254 self._branchcaches = {}
255 self.filterpats = {}
255 self.filterpats = {}
256 self._datafilters = {}
256 self._datafilters = {}
257 self._transref = self._lockref = self._wlockref = None
257 self._transref = self._lockref = self._wlockref = None
258
258
259 # A cache for various files under .hg/ that tracks file changes,
259 # A cache for various files under .hg/ that tracks file changes,
260 # (used by the filecache decorator)
260 # (used by the filecache decorator)
261 #
261 #
262 # Maps a property name to its util.filecacheentry
262 # Maps a property name to its util.filecacheentry
263 self._filecache = {}
263 self._filecache = {}
264
264
265 # hold sets of revision to be filtered
265 # hold sets of revision to be filtered
266 # should be cleared when something might have changed the filter value:
266 # should be cleared when something might have changed the filter value:
267 # - new changesets,
267 # - new changesets,
268 # - phase change,
268 # - phase change,
269 # - new obsolescence marker,
269 # - new obsolescence marker,
270 # - working directory parent change,
270 # - working directory parent change,
271 # - bookmark changes
271 # - bookmark changes
272 self.filteredrevcache = {}
272 self.filteredrevcache = {}
273
273
274 def close(self):
274 def close(self):
275 pass
275 pass
276
276
277 def _restrictcapabilities(self, caps):
277 def _restrictcapabilities(self, caps):
278 return caps
278 return caps
279
279
280 def _applyrequirements(self, requirements):
280 def _applyrequirements(self, requirements):
281 self.requirements = requirements
281 self.requirements = requirements
282 self.sopener.options = dict((r, 1) for r in requirements
282 self.sopener.options = dict((r, 1) for r in requirements
283 if r in self.openerreqs)
283 if r in self.openerreqs)
284 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
284 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
285 if chunkcachesize is not None:
285 if chunkcachesize is not None:
286 self.sopener.options['chunkcachesize'] = chunkcachesize
286 self.sopener.options['chunkcachesize'] = chunkcachesize
287
287
288 def _writerequirements(self):
288 def _writerequirements(self):
289 reqfile = self.opener("requires", "w")
289 reqfile = self.opener("requires", "w")
290 for r in sorted(self.requirements):
290 for r in sorted(self.requirements):
291 reqfile.write("%s\n" % r)
291 reqfile.write("%s\n" % r)
292 reqfile.close()
292 reqfile.close()
293
293
294 def _checknested(self, path):
294 def _checknested(self, path):
295 """Determine if path is a legal nested repository."""
295 """Determine if path is a legal nested repository."""
296 if not path.startswith(self.root):
296 if not path.startswith(self.root):
297 return False
297 return False
298 subpath = path[len(self.root) + 1:]
298 subpath = path[len(self.root) + 1:]
299 normsubpath = util.pconvert(subpath)
299 normsubpath = util.pconvert(subpath)
300
300
301 # XXX: Checking against the current working copy is wrong in
301 # XXX: Checking against the current working copy is wrong in
302 # the sense that it can reject things like
302 # the sense that it can reject things like
303 #
303 #
304 # $ hg cat -r 10 sub/x.txt
304 # $ hg cat -r 10 sub/x.txt
305 #
305 #
306 # if sub/ is no longer a subrepository in the working copy
306 # if sub/ is no longer a subrepository in the working copy
307 # parent revision.
307 # parent revision.
308 #
308 #
309 # However, it can of course also allow things that would have
309 # However, it can of course also allow things that would have
310 # been rejected before, such as the above cat command if sub/
310 # been rejected before, such as the above cat command if sub/
311 # is a subrepository now, but was a normal directory before.
311 # is a subrepository now, but was a normal directory before.
312 # The old path auditor would have rejected by mistake since it
312 # The old path auditor would have rejected by mistake since it
313 # panics when it sees sub/.hg/.
313 # panics when it sees sub/.hg/.
314 #
314 #
315 # All in all, checking against the working copy seems sensible
315 # All in all, checking against the working copy seems sensible
316 # since we want to prevent access to nested repositories on
316 # since we want to prevent access to nested repositories on
317 # the filesystem *now*.
317 # the filesystem *now*.
318 ctx = self[None]
318 ctx = self[None]
319 parts = util.splitpath(subpath)
319 parts = util.splitpath(subpath)
320 while parts:
320 while parts:
321 prefix = '/'.join(parts)
321 prefix = '/'.join(parts)
322 if prefix in ctx.substate:
322 if prefix in ctx.substate:
323 if prefix == normsubpath:
323 if prefix == normsubpath:
324 return True
324 return True
325 else:
325 else:
326 sub = ctx.sub(prefix)
326 sub = ctx.sub(prefix)
327 return sub.checknested(subpath[len(prefix) + 1:])
327 return sub.checknested(subpath[len(prefix) + 1:])
328 else:
328 else:
329 parts.pop()
329 parts.pop()
330 return False
330 return False
331
331
332 def peer(self):
332 def peer(self):
333 return localpeer(self) # not cached to avoid reference cycle
333 return localpeer(self) # not cached to avoid reference cycle
334
334
335 def unfiltered(self):
335 def unfiltered(self):
336 """Return unfiltered version of the repository
336 """Return unfiltered version of the repository
337
337
338 Intended to be overwritten by filtered repo."""
338 Intended to be overwritten by filtered repo."""
339 return self
339 return self
340
340
341 def filtered(self, name):
341 def filtered(self, name):
342 """Return a filtered version of a repository"""
342 """Return a filtered version of a repository"""
343 # build a new class with the mixin and the current class
343 # build a new class with the mixin and the current class
344 # (possibly subclass of the repo)
344 # (possibly subclass of the repo)
345 class proxycls(repoview.repoview, self.unfiltered().__class__):
345 class proxycls(repoview.repoview, self.unfiltered().__class__):
346 pass
346 pass
347 return proxycls(self, name)
347 return proxycls(self, name)
348
348
349 @repofilecache('bookmarks')
349 @repofilecache('bookmarks')
350 def _bookmarks(self):
350 def _bookmarks(self):
351 return bookmarks.bmstore(self)
351 return bookmarks.bmstore(self)
352
352
353 @repofilecache('bookmarks.current')
353 @repofilecache('bookmarks.current')
354 def _bookmarkcurrent(self):
354 def _bookmarkcurrent(self):
355 return bookmarks.readcurrent(self)
355 return bookmarks.readcurrent(self)
356
356
357 def bookmarkheads(self, bookmark):
357 def bookmarkheads(self, bookmark):
358 name = bookmark.split('@', 1)[0]
358 name = bookmark.split('@', 1)[0]
359 heads = []
359 heads = []
360 for mark, n in self._bookmarks.iteritems():
360 for mark, n in self._bookmarks.iteritems():
361 if mark.split('@', 1)[0] == name:
361 if mark.split('@', 1)[0] == name:
362 heads.append(n)
362 heads.append(n)
363 return heads
363 return heads
364
364
365 @storecache('phaseroots')
365 @storecache('phaseroots')
366 def _phasecache(self):
366 def _phasecache(self):
367 return phases.phasecache(self, self._phasedefaults)
367 return phases.phasecache(self, self._phasedefaults)
368
368
369 @storecache('obsstore')
369 @storecache('obsstore')
370 def obsstore(self):
370 def obsstore(self):
371 store = obsolete.obsstore(self.sopener)
371 store = obsolete.obsstore(self.sopener)
372 if store and not obsolete._enabled:
372 if store and not obsolete._enabled:
373 # message is rare enough to not be translated
373 # message is rare enough to not be translated
374 msg = 'obsolete feature not enabled but %i markers found!\n'
374 msg = 'obsolete feature not enabled but %i markers found!\n'
375 self.ui.warn(msg % len(list(store)))
375 self.ui.warn(msg % len(list(store)))
376 return store
376 return store
377
377
378 @storecache('00changelog.i')
378 @storecache('00changelog.i')
379 def changelog(self):
379 def changelog(self):
380 c = changelog.changelog(self.sopener)
380 c = changelog.changelog(self.sopener)
381 if 'HG_PENDING' in os.environ:
381 if 'HG_PENDING' in os.environ:
382 p = os.environ['HG_PENDING']
382 p = os.environ['HG_PENDING']
383 if p.startswith(self.root):
383 if p.startswith(self.root):
384 c.readpending('00changelog.i.a')
384 c.readpending('00changelog.i.a')
385 return c
385 return c
386
386
387 @storecache('00manifest.i')
387 @storecache('00manifest.i')
388 def manifest(self):
388 def manifest(self):
389 return manifest.manifest(self.sopener)
389 return manifest.manifest(self.sopener)
390
390
391 @repofilecache('dirstate')
391 @repofilecache('dirstate')
392 def dirstate(self):
392 def dirstate(self):
393 warned = [0]
393 warned = [0]
394 def validate(node):
394 def validate(node):
395 try:
395 try:
396 self.changelog.rev(node)
396 self.changelog.rev(node)
397 return node
397 return node
398 except error.LookupError:
398 except error.LookupError:
399 if not warned[0]:
399 if not warned[0]:
400 warned[0] = True
400 warned[0] = True
401 self.ui.warn(_("warning: ignoring unknown"
401 self.ui.warn(_("warning: ignoring unknown"
402 " working parent %s!\n") % short(node))
402 " working parent %s!\n") % short(node))
403 return nullid
403 return nullid
404
404
405 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
405 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
406
406
407 def __getitem__(self, changeid):
407 def __getitem__(self, changeid):
408 if changeid is None:
408 if changeid is None:
409 return context.workingctx(self)
409 return context.workingctx(self)
410 return context.changectx(self, changeid)
410 return context.changectx(self, changeid)
411
411
412 def __contains__(self, changeid):
412 def __contains__(self, changeid):
413 try:
413 try:
414 return bool(self.lookup(changeid))
414 return bool(self.lookup(changeid))
415 except error.RepoLookupError:
415 except error.RepoLookupError:
416 return False
416 return False
417
417
418 def __nonzero__(self):
418 def __nonzero__(self):
419 return True
419 return True
420
420
421 def __len__(self):
421 def __len__(self):
422 return len(self.changelog)
422 return len(self.changelog)
423
423
424 def __iter__(self):
424 def __iter__(self):
425 return iter(self.changelog)
425 return iter(self.changelog)
426
426
427 def revs(self, expr, *args):
427 def revs(self, expr, *args):
428 '''Return a list of revisions matching the given revset'''
428 '''Return a list of revisions matching the given revset'''
429 expr = revset.formatspec(expr, *args)
429 expr = revset.formatspec(expr, *args)
430 m = revset.match(None, expr)
430 m = revset.match(None, expr)
431 return revset.baseset([r for r in m(self, revset.baseset(self))])
431 return revset.baseset([r for r in m(self, revset.baseset(self))])
432
432
433 def set(self, expr, *args):
433 def set(self, expr, *args):
434 '''
434 '''
435 Yield a context for each matching revision, after doing arg
435 Yield a context for each matching revision, after doing arg
436 replacement via revset.formatspec
436 replacement via revset.formatspec
437 '''
437 '''
438 for r in self.revs(expr, *args):
438 for r in self.revs(expr, *args):
439 yield self[r]
439 yield self[r]
440
440
441 def url(self):
441 def url(self):
442 return 'file:' + self.root
442 return 'file:' + self.root
443
443
444 def hook(self, name, throw=False, **args):
444 def hook(self, name, throw=False, **args):
445 return hook.hook(self.ui, self, name, throw, **args)
445 return hook.hook(self.ui, self, name, throw, **args)
446
446
447 @unfilteredmethod
447 @unfilteredmethod
448 def _tag(self, names, node, message, local, user, date, extra={}):
448 def _tag(self, names, node, message, local, user, date, extra={}):
449 if isinstance(names, str):
449 if isinstance(names, str):
450 names = (names,)
450 names = (names,)
451
451
452 branches = self.branchmap()
452 branches = self.branchmap()
453 for name in names:
453 for name in names:
454 self.hook('pretag', throw=True, node=hex(node), tag=name,
454 self.hook('pretag', throw=True, node=hex(node), tag=name,
455 local=local)
455 local=local)
456 if name in branches:
456 if name in branches:
457 self.ui.warn(_("warning: tag %s conflicts with existing"
457 self.ui.warn(_("warning: tag %s conflicts with existing"
458 " branch name\n") % name)
458 " branch name\n") % name)
459
459
460 def writetags(fp, names, munge, prevtags):
460 def writetags(fp, names, munge, prevtags):
461 fp.seek(0, 2)
461 fp.seek(0, 2)
462 if prevtags and prevtags[-1] != '\n':
462 if prevtags and prevtags[-1] != '\n':
463 fp.write('\n')
463 fp.write('\n')
464 for name in names:
464 for name in names:
465 m = munge and munge(name) or name
465 m = munge and munge(name) or name
466 if (self._tagscache.tagtypes and
466 if (self._tagscache.tagtypes and
467 name in self._tagscache.tagtypes):
467 name in self._tagscache.tagtypes):
468 old = self.tags().get(name, nullid)
468 old = self.tags().get(name, nullid)
469 fp.write('%s %s\n' % (hex(old), m))
469 fp.write('%s %s\n' % (hex(old), m))
470 fp.write('%s %s\n' % (hex(node), m))
470 fp.write('%s %s\n' % (hex(node), m))
471 fp.close()
471 fp.close()
472
472
473 prevtags = ''
473 prevtags = ''
474 if local:
474 if local:
475 try:
475 try:
476 fp = self.opener('localtags', 'r+')
476 fp = self.opener('localtags', 'r+')
477 except IOError:
477 except IOError:
478 fp = self.opener('localtags', 'a')
478 fp = self.opener('localtags', 'a')
479 else:
479 else:
480 prevtags = fp.read()
480 prevtags = fp.read()
481
481
482 # local tags are stored in the current charset
482 # local tags are stored in the current charset
483 writetags(fp, names, None, prevtags)
483 writetags(fp, names, None, prevtags)
484 for name in names:
484 for name in names:
485 self.hook('tag', node=hex(node), tag=name, local=local)
485 self.hook('tag', node=hex(node), tag=name, local=local)
486 return
486 return
487
487
488 try:
488 try:
489 fp = self.wfile('.hgtags', 'rb+')
489 fp = self.wfile('.hgtags', 'rb+')
490 except IOError, e:
490 except IOError, e:
491 if e.errno != errno.ENOENT:
491 if e.errno != errno.ENOENT:
492 raise
492 raise
493 fp = self.wfile('.hgtags', 'ab')
493 fp = self.wfile('.hgtags', 'ab')
494 else:
494 else:
495 prevtags = fp.read()
495 prevtags = fp.read()
496
496
497 # committed tags are stored in UTF-8
497 # committed tags are stored in UTF-8
498 writetags(fp, names, encoding.fromlocal, prevtags)
498 writetags(fp, names, encoding.fromlocal, prevtags)
499
499
500 fp.close()
500 fp.close()
501
501
502 self.invalidatecaches()
502 self.invalidatecaches()
503
503
504 if '.hgtags' not in self.dirstate:
504 if '.hgtags' not in self.dirstate:
505 self[None].add(['.hgtags'])
505 self[None].add(['.hgtags'])
506
506
507 m = matchmod.exact(self.root, '', ['.hgtags'])
507 m = matchmod.exact(self.root, '', ['.hgtags'])
508 tagnode = self.commit(message, user, date, extra=extra, match=m)
508 tagnode = self.commit(message, user, date, extra=extra, match=m)
509
509
510 for name in names:
510 for name in names:
511 self.hook('tag', node=hex(node), tag=name, local=local)
511 self.hook('tag', node=hex(node), tag=name, local=local)
512
512
513 return tagnode
513 return tagnode
514
514
515 def tag(self, names, node, message, local, user, date):
515 def tag(self, names, node, message, local, user, date):
516 '''tag a revision with one or more symbolic names.
516 '''tag a revision with one or more symbolic names.
517
517
518 names is a list of strings or, when adding a single tag, names may be a
518 names is a list of strings or, when adding a single tag, names may be a
519 string.
519 string.
520
520
521 if local is True, the tags are stored in a per-repository file.
521 if local is True, the tags are stored in a per-repository file.
522 otherwise, they are stored in the .hgtags file, and a new
522 otherwise, they are stored in the .hgtags file, and a new
523 changeset is committed with the change.
523 changeset is committed with the change.
524
524
525 keyword arguments:
525 keyword arguments:
526
526
527 local: whether to store tags in non-version-controlled file
527 local: whether to store tags in non-version-controlled file
528 (default False)
528 (default False)
529
529
530 message: commit message to use if committing
530 message: commit message to use if committing
531
531
532 user: name of user to use if committing
532 user: name of user to use if committing
533
533
534 date: date tuple to use if committing'''
534 date: date tuple to use if committing'''
535
535
536 if not local:
536 if not local:
537 for x in self.status()[:5]:
537 for x in self.status()[:5]:
538 if '.hgtags' in x:
538 if '.hgtags' in x:
539 raise util.Abort(_('working copy of .hgtags is changed '
539 raise util.Abort(_('working copy of .hgtags is changed '
540 '(please commit .hgtags manually)'))
540 '(please commit .hgtags manually)'))
541
541
542 self.tags() # instantiate the cache
542 self.tags() # instantiate the cache
543 self._tag(names, node, message, local, user, date)
543 self._tag(names, node, message, local, user, date)
544
544
545 @filteredpropertycache
545 @filteredpropertycache
546 def _tagscache(self):
546 def _tagscache(self):
547 '''Returns a tagscache object that contains various tags related
547 '''Returns a tagscache object that contains various tags related
548 caches.'''
548 caches.'''
549
549
550 # This simplifies its cache management by having one decorated
550 # This simplifies its cache management by having one decorated
551 # function (this one) and the rest simply fetch things from it.
551 # function (this one) and the rest simply fetch things from it.
552 class tagscache(object):
552 class tagscache(object):
553 def __init__(self):
553 def __init__(self):
554 # These two define the set of tags for this repository. tags
554 # These two define the set of tags for this repository. tags
555 # maps tag name to node; tagtypes maps tag name to 'global' or
555 # maps tag name to node; tagtypes maps tag name to 'global' or
556 # 'local'. (Global tags are defined by .hgtags across all
556 # 'local'. (Global tags are defined by .hgtags across all
557 # heads, and local tags are defined in .hg/localtags.)
557 # heads, and local tags are defined in .hg/localtags.)
558 # They constitute the in-memory cache of tags.
558 # They constitute the in-memory cache of tags.
559 self.tags = self.tagtypes = None
559 self.tags = self.tagtypes = None
560
560
561 self.nodetagscache = self.tagslist = None
561 self.nodetagscache = self.tagslist = None
562
562
563 cache = tagscache()
563 cache = tagscache()
564 cache.tags, cache.tagtypes = self._findtags()
564 cache.tags, cache.tagtypes = self._findtags()
565
565
566 return cache
566 return cache
567
567
568 def tags(self):
568 def tags(self):
569 '''return a mapping of tag to node'''
569 '''return a mapping of tag to node'''
570 t = {}
570 t = {}
571 if self.changelog.filteredrevs:
571 if self.changelog.filteredrevs:
572 tags, tt = self._findtags()
572 tags, tt = self._findtags()
573 else:
573 else:
574 tags = self._tagscache.tags
574 tags = self._tagscache.tags
575 for k, v in tags.iteritems():
575 for k, v in tags.iteritems():
576 try:
576 try:
577 # ignore tags to unknown nodes
577 # ignore tags to unknown nodes
578 self.changelog.rev(v)
578 self.changelog.rev(v)
579 t[k] = v
579 t[k] = v
580 except (error.LookupError, ValueError):
580 except (error.LookupError, ValueError):
581 pass
581 pass
582 return t
582 return t
583
583
584 def _findtags(self):
584 def _findtags(self):
585 '''Do the hard work of finding tags. Return a pair of dicts
585 '''Do the hard work of finding tags. Return a pair of dicts
586 (tags, tagtypes) where tags maps tag name to node, and tagtypes
586 (tags, tagtypes) where tags maps tag name to node, and tagtypes
587 maps tag name to a string like \'global\' or \'local\'.
587 maps tag name to a string like \'global\' or \'local\'.
588 Subclasses or extensions are free to add their own tags, but
588 Subclasses or extensions are free to add their own tags, but
589 should be aware that the returned dicts will be retained for the
589 should be aware that the returned dicts will be retained for the
590 duration of the localrepo object.'''
590 duration of the localrepo object.'''
591
591
592 # XXX what tagtype should subclasses/extensions use? Currently
592 # XXX what tagtype should subclasses/extensions use? Currently
593 # mq and bookmarks add tags, but do not set the tagtype at all.
593 # mq and bookmarks add tags, but do not set the tagtype at all.
594 # Should each extension invent its own tag type? Should there
594 # Should each extension invent its own tag type? Should there
595 # be one tagtype for all such "virtual" tags? Or is the status
595 # be one tagtype for all such "virtual" tags? Or is the status
596 # quo fine?
596 # quo fine?
597
597
598 alltags = {} # map tag name to (node, hist)
598 alltags = {} # map tag name to (node, hist)
599 tagtypes = {}
599 tagtypes = {}
600
600
601 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
601 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
602 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
602 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
603
603
604 # Build the return dicts. Have to re-encode tag names because
604 # Build the return dicts. Have to re-encode tag names because
605 # the tags module always uses UTF-8 (in order not to lose info
605 # the tags module always uses UTF-8 (in order not to lose info
606 # writing to the cache), but the rest of Mercurial wants them in
606 # writing to the cache), but the rest of Mercurial wants them in
607 # local encoding.
607 # local encoding.
608 tags = {}
608 tags = {}
609 for (name, (node, hist)) in alltags.iteritems():
609 for (name, (node, hist)) in alltags.iteritems():
610 if node != nullid:
610 if node != nullid:
611 tags[encoding.tolocal(name)] = node
611 tags[encoding.tolocal(name)] = node
612 tags['tip'] = self.changelog.tip()
612 tags['tip'] = self.changelog.tip()
613 tagtypes = dict([(encoding.tolocal(name), value)
613 tagtypes = dict([(encoding.tolocal(name), value)
614 for (name, value) in tagtypes.iteritems()])
614 for (name, value) in tagtypes.iteritems()])
615 return (tags, tagtypes)
615 return (tags, tagtypes)
616
616
617 def tagtype(self, tagname):
617 def tagtype(self, tagname):
618 '''
618 '''
619 return the type of the given tag. result can be:
619 return the type of the given tag. result can be:
620
620
621 'local' : a local tag
621 'local' : a local tag
622 'global' : a global tag
622 'global' : a global tag
623 None : tag does not exist
623 None : tag does not exist
624 '''
624 '''
625
625
626 return self._tagscache.tagtypes.get(tagname)
626 return self._tagscache.tagtypes.get(tagname)
627
627
628 def tagslist(self):
628 def tagslist(self):
629 '''return a list of tags ordered by revision'''
629 '''return a list of tags ordered by revision'''
630 if not self._tagscache.tagslist:
630 if not self._tagscache.tagslist:
631 l = []
631 l = []
632 for t, n in self.tags().iteritems():
632 for t, n in self.tags().iteritems():
633 r = self.changelog.rev(n)
633 r = self.changelog.rev(n)
634 l.append((r, t, n))
634 l.append((r, t, n))
635 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
635 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
636
636
637 return self._tagscache.tagslist
637 return self._tagscache.tagslist
638
638
639 def nodetags(self, node):
639 def nodetags(self, node):
640 '''return the tags associated with a node'''
640 '''return the tags associated with a node'''
641 if not self._tagscache.nodetagscache:
641 if not self._tagscache.nodetagscache:
642 nodetagscache = {}
642 nodetagscache = {}
643 for t, n in self._tagscache.tags.iteritems():
643 for t, n in self._tagscache.tags.iteritems():
644 nodetagscache.setdefault(n, []).append(t)
644 nodetagscache.setdefault(n, []).append(t)
645 for tags in nodetagscache.itervalues():
645 for tags in nodetagscache.itervalues():
646 tags.sort()
646 tags.sort()
647 self._tagscache.nodetagscache = nodetagscache
647 self._tagscache.nodetagscache = nodetagscache
648 return self._tagscache.nodetagscache.get(node, [])
648 return self._tagscache.nodetagscache.get(node, [])
649
649
650 def nodebookmarks(self, node):
650 def nodebookmarks(self, node):
651 marks = []
651 marks = []
652 for bookmark, n in self._bookmarks.iteritems():
652 for bookmark, n in self._bookmarks.iteritems():
653 if n == node:
653 if n == node:
654 marks.append(bookmark)
654 marks.append(bookmark)
655 return sorted(marks)
655 return sorted(marks)
656
656
657 def branchmap(self):
657 def branchmap(self):
658 '''returns a dictionary {branch: [branchheads]} with branchheads
658 '''returns a dictionary {branch: [branchheads]} with branchheads
659 ordered by increasing revision number'''
659 ordered by increasing revision number'''
660 branchmap.updatecache(self)
660 branchmap.updatecache(self)
661 return self._branchcaches[self.filtername]
661 return self._branchcaches[self.filtername]
662
662
663 def branchtip(self, branch):
663 def branchtip(self, branch):
664 '''return the tip node for a given branch'''
664 '''return the tip node for a given branch'''
665 try:
665 try:
666 return self.branchmap().branchtip(branch)
666 return self.branchmap().branchtip(branch)
667 except KeyError:
667 except KeyError:
668 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
668 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
669
669
670 def lookup(self, key):
670 def lookup(self, key):
671 return self[key].node()
671 return self[key].node()
672
672
673 def lookupbranch(self, key, remote=None):
673 def lookupbranch(self, key, remote=None):
674 repo = remote or self
674 repo = remote or self
675 if key in repo.branchmap():
675 if key in repo.branchmap():
676 return key
676 return key
677
677
678 repo = (remote and remote.local()) and remote or self
678 repo = (remote and remote.local()) and remote or self
679 return repo[key].branch()
679 return repo[key].branch()
680
680
681 def known(self, nodes):
681 def known(self, nodes):
682 nm = self.changelog.nodemap
682 nm = self.changelog.nodemap
683 pc = self._phasecache
683 pc = self._phasecache
684 result = []
684 result = []
685 for n in nodes:
685 for n in nodes:
686 r = nm.get(n)
686 r = nm.get(n)
687 resp = not (r is None or pc.phase(self, r) >= phases.secret)
687 resp = not (r is None or pc.phase(self, r) >= phases.secret)
688 result.append(resp)
688 result.append(resp)
689 return result
689 return result
690
690
691 def local(self):
691 def local(self):
692 return self
692 return self
693
693
694 def cancopy(self):
694 def cancopy(self):
695 # so statichttprepo's override of local() works
695 # so statichttprepo's override of local() works
696 if not self.local():
696 if not self.local():
697 return False
697 return False
698 if not self.ui.configbool('phases', 'publish', True):
698 if not self.ui.configbool('phases', 'publish', True):
699 return True
699 return True
700 # if publishing we can't copy if there is filtered content
700 # if publishing we can't copy if there is filtered content
701 return not self.filtered('visible').changelog.filteredrevs
701 return not self.filtered('visible').changelog.filteredrevs
702
702
703 def join(self, f):
703 def join(self, f):
704 return os.path.join(self.path, f)
704 return os.path.join(self.path, f)
705
705
706 def wjoin(self, f):
706 def wjoin(self, f):
707 return os.path.join(self.root, f)
707 return os.path.join(self.root, f)
708
708
709 def file(self, f):
709 def file(self, f):
710 if f[0] == '/':
710 if f[0] == '/':
711 f = f[1:]
711 f = f[1:]
712 return filelog.filelog(self.sopener, f)
712 return filelog.filelog(self.sopener, f)
713
713
714 def changectx(self, changeid):
714 def changectx(self, changeid):
715 return self[changeid]
715 return self[changeid]
716
716
717 def parents(self, changeid=None):
717 def parents(self, changeid=None):
718 '''get list of changectxs for parents of changeid'''
718 '''get list of changectxs for parents of changeid'''
719 return self[changeid].parents()
719 return self[changeid].parents()
720
720
721 def setparents(self, p1, p2=nullid):
721 def setparents(self, p1, p2=nullid):
722 copies = self.dirstate.setparents(p1, p2)
722 copies = self.dirstate.setparents(p1, p2)
723 pctx = self[p1]
723 pctx = self[p1]
724 if copies:
724 if copies:
725 # Adjust copy records, the dirstate cannot do it, it
725 # Adjust copy records, the dirstate cannot do it, it
726 # requires access to parents manifests. Preserve them
726 # requires access to parents manifests. Preserve them
727 # only for entries added to first parent.
727 # only for entries added to first parent.
728 for f in copies:
728 for f in copies:
729 if f not in pctx and copies[f] in pctx:
729 if f not in pctx and copies[f] in pctx:
730 self.dirstate.copy(copies[f], f)
730 self.dirstate.copy(copies[f], f)
731 if p2 == nullid:
731 if p2 == nullid:
732 for f, s in sorted(self.dirstate.copies().items()):
732 for f, s in sorted(self.dirstate.copies().items()):
733 if f not in pctx and s not in pctx:
733 if f not in pctx and s not in pctx:
734 self.dirstate.copy(None, f)
734 self.dirstate.copy(None, f)
735
735
736 def filectx(self, path, changeid=None, fileid=None):
736 def filectx(self, path, changeid=None, fileid=None):
737 """changeid can be a changeset revision, node, or tag.
737 """changeid can be a changeset revision, node, or tag.
738 fileid can be a file revision or node."""
738 fileid can be a file revision or node."""
739 return context.filectx(self, path, changeid, fileid)
739 return context.filectx(self, path, changeid, fileid)
740
740
741 def getcwd(self):
741 def getcwd(self):
742 return self.dirstate.getcwd()
742 return self.dirstate.getcwd()
743
743
744 def pathto(self, f, cwd=None):
744 def pathto(self, f, cwd=None):
745 return self.dirstate.pathto(f, cwd)
745 return self.dirstate.pathto(f, cwd)
746
746
747 def wfile(self, f, mode='r'):
747 def wfile(self, f, mode='r'):
748 return self.wopener(f, mode)
748 return self.wopener(f, mode)
749
749
750 def _link(self, f):
750 def _link(self, f):
751 return self.wvfs.islink(f)
751 return self.wvfs.islink(f)
752
752
753 def _loadfilter(self, filter):
753 def _loadfilter(self, filter):
754 if filter not in self.filterpats:
754 if filter not in self.filterpats:
755 l = []
755 l = []
756 for pat, cmd in self.ui.configitems(filter):
756 for pat, cmd in self.ui.configitems(filter):
757 if cmd == '!':
757 if cmd == '!':
758 continue
758 continue
759 mf = matchmod.match(self.root, '', [pat])
759 mf = matchmod.match(self.root, '', [pat])
760 fn = None
760 fn = None
761 params = cmd
761 params = cmd
762 for name, filterfn in self._datafilters.iteritems():
762 for name, filterfn in self._datafilters.iteritems():
763 if cmd.startswith(name):
763 if cmd.startswith(name):
764 fn = filterfn
764 fn = filterfn
765 params = cmd[len(name):].lstrip()
765 params = cmd[len(name):].lstrip()
766 break
766 break
767 if not fn:
767 if not fn:
768 fn = lambda s, c, **kwargs: util.filter(s, c)
768 fn = lambda s, c, **kwargs: util.filter(s, c)
769 # Wrap old filters not supporting keyword arguments
769 # Wrap old filters not supporting keyword arguments
770 if not inspect.getargspec(fn)[2]:
770 if not inspect.getargspec(fn)[2]:
771 oldfn = fn
771 oldfn = fn
772 fn = lambda s, c, **kwargs: oldfn(s, c)
772 fn = lambda s, c, **kwargs: oldfn(s, c)
773 l.append((mf, fn, params))
773 l.append((mf, fn, params))
774 self.filterpats[filter] = l
774 self.filterpats[filter] = l
775 return self.filterpats[filter]
775 return self.filterpats[filter]
776
776
777 def _filter(self, filterpats, filename, data):
777 def _filter(self, filterpats, filename, data):
778 for mf, fn, cmd in filterpats:
778 for mf, fn, cmd in filterpats:
779 if mf(filename):
779 if mf(filename):
780 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
780 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
781 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
781 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
782 break
782 break
783
783
784 return data
784 return data
785
785
786 @unfilteredpropertycache
786 @unfilteredpropertycache
787 def _encodefilterpats(self):
787 def _encodefilterpats(self):
788 return self._loadfilter('encode')
788 return self._loadfilter('encode')
789
789
790 @unfilteredpropertycache
790 @unfilteredpropertycache
791 def _decodefilterpats(self):
791 def _decodefilterpats(self):
792 return self._loadfilter('decode')
792 return self._loadfilter('decode')
793
793
794 def adddatafilter(self, name, filter):
794 def adddatafilter(self, name, filter):
795 self._datafilters[name] = filter
795 self._datafilters[name] = filter
796
796
797 def wread(self, filename):
797 def wread(self, filename):
798 if self._link(filename):
798 if self._link(filename):
799 data = self.wvfs.readlink(filename)
799 data = self.wvfs.readlink(filename)
800 else:
800 else:
801 data = self.wopener.read(filename)
801 data = self.wopener.read(filename)
802 return self._filter(self._encodefilterpats, filename, data)
802 return self._filter(self._encodefilterpats, filename, data)
803
803
804 def wwrite(self, filename, data, flags):
804 def wwrite(self, filename, data, flags):
805 data = self._filter(self._decodefilterpats, filename, data)
805 data = self._filter(self._decodefilterpats, filename, data)
806 if 'l' in flags:
806 if 'l' in flags:
807 self.wopener.symlink(data, filename)
807 self.wopener.symlink(data, filename)
808 else:
808 else:
809 self.wopener.write(filename, data)
809 self.wopener.write(filename, data)
810 if 'x' in flags:
810 if 'x' in flags:
811 self.wvfs.setflags(filename, False, True)
811 self.wvfs.setflags(filename, False, True)
812
812
813 def wwritedata(self, filename, data):
813 def wwritedata(self, filename, data):
814 return self._filter(self._decodefilterpats, filename, data)
814 return self._filter(self._decodefilterpats, filename, data)
815
815
816 def transaction(self, desc, report=None):
816 def transaction(self, desc, report=None):
817 tr = self._transref and self._transref() or None
817 tr = self._transref and self._transref() or None
818 if tr and tr.running():
818 if tr and tr.running():
819 return tr.nest()
819 return tr.nest()
820
820
821 # abort here if the journal already exists
821 # abort here if the journal already exists
822 if self.svfs.exists("journal"):
822 if self.svfs.exists("journal"):
823 raise error.RepoError(
823 raise error.RepoError(
824 _("abandoned transaction found - run hg recover"))
824 _("abandoned transaction found - run hg recover"))
825
825
826 self._writejournal(desc)
826 self._writejournal(desc)
827 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
827 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
828 rp = report and report or self.ui.warn
828 rp = report and report or self.ui.warn
829 tr = transaction.transaction(rp, self.sopener,
829 tr = transaction.transaction(rp, self.sopener,
830 "journal",
830 "journal",
831 aftertrans(renames),
831 aftertrans(renames),
832 self.store.createmode)
832 self.store.createmode)
833 self._transref = weakref.ref(tr)
833 self._transref = weakref.ref(tr)
834 return tr
834 return tr
835
835
836 def _journalfiles(self):
836 def _journalfiles(self):
837 return ((self.svfs, 'journal'),
837 return ((self.svfs, 'journal'),
838 (self.vfs, 'journal.dirstate'),
838 (self.vfs, 'journal.dirstate'),
839 (self.vfs, 'journal.branch'),
839 (self.vfs, 'journal.branch'),
840 (self.vfs, 'journal.desc'),
840 (self.vfs, 'journal.desc'),
841 (self.vfs, 'journal.bookmarks'),
841 (self.vfs, 'journal.bookmarks'),
842 (self.svfs, 'journal.phaseroots'))
842 (self.svfs, 'journal.phaseroots'))
843
843
844 def undofiles(self):
844 def undofiles(self):
845 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
845 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
846
846
847 def _writejournal(self, desc):
847 def _writejournal(self, desc):
848 self.opener.write("journal.dirstate",
848 self.opener.write("journal.dirstate",
849 self.opener.tryread("dirstate"))
849 self.opener.tryread("dirstate"))
850 self.opener.write("journal.branch",
850 self.opener.write("journal.branch",
851 encoding.fromlocal(self.dirstate.branch()))
851 encoding.fromlocal(self.dirstate.branch()))
852 self.opener.write("journal.desc",
852 self.opener.write("journal.desc",
853 "%d\n%s\n" % (len(self), desc))
853 "%d\n%s\n" % (len(self), desc))
854 self.opener.write("journal.bookmarks",
854 self.opener.write("journal.bookmarks",
855 self.opener.tryread("bookmarks"))
855 self.opener.tryread("bookmarks"))
856 self.sopener.write("journal.phaseroots",
856 self.sopener.write("journal.phaseroots",
857 self.sopener.tryread("phaseroots"))
857 self.sopener.tryread("phaseroots"))
858
858
859 def recover(self):
859 def recover(self):
860 lock = self.lock()
860 lock = self.lock()
861 try:
861 try:
862 if self.svfs.exists("journal"):
862 if self.svfs.exists("journal"):
863 self.ui.status(_("rolling back interrupted transaction\n"))
863 self.ui.status(_("rolling back interrupted transaction\n"))
864 transaction.rollback(self.sopener, "journal",
864 transaction.rollback(self.sopener, "journal",
865 self.ui.warn)
865 self.ui.warn)
866 self.invalidate()
866 self.invalidate()
867 return True
867 return True
868 else:
868 else:
869 self.ui.warn(_("no interrupted transaction available\n"))
869 self.ui.warn(_("no interrupted transaction available\n"))
870 return False
870 return False
871 finally:
871 finally:
872 lock.release()
872 lock.release()
873
873
874 def rollback(self, dryrun=False, force=False):
874 def rollback(self, dryrun=False, force=False):
875 wlock = lock = None
875 wlock = lock = None
876 try:
876 try:
877 wlock = self.wlock()
877 wlock = self.wlock()
878 lock = self.lock()
878 lock = self.lock()
879 if self.svfs.exists("undo"):
879 if self.svfs.exists("undo"):
880 return self._rollback(dryrun, force)
880 return self._rollback(dryrun, force)
881 else:
881 else:
882 self.ui.warn(_("no rollback information available\n"))
882 self.ui.warn(_("no rollback information available\n"))
883 return 1
883 return 1
884 finally:
884 finally:
885 release(lock, wlock)
885 release(lock, wlock)
886
886
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Undo the last transaction using the saved 'undo.*' files.

        Reads 'undo.desc' to describe what is being undone; refuses to
        roll back a commit while the working directory is not on tip
        unless *force* is true. When *dryrun* is true, only the status
        message is printed. Returns 0 (including on dry runs).
        """
        ui = self.ui
        try:
            # undo.desc format: "<old repo length>\n<operation>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no (readable) undo.desc: proceed, but we cannot describe it
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        # restore bookmark and phase state saved alongside the transaction
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # did the rollback strip the working directory's parent(s)?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
952
952
953 def invalidatecaches(self):
953 def invalidatecaches(self):
954
954
955 if '_tagscache' in vars(self):
955 if '_tagscache' in vars(self):
956 # can't use delattr on proxy
956 # can't use delattr on proxy
957 del self.__dict__['_tagscache']
957 del self.__dict__['_tagscache']
958
958
959 self.unfiltered()._branchcaches.clear()
959 self.unfiltered()._branchcaches.clear()
960 self.invalidatevolatilesets()
960 self.invalidatevolatilesets()
961
961
    def invalidatevolatilesets(self):
        """Forget cached volatile revision sets (filtered revs, obsolescence)."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
965
965
966 def invalidatedirstate(self):
966 def invalidatedirstate(self):
967 '''Invalidates the dirstate, causing the next call to dirstate
967 '''Invalidates the dirstate, causing the next call to dirstate
968 to check if it was modified since the last time it was read,
968 to check if it was modified since the last time it was read,
969 rereading it if it has.
969 rereading it if it has.
970
970
971 This is different to dirstate.invalidate() that it doesn't always
971 This is different to dirstate.invalidate() that it doesn't always
972 rereads the dirstate. Use dirstate.invalidate() if you want to
972 rereads the dirstate. Use dirstate.invalidate() if you want to
973 explicitly read the dirstate again (i.e. restoring it to a previous
973 explicitly read the dirstate again (i.e. restoring it to a previous
974 known good state).'''
974 known good state).'''
975 if hasunfilteredcache(self, 'dirstate'):
975 if hasunfilteredcache(self, 'dirstate'):
976 for k in self.dirstate._filecache:
976 for k in self.dirstate._filecache:
977 try:
977 try:
978 delattr(self.dirstate, k)
978 delattr(self.dirstate, k)
979 except AttributeError:
979 except AttributeError:
980 pass
980 pass
981 delattr(self.unfiltered(), 'dirstate')
981 delattr(self.unfiltered(), 'dirstate')
982
982
983 def invalidate(self):
983 def invalidate(self):
984 unfiltered = self.unfiltered() # all file caches are stored unfiltered
984 unfiltered = self.unfiltered() # all file caches are stored unfiltered
985 for k in self._filecache:
985 for k in self._filecache:
986 # dirstate is invalidated separately in invalidatedirstate()
986 # dirstate is invalidated separately in invalidatedirstate()
987 if k == 'dirstate':
987 if k == 'dirstate':
988 continue
988 continue
989
989
990 try:
990 try:
991 delattr(unfiltered, k)
991 delattr(unfiltered, k)
992 except AttributeError:
992 except AttributeError:
993 pass
993 pass
994 self.invalidatecaches()
994 self.invalidatecaches()
995
995
996 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
996 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
997 try:
997 try:
998 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
998 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
999 except error.LockHeld, inst:
999 except error.LockHeld, inst:
1000 if not wait:
1000 if not wait:
1001 raise
1001 raise
1002 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1002 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1003 (desc, inst.locker))
1003 (desc, inst.locker))
1004 # default to 600 seconds timeout
1004 # default to 600 seconds timeout
1005 l = lockmod.lock(vfs, lockname,
1005 l = lockmod.lock(vfs, lockname,
1006 int(self.ui.config("ui", "timeout", "600")),
1006 int(self.ui.config("ui", "timeout", "600")),
1007 releasefn, desc=desc)
1007 releasefn, desc=desc)
1008 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1008 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1009 if acquirefn:
1009 if acquirefn:
1010 acquirefn()
1010 acquirefn()
1011 return l
1011 return l
1012
1012
1013 def _afterlock(self, callback):
1013 def _afterlock(self, callback):
1014 """add a callback to the current repository lock.
1014 """add a callback to the current repository lock.
1015
1015
1016 The callback will be executed on lock release."""
1016 The callback will be executed on lock release."""
1017 l = self._lockref and self._lockref()
1017 l = self._lockref and self._lockref()
1018 if l:
1018 if l:
1019 l.postrelease.append(callback)
1019 l.postrelease.append(callback)
1020 else:
1020 else:
1021 callback()
1021 callback()
1022
1022
1023 def lock(self, wait=True):
1023 def lock(self, wait=True):
1024 '''Lock the repository store (.hg/store) and return a weak reference
1024 '''Lock the repository store (.hg/store) and return a weak reference
1025 to the lock. Use this before modifying the store (e.g. committing or
1025 to the lock. Use this before modifying the store (e.g. committing or
1026 stripping). If you are opening a transaction, get a lock as well.)'''
1026 stripping). If you are opening a transaction, get a lock as well.)'''
1027 l = self._lockref and self._lockref()
1027 l = self._lockref and self._lockref()
1028 if l is not None and l.held:
1028 if l is not None and l.held:
1029 l.lock()
1029 l.lock()
1030 return l
1030 return l
1031
1031
1032 def unlock():
1032 def unlock():
1033 self.store.write()
1033 self.store.write()
1034 if hasunfilteredcache(self, '_phasecache'):
1034 if hasunfilteredcache(self, '_phasecache'):
1035 self._phasecache.write()
1035 self._phasecache.write()
1036 for k, ce in self._filecache.items():
1036 for k, ce in self._filecache.items():
1037 if k == 'dirstate' or k not in self.__dict__:
1037 if k == 'dirstate' or k not in self.__dict__:
1038 continue
1038 continue
1039 ce.refresh()
1039 ce.refresh()
1040
1040
1041 l = self._lock(self.svfs, "lock", wait, unlock,
1041 l = self._lock(self.svfs, "lock", wait, unlock,
1042 self.invalidate, _('repository %s') % self.origroot)
1042 self.invalidate, _('repository %s') % self.origroot)
1043 self._lockref = weakref.ref(l)
1043 self._lockref = weakref.ref(l)
1044 return l
1044 return l
1045
1045
1046 def wlock(self, wait=True):
1046 def wlock(self, wait=True):
1047 '''Lock the non-store parts of the repository (everything under
1047 '''Lock the non-store parts of the repository (everything under
1048 .hg except .hg/store) and return a weak reference to the lock.
1048 .hg except .hg/store) and return a weak reference to the lock.
1049 Use this before modifying files in .hg.'''
1049 Use this before modifying files in .hg.'''
1050 l = self._wlockref and self._wlockref()
1050 l = self._wlockref and self._wlockref()
1051 if l is not None and l.held:
1051 if l is not None and l.held:
1052 l.lock()
1052 l.lock()
1053 return l
1053 return l
1054
1054
1055 def unlock():
1055 def unlock():
1056 self.dirstate.write()
1056 self.dirstate.write()
1057 self._filecache['dirstate'].refresh()
1057 self._filecache['dirstate'].refresh()
1058
1058
1059 l = self._lock(self.vfs, "wlock", wait, unlock,
1059 l = self._lock(self.vfs, "wlock", wait, unlock,
1060 self.invalidatedirstate, _('working directory of %s') %
1060 self.invalidatedirstate, _('working directory of %s') %
1061 self.origroot)
1061 self.origroot)
1062 self._wlockref = weakref.ref(l)
1062 self._wlockref = weakref.ref(l)
1063 return l
1063 return l
1064
1064
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx:      the file context to commit
        manifest1: p1's manifest (consulted for the old file node)
        manifest2: p2's manifest (non-empty only for merges)
        linkrev:   changelog revision this filelog entry will link to
        tr:        the active transaction
        changelist: list mutated in place; the file name is appended
                    when the file is recorded as changed

        Returns the new file node, or the unchanged first-parent node
        when nothing needed to be stored.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        # fparent2o keeps the original p2 node for the flags-only check below
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                # copy source vanished entirely; record as a plain change
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1144
1144
1145 @unfilteredmethod
1145 @unfilteredmethod
1146 def commit(self, text="", user=None, date=None, match=None, force=False,
1146 def commit(self, text="", user=None, date=None, match=None, force=False,
1147 editor=False, extra={}):
1147 editor=False, extra={}):
1148 """Add a new revision to current repository.
1148 """Add a new revision to current repository.
1149
1149
1150 Revision information is gathered from the working directory,
1150 Revision information is gathered from the working directory,
1151 match can be used to filter the committed files. If editor is
1151 match can be used to filter the committed files. If editor is
1152 supplied, it is called to get a commit message.
1152 supplied, it is called to get a commit message.
1153 """
1153 """
1154
1154
1155 def fail(f, msg):
1155 def fail(f, msg):
1156 raise util.Abort('%s: %s' % (f, msg))
1156 raise util.Abort('%s: %s' % (f, msg))
1157
1157
1158 if not match:
1158 if not match:
1159 match = matchmod.always(self.root, '')
1159 match = matchmod.always(self.root, '')
1160
1160
1161 if not force:
1161 if not force:
1162 vdirs = []
1162 vdirs = []
1163 match.explicitdir = vdirs.append
1163 match.explicitdir = vdirs.append
1164 match.bad = fail
1164 match.bad = fail
1165
1165
1166 wlock = self.wlock()
1166 wlock = self.wlock()
1167 try:
1167 try:
1168 wctx = self[None]
1168 wctx = self[None]
1169 merge = len(wctx.parents()) > 1
1169 merge = len(wctx.parents()) > 1
1170
1170
1171 if (not force and merge and match and
1171 if (not force and merge and match and
1172 (match.files() or match.anypats())):
1172 (match.files() or match.anypats())):
1173 raise util.Abort(_('cannot partially commit a merge '
1173 raise util.Abort(_('cannot partially commit a merge '
1174 '(do not specify files or patterns)'))
1174 '(do not specify files or patterns)'))
1175
1175
1176 changes = self.status(match=match, clean=force)
1176 changes = self.status(match=match, clean=force)
1177 if force:
1177 if force:
1178 changes[0].extend(changes[6]) # mq may commit unchanged files
1178 changes[0].extend(changes[6]) # mq may commit unchanged files
1179
1179
1180 # check subrepos
1180 # check subrepos
1181 subs = []
1181 subs = []
1182 commitsubs = set()
1182 commitsubs = set()
1183 newstate = wctx.substate.copy()
1183 newstate = wctx.substate.copy()
1184 # only manage subrepos and .hgsubstate if .hgsub is present
1184 # only manage subrepos and .hgsubstate if .hgsub is present
1185 if '.hgsub' in wctx:
1185 if '.hgsub' in wctx:
1186 # we'll decide whether to track this ourselves, thanks
1186 # we'll decide whether to track this ourselves, thanks
1187 if '.hgsubstate' in changes[0]:
1187 if '.hgsubstate' in changes[0]:
1188 changes[0].remove('.hgsubstate')
1188 changes[0].remove('.hgsubstate')
1189 if '.hgsubstate' in changes[2]:
1189 if '.hgsubstate' in changes[2]:
1190 changes[2].remove('.hgsubstate')
1190 changes[2].remove('.hgsubstate')
1191
1191
1192 # compare current state to last committed state
1192 # compare current state to last committed state
1193 # build new substate based on last committed state
1193 # build new substate based on last committed state
1194 oldstate = wctx.p1().substate
1194 oldstate = wctx.p1().substate
1195 for s in sorted(newstate.keys()):
1195 for s in sorted(newstate.keys()):
1196 if not match(s):
1196 if not match(s):
1197 # ignore working copy, use old state if present
1197 # ignore working copy, use old state if present
1198 if s in oldstate:
1198 if s in oldstate:
1199 newstate[s] = oldstate[s]
1199 newstate[s] = oldstate[s]
1200 continue
1200 continue
1201 if not force:
1201 if not force:
1202 raise util.Abort(
1202 raise util.Abort(
1203 _("commit with new subrepo %s excluded") % s)
1203 _("commit with new subrepo %s excluded") % s)
1204 if wctx.sub(s).dirty(True):
1204 if wctx.sub(s).dirty(True):
1205 if not self.ui.configbool('ui', 'commitsubrepos'):
1205 if not self.ui.configbool('ui', 'commitsubrepos'):
1206 raise util.Abort(
1206 raise util.Abort(
1207 _("uncommitted changes in subrepo %s") % s,
1207 _("uncommitted changes in subrepo %s") % s,
1208 hint=_("use --subrepos for recursive commit"))
1208 hint=_("use --subrepos for recursive commit"))
1209 subs.append(s)
1209 subs.append(s)
1210 commitsubs.add(s)
1210 commitsubs.add(s)
1211 else:
1211 else:
1212 bs = wctx.sub(s).basestate()
1212 bs = wctx.sub(s).basestate()
1213 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1213 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1214 if oldstate.get(s, (None, None, None))[1] != bs:
1214 if oldstate.get(s, (None, None, None))[1] != bs:
1215 subs.append(s)
1215 subs.append(s)
1216
1216
1217 # check for removed subrepos
1217 # check for removed subrepos
1218 for p in wctx.parents():
1218 for p in wctx.parents():
1219 r = [s for s in p.substate if s not in newstate]
1219 r = [s for s in p.substate if s not in newstate]
1220 subs += [s for s in r if match(s)]
1220 subs += [s for s in r if match(s)]
1221 if subs:
1221 if subs:
1222 if (not match('.hgsub') and
1222 if (not match('.hgsub') and
1223 '.hgsub' in (wctx.modified() + wctx.added())):
1223 '.hgsub' in (wctx.modified() + wctx.added())):
1224 raise util.Abort(
1224 raise util.Abort(
1225 _("can't commit subrepos without .hgsub"))
1225 _("can't commit subrepos without .hgsub"))
1226 changes[0].insert(0, '.hgsubstate')
1226 changes[0].insert(0, '.hgsubstate')
1227
1227
1228 elif '.hgsub' in changes[2]:
1228 elif '.hgsub' in changes[2]:
1229 # clean up .hgsubstate when .hgsub is removed
1229 # clean up .hgsubstate when .hgsub is removed
1230 if ('.hgsubstate' in wctx and
1230 if ('.hgsubstate' in wctx and
1231 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1231 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1232 changes[2].insert(0, '.hgsubstate')
1232 changes[2].insert(0, '.hgsubstate')
1233
1233
1234 # make sure all explicit patterns are matched
1234 # make sure all explicit patterns are matched
1235 if not force and match.files():
1235 if not force and match.files():
1236 matched = set(changes[0] + changes[1] + changes[2])
1236 matched = set(changes[0] + changes[1] + changes[2])
1237
1237
1238 for f in match.files():
1238 for f in match.files():
1239 f = self.dirstate.normalize(f)
1239 f = self.dirstate.normalize(f)
1240 if f == '.' or f in matched or f in wctx.substate:
1240 if f == '.' or f in matched or f in wctx.substate:
1241 continue
1241 continue
1242 if f in changes[3]: # missing
1242 if f in changes[3]: # missing
1243 fail(f, _('file not found!'))
1243 fail(f, _('file not found!'))
1244 if f in vdirs: # visited directory
1244 if f in vdirs: # visited directory
1245 d = f + '/'
1245 d = f + '/'
1246 for mf in matched:
1246 for mf in matched:
1247 if mf.startswith(d):
1247 if mf.startswith(d):
1248 break
1248 break
1249 else:
1249 else:
1250 fail(f, _("no match under directory!"))
1250 fail(f, _("no match under directory!"))
1251 elif f not in self.dirstate:
1251 elif f not in self.dirstate:
1252 fail(f, _("file not tracked!"))
1252 fail(f, _("file not tracked!"))
1253
1253
1254 cctx = context.workingctx(self, text, user, date, extra, changes)
1254 cctx = context.workingctx(self, text, user, date, extra, changes)
1255
1255
1256 if (not force and not extra.get("close") and not merge
1256 if (not force and not extra.get("close") and not merge
1257 and not cctx.files()
1257 and not cctx.files()
1258 and wctx.branch() == wctx.p1().branch()):
1258 and wctx.branch() == wctx.p1().branch()):
1259 return None
1259 return None
1260
1260
1261 if merge and cctx.deleted():
1261 if merge and cctx.deleted():
1262 raise util.Abort(_("cannot commit merge with missing files"))
1262 raise util.Abort(_("cannot commit merge with missing files"))
1263
1263
1264 ms = mergemod.mergestate(self)
1264 ms = mergemod.mergestate(self)
1265 for f in changes[0]:
1265 for f in changes[0]:
1266 if f in ms and ms[f] == 'u':
1266 if f in ms and ms[f] == 'u':
1267 raise util.Abort(_("unresolved merge conflicts "
1267 raise util.Abort(_("unresolved merge conflicts "
1268 "(see hg help resolve)"))
1268 "(see hg help resolve)"))
1269
1269
1270 if editor:
1270 if editor:
1271 cctx._text = editor(self, cctx, subs)
1271 cctx._text = editor(self, cctx, subs)
1272 edited = (text != cctx._text)
1272 edited = (text != cctx._text)
1273
1273
1274 # commit subs and write new state
1274 # commit subs and write new state
1275 if subs:
1275 if subs:
1276 for s in sorted(commitsubs):
1276 for s in sorted(commitsubs):
1277 sub = wctx.sub(s)
1277 sub = wctx.sub(s)
1278 self.ui.status(_('committing subrepository %s\n') %
1278 self.ui.status(_('committing subrepository %s\n') %
1279 subrepo.subrelpath(sub))
1279 subrepo.subrelpath(sub))
1280 sr = sub.commit(cctx._text, user, date)
1280 sr = sub.commit(cctx._text, user, date)
1281 newstate[s] = (newstate[s][0], sr)
1281 newstate[s] = (newstate[s][0], sr)
1282 subrepo.writestate(self, newstate)
1282 subrepo.writestate(self, newstate)
1283
1283
1284 # Save commit message in case this transaction gets rolled back
1284 # Save commit message in case this transaction gets rolled back
1285 # (e.g. by a pretxncommit hook). Leave the content alone on
1285 # (e.g. by a pretxncommit hook). Leave the content alone on
1286 # the assumption that the user will use the same editor again.
1286 # the assumption that the user will use the same editor again.
1287 msgfn = self.savecommitmessage(cctx._text)
1287 msgfn = self.savecommitmessage(cctx._text)
1288
1288
1289 p1, p2 = self.dirstate.parents()
1289 p1, p2 = self.dirstate.parents()
1290 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1290 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1291 try:
1291 try:
1292 self.hook("precommit", throw=True, parent1=hookp1,
1292 self.hook("precommit", throw=True, parent1=hookp1,
1293 parent2=hookp2)
1293 parent2=hookp2)
1294 ret = self.commitctx(cctx, True)
1294 ret = self.commitctx(cctx, True)
1295 except: # re-raises
1295 except: # re-raises
1296 if edited:
1296 if edited:
1297 self.ui.write(
1297 self.ui.write(
1298 _('note: commit message saved in %s\n') % msgfn)
1298 _('note: commit message saved in %s\n') % msgfn)
1299 raise
1299 raise
1300
1300
1301 # update bookmarks, dirstate and mergestate
1301 # update bookmarks, dirstate and mergestate
1302 bookmarks.update(self, [p1, p2], ret)
1302 bookmarks.update(self, [p1, p2], ret)
1303 cctx.markcommitted(ret)
1303 cctx.markcommitted(ret)
1304 ms.reset()
1304 ms.reset()
1305 finally:
1305 finally:
1306 wlock.release()
1306 wlock.release()
1307
1307
1308 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1308 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1309 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1309 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1310 self._afterlock(commithook)
1310 self._afterlock(commithook)
1311 return ret
1311 return ret
1312
1312
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is true, an IOError while reading a file's data is
        fatal; otherwise a missing file (ENOENT) is treated as removed.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # weak proxy so the filelogs/manifest don't keep tr alive
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # file vanished from the working copy: record
                            # it as removed rather than aborting
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse p1's manifest unmodified
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lets the pretxncommit hook see the pending revision on disk
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1394
1394
1395 @unfilteredmethod
1395 @unfilteredmethod
1396 def destroying(self):
1396 def destroying(self):
1397 '''Inform the repository that nodes are about to be destroyed.
1397 '''Inform the repository that nodes are about to be destroyed.
1398 Intended for use by strip and rollback, so there's a common
1398 Intended for use by strip and rollback, so there's a common
1399 place for anything that has to be done before destroying history.
1399 place for anything that has to be done before destroying history.
1400
1400
1401 This is mostly useful for saving state that is in memory and waiting
1401 This is mostly useful for saving state that is in memory and waiting
1402 to be flushed when the current lock is released. Because a call to
1402 to be flushed when the current lock is released. Because a call to
1403 destroyed is imminent, the repo will be invalidated causing those
1403 destroyed is imminent, the repo will be invalidated causing those
1404 changes to stay in memory (waiting for the next unlock), or vanish
1404 changes to stay in memory (waiting for the next unlock), or vanish
1405 completely.
1405 completely.
1406 '''
1406 '''
1407 # When using the same lock to commit and strip, the phasecache is left
1407 # When using the same lock to commit and strip, the phasecache is left
1408 # dirty after committing. Then when we strip, the repo is invalidated,
1408 # dirty after committing. Then when we strip, the repo is invalidated,
1409 # causing those changes to disappear.
1409 # causing those changes to disappear.
1410 if '_phasecache' in vars(self):
1410 if '_phasecache' in vars(self):
1411 self._phasecache.write()
1411 self._phasecache.write()
1412
1412
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1446
1446
1447 def walk(self, match, node=None):
1447 def walk(self, match, node=None):
1448 '''
1448 '''
1449 walk recursively through the directory tree or a given
1449 walk recursively through the directory tree or a given
1450 changeset, finding all files matched by the match
1450 changeset, finding all files matched by the match
1451 function
1451 function
1452 '''
1452 '''
1453 return self[node].walk(match)
1453 return self[node].walk(match)
1454
1454
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def mfmatches(ctx):
            # Return a copy of ctx's manifest restricted to files accepted
            # by 'match' (a full copy when the matcher accepts everything).
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    # consume mf1 entries as we match them; leftovers below
                    # are files present in ctx1 but not ctx2, i.e. removed
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1601
1601
1602 def heads(self, start=None):
1602 def heads(self, start=None):
1603 heads = self.changelog.heads(start)
1603 heads = self.changelog.heads(start)
1604 # sort the output in rev descending order
1604 # sort the output in rev descending order
1605 return sorted(heads, key=self.changelog.rev, reverse=True)
1605 return sorted(heads, key=self.changelog.rev, reverse=True)
1606
1606
1607 def branchheads(self, branch=None, start=None, closed=False):
1607 def branchheads(self, branch=None, start=None, closed=False):
1608 '''return a (possibly filtered) list of heads for the given branch
1608 '''return a (possibly filtered) list of heads for the given branch
1609
1609
1610 Heads are returned in topological order, from newest to oldest.
1610 Heads are returned in topological order, from newest to oldest.
1611 If branch is None, use the dirstate branch.
1611 If branch is None, use the dirstate branch.
1612 If start is not None, return only heads reachable from start.
1612 If start is not None, return only heads reachable from start.
1613 If closed is True, return heads that are marked as closed as well.
1613 If closed is True, return heads that are marked as closed as well.
1614 '''
1614 '''
1615 if branch is None:
1615 if branch is None:
1616 branch = self[None].branch()
1616 branch = self[None].branch()
1617 branches = self.branchmap()
1617 branches = self.branchmap()
1618 if branch not in branches:
1618 if branch not in branches:
1619 return []
1619 return []
1620 # the cache returns heads ordered lowest to highest
1620 # the cache returns heads ordered lowest to highest
1621 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1621 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1622 if start is not None:
1622 if start is not None:
1623 # filter out the heads that cannot be reached from startrev
1623 # filter out the heads that cannot be reached from startrev
1624 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1624 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1625 bheads = [h for h in bheads if h in fbheads]
1625 bheads = [h for h in bheads if h in fbheads]
1626 return bheads
1626 return bheads
1627
1627
1628 def branches(self, nodes):
1628 def branches(self, nodes):
1629 if not nodes:
1629 if not nodes:
1630 nodes = [self.changelog.tip()]
1630 nodes = [self.changelog.tip()]
1631 b = []
1631 b = []
1632 for n in nodes:
1632 for n in nodes:
1633 t = n
1633 t = n
1634 while True:
1634 while True:
1635 p = self.changelog.parents(n)
1635 p = self.changelog.parents(n)
1636 if p[1] != nullid or p[0] == nullid:
1636 if p[1] != nullid or p[0] == nullid:
1637 b.append((t, n, p[0], p[1]))
1637 b.append((t, n, p[0], p[1]))
1638 break
1638 break
1639 n = p[0]
1639 n = p[0]
1640 return b
1640 return b
1641
1641
1642 def between(self, pairs):
1642 def between(self, pairs):
1643 r = []
1643 r = []
1644
1644
1645 for top, bottom in pairs:
1645 for top, bottom in pairs:
1646 n, l, i = top, [], 0
1646 n, l, i = top, [], 0
1647 f = 1
1647 f = 1
1648
1648
1649 while n != bottom and n != nullid:
1649 while n != bottom and n != nullid:
1650 p = self.changelog.parents(n)[0]
1650 p = self.changelog.parents(n)[0]
1651 if i == f:
1651 if i == f:
1652 l.append(n)
1652 l.append(n)
1653 f = f * 2
1653 f = f * 2
1654 n = p
1654 n = p
1655 i += 1
1655 i += 1
1656
1656
1657 r.append(l)
1657 r.append(l)
1658
1658
1659 return r
1659 return r
1660
1660
    def pull(self, remote, heads=None, force=False):
        """Pull changes from ``remote`` into this repository.

        Fetches a changegroup (optionally limited to ``heads``), applies it,
        then synchronizes phase and obsolescence data with the remote.
        Returns the integer result of addchangegroup (0 when no changes
        were found).
        """
        if remote.local():
            missing = set(remote.requirements) - self.supported
            if missing:
                msg = _("required features are not"
                        " supported in the destination:"
                        " %s") % (', '.join(sorted(missing)))
                raise util.Abort(msg)

        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self.unfiltered(), remote,
                                               heads=heads, force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the most capable protocol the remote supports
                if remote.capable('getbundle'):
                    # TODO: get bundlecaps from remote
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                      "other repository doesn't support "
                                      "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url())

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + rheads
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            def gettransaction():
                # lazily open a transaction for obsolescence-marker exchange;
                # NOTE(review): a transaction created here is not assigned to
                # 'tr' directly — syncpull returns it (obstr below) — confirm
                # callers always propagate it, else it would leak unreleased
                if tr is None:
                    return self.transaction(trname)
                return tr

            obstr = obsolete.syncpull(self, remote, gettransaction)
            if obstr is not None:
                tr = obstr

            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
1745
1745
1746 def checkpush(self, force, revs):
1746 def checkpush(self, force, revs):
1747 """Extensions can override this function if additional checks have
1747 """Extensions can override this function if additional checks have
1748 to be performed before pushing, or call it if they override push
1748 to be performed before pushing, or call it if they override push
1749 command.
1749 command.
1750 """
1750 """
1751 pass
1751 pass
1752
1752
    def push(self, remote, force=False, revs=None, newbranch=False):
        # Thin wrapper: all push logic lives in the exchange module.
        return exchange.push(self, remote, force, revs, newbranch)
1755
1755
    def changegroupinfo(self, nodes, source):
        # Report how many changesets a changegroup operation touches;
        # bundles always report, other sources only when --verbose.
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            # with --debug, also enumerate every changeset hash
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))
1763
1763
1764 def changegroupsubset(self, bases, heads, source):
1764 def changegroupsubset(self, bases, heads, source):
1765 """Compute a changegroup consisting of all the nodes that are
1765 """Compute a changegroup consisting of all the nodes that are
1766 descendants of any of the bases and ancestors of any of the heads.
1766 descendants of any of the bases and ancestors of any of the heads.
1767 Return a chunkbuffer object whose read() method will return
1767 Return a chunkbuffer object whose read() method will return
1768 successive changegroup chunks.
1768 successive changegroup chunks.
1769
1769
1770 It is fairly complex as determining which filenodes and which
1770 It is fairly complex as determining which filenodes and which
1771 manifest nodes need to be included for the changeset to be complete
1771 manifest nodes need to be included for the changeset to be complete
1772 is non-trivial.
1772 is non-trivial.
1773
1773
1774 Another wrinkle is doing the reverse, figuring out which changeset in
1774 Another wrinkle is doing the reverse, figuring out which changeset in
1775 the changegroup a particular filenode or manifestnode belongs to.
1775 the changegroup a particular filenode or manifestnode belongs to.
1776 """
1776 """
1777 cl = self.changelog
1777 cl = self.changelog
1778 if not bases:
1778 if not bases:
1779 bases = [nullid]
1779 bases = [nullid]
1780 # TODO: remove call to nodesbetween.
1780 # TODO: remove call to nodesbetween.
1781 csets, bases, heads = cl.nodesbetween(bases, heads)
1781 csets, bases, heads = cl.nodesbetween(bases, heads)
1782 discbases = []
1782 discbases = []
1783 for n in bases:
1783 for n in bases:
1784 discbases.extend([p for p in cl.parents(n) if p != nullid])
1784 discbases.extend([p for p in cl.parents(n) if p != nullid])
1785 outgoing = discovery.outgoing(cl, discbases, heads)
1785 outgoing = discovery.outgoing(cl, discbases, heads)
1786 bundler = changegroup.bundle10(self)
1786 bundler = changegroup.bundle10(self)
1787 return self._changegroupsubset(outgoing, bundler, source)
1787 return self._changegroupsubset(outgoing, bundler, source)
1788
1788
1789 def getlocalbundle(self, source, outgoing, bundlecaps=None):
1789 def getlocalbundle(self, source, outgoing, bundlecaps=None):
1790 """Like getbundle, but taking a discovery.outgoing as an argument.
1790 """Like getbundle, but taking a discovery.outgoing as an argument.
1791
1791
1792 This is only implemented for local repos and reuses potentially
1792 This is only implemented for local repos and reuses potentially
1793 precomputed sets in outgoing."""
1793 precomputed sets in outgoing."""
1794 if not outgoing.missing:
1794 if not outgoing.missing:
1795 return None
1795 return None
1796 bundler = changegroup.bundle10(self, bundlecaps)
1796 bundler = changegroup.bundle10(self, bundlecaps)
1797 return self._changegroupsubset(outgoing, bundler, source)
1797 return self._changegroupsubset(outgoing, bundler, source)
1798
1798
1799 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
1799 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
1800 """Like changegroupsubset, but returns the set difference between the
1800 """Like changegroupsubset, but returns the set difference between the
1801 ancestors of heads and the ancestors common.
1801 ancestors of heads and the ancestors common.
1802
1802
1803 If heads is None, use the local heads. If common is None, use [nullid].
1803 If heads is None, use the local heads. If common is None, use [nullid].
1804
1804
1805 The nodes in common might not all be known locally due to the way the
1805 The nodes in common might not all be known locally due to the way the
1806 current discovery protocol works.
1806 current discovery protocol works.
1807 """
1807 """
1808 cl = self.changelog
1808 cl = self.changelog
1809 if common:
1809 if common:
1810 hasnode = cl.hasnode
1810 hasnode = cl.hasnode
1811 common = [n for n in common if hasnode(n)]
1811 common = [n for n in common if hasnode(n)]
1812 else:
1812 else:
1813 common = [nullid]
1813 common = [nullid]
1814 if not heads:
1814 if not heads:
1815 heads = cl.heads()
1815 heads = cl.heads()
1816 return self.getlocalbundle(source,
1816 return self.getlocalbundle(source,
1817 discovery.outgoing(cl, common, heads),
1817 discovery.outgoing(cl, common, heads),
1818 bundlecaps=bundlecaps)
1818 bundlecaps=bundlecaps)
1819
1819
    @unfilteredmethod
    def _changegroupsubset(self, outgoing, bundler, source,
                           fastpath=False):
        # Generate and return the changegroup described by 'outgoing'
        # using 'bundler'; fires the 'preoutgoing' hook first.
        commonrevs = outgoing.common
        csets = outgoing.missing
        heads = outgoing.missingheads
        # We go through the fast path if we get told to, or if all (unfiltered
        # heads have been requested (since we then know there all linkrevs will
        # be pulled by the client).
        heads.sort()
        fastpathlinkrev = fastpath or (
            self.filtername is None and heads == sorted(self.heads()))

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)
        gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
        return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
1837
1837
1838 def changegroup(self, basenodes, source):
1838 def changegroup(self, basenodes, source):
1839 # to avoid a race we use changegroupsubset() (issue1320)
1839 # to avoid a race we use changegroupsubset() (issue1320)
1840 return self.changegroupsubset(basenodes, self.heads(), source)
1840 return self.changegroupsubset(basenodes, self.heads(), source)
1841
1841
@unfilteredmethod
def addchangegroup(self, source, srctype, url, emptyok=False):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # linkrev callback for the changelog: each incoming changeset is
    # linked to the revision about to be appended (len(cl))
    def csmap(x):
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    # linkrev callback for manifests/filelogs: map a changelog node
    # to its (already added) revision number
    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    # hook may raise (throw=True) to veto the whole operation
    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()  # names of files touched by the incoming changesets

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
        # weak proxy so revlogs holding the transaction do not create a
        # reference cycle that keeps it alive
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # small callable used as the bundle-source progress callback;
        # step/count/total are re-pointed for each phase below
        class prog(object):
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(self[c].files())
        # NOTE: efiles is rebound from a set to its length; only the
        # count is needed (as the file-phase progress total)
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        self.manifest.addgroup(source, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
                                                     pr, needfiles)
        revisions += newrevs
        files += newfiles

        # compute the head-count delta, ignoring new heads that close
        # a branch (they do not count as "more heads")
        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and self[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        self.invalidatevolatilesets()

        if changesets > 0:
            # expose not-yet-committed changelog data to the
            # pretxnchangegroup hook via the 'pending' callable
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = self.ui.configbool('phases', 'publish', True)
        if srctype == 'push':
            # Old server can not push the boundary themself.
            # New server won't push the boundary if changeset already
            # existed locally as secrete
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(self, phases.public, srccontent)
            else:
                phases.advanceboundary(self, phases.draft, srccontent)
                phases.retractboundary(self, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(self, phases.draft, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but coming call to
                # `destroyed` will repair it.
                # In other case we can safely update cache on disk.
                branchmap.updatecache(self.filtered('served'))
            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(self):
                    return

                # forcefully update the on-disk branch cache
                self.ui.debug("updating the branch cache\n")
                self.hook("changegroup", node=hex(cl.node(clstart)),
                          source=srctype, url=url)

                for n in added:
                    self.hook("incoming", node=hex(n), source=srctype,
                              url=url)

                newheads = [h for h in self.heads() if h not in oldheads]
                self.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))
            self._afterlock(runhooks)

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
2022
2022
def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
    """Consume file revlog groups from ``source`` into this repo.

    ``pr`` is the progress callback invoked once per file, ``trp`` the
    transaction proxy, and ``needfiles`` a filename -> set-of-nodes map
    of nodes that must arrive (server-side validation).  Returns a
    ``(revisions, files)`` tuple of counts.  Raises util.Abort on an
    empty group, a spurious entry, or a missing expected node.
    """
    revisions = 0
    files = 0
    chunkdata = source.filelogheader()
    while chunkdata:
        fname = chunkdata["filename"]
        self.ui.debug("adding %s revisions\n" % fname)
        pr()
        fl = self.file(fname)
        oldlen = len(fl)
        if not fl.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revisions += len(fl) - oldlen
        files += 1
        needs = needfiles.get(fname)
        if needs is not None:
            # tick off every newly added node; anything we did not
            # expect is an error
            for rev in xrange(oldlen, len(fl)):
                node = fl.node(rev)
                if node not in needs:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
                needs.remove(node)
            if not needs:
                del needfiles[fname]
        chunkdata = source.filelogheader()
    self.ui.progress(_('files'), None)

    # every expected node left over must already resolve to a revision
    for fname, needs in needfiles.iteritems():
        fl = self.file(fname)
        for node in needs:
            try:
                fl.rev(node)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revisions, files
2063
2063
def stream_in(self, remote, requirements):
    """Clone by copying raw store files streamed from ``remote``.

    Reads the stream_out wire protocol: a status line, a
    "<file count> <byte count>" header, then for each file a
    "<name>\\0<size>" line followed by <size> bytes of data, written
    straight into the local store.  ``requirements`` is updated in
    place with this repo's non-format requirements and then applied.
    Returns len(self.heads()) + 1 (always non-zero).
    """
    lock = self.lock()
    try:
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        l = fp.readline()
        # first line is a status code: 0 ok, 1 forbidden by server,
        # 2 remote locking failed, anything else is unknown
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        self.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if self.ui.debugflag:
                self.ui.debug('adding %s (%s)\n' %
                              (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            # copy exactly `size` bytes from the stream into the store
            for chunk in util.filechunkiter(fp, limit=size):
                handled_bytes += len(chunk)
                self.ui.progress(_('clone'), handled_bytes,
                                 total=total_bytes)
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # guard the rate computation below against division by zero
            elapsed = 0.001
        self.ui.progress(_('clone'), None)
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements +
        #                    new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        if rbranchmap:
            # seed the local branch cache from the remote branchmap
            # fetched above, anchored at the highest remote head
            rbheads = []
            for bheads in rbranchmap.itervalues():
                rbheads.extend(bheads)

            if rbheads:
                rtiprev = max((int(self.changelog.rev(node))
                               for node in rbheads))
                cache = branchmap.branchcache(rbranchmap,
                                              self[rtiprev].node(),
                                              rtiprev)
                # Try to stick it as low as possible
                # filter above served are unlikely to be fetch from a clone
                for candidate in ('base', 'immutable', 'served'):
                    rview = self.filtered(candidate)
                    if cache.validfor(rview):
                        self._branchcaches[candidate] = cache
                        cache.write(rview)
                        break
        self.invalidate()
        return len(self.heads()) + 1
    finally:
        lock.release()
2156
2156
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''
    # The historical default was the mutable literal `heads=[]`; use the
    # None sentinel instead (standard fix for the shared-mutable-default
    # pitfall) and rebuild the empty list per call.  Behavior for all
    # existing callers is unchanged.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if not stream:
        # if the server explicitly prefers to stream (for fast LANs)
        stream = remote.capable('stream-preferred')

    # streaming copies the whole store, so it is only usable when no
    # head subset was requested
    if stream and not heads:
        # 'stream' means remote revlog format is revlogv1 only
        if remote.capable('stream'):
            return self.stream_in(remote, set(('revlogv1',)))
        # otherwise, 'streamreqs' contains the remote revlog format
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            # if we support it, stream in and adjust our requirements
            if not streamreqs - self.supportedformats:
                return self.stream_in(remote, streamreqs)
    # fall back to a regular pull
    return self.pull(remote, heads)
2188
2188
def pushkey(self, namespace, key, old, new):
    """Update pushkey ``key`` in ``namespace`` from ``old`` to ``new``.

    The 'prepushkey' hook runs first and may veto the operation
    (throw=True); the 'pushkey' hook is then notified of the result.
    Returns whatever pushkey.push produced.
    """
    self.hook('prepushkey', throw=True, namespace=namespace, key=key,
              old=old, new=new)
    self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
    result = pushkey.push(self, namespace, key, old, new)
    self.hook('pushkey', namespace=namespace, key=key, old=old,
              new=new, ret=result)
    return result
2197
2197
def listkeys(self, namespace):
    """Return the pushkey mapping for ``namespace``.

    Fires 'prelistkeys' beforehand (throw=True, so it can veto) and
    'listkeys' afterwards with the values that will be returned.
    """
    self.hook('prelistkeys', throw=True, namespace=namespace)
    self.ui.debug('listing keys for "%s"\n' % namespace)
    values = pushkey.list(self, namespace)
    self.hook('listkeys', namespace=namespace, values=values)
    return values
2204
2204
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    args = (one, two, three, four, five)
    return "%s %s %s %s %s" % args
2208
2208
def savecommitmessage(self, text):
    """Persist ``text`` to .hg/last-message.txt for later recovery.

    Returns the file's path relative to the cwd (via self.pathto) so it
    can be shown to the user.
    """
    fp = self.opener('last-message.txt', 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
    # strip the "<root>/" prefix from the absolute file name
    relpath = fp.name[len(self.root) + 1:]
    return self.pathto(relpath)
2216
2216
# used to avoid circular references so destructors work
def aftertrans(files):
    """Build a post-transaction callback renaming (vfs, src, dest) triples.

    A plain closure (not a bound method) is returned so the transaction
    holds no reference cycle; a missing journal file is silently skipped.
    """
    pending = [tuple(entry) for entry in files]

    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass

    return renameall
2227
2227
def undoname(fn):
    """Map a journal file path to its corresponding undo file path.

    ``fn`` must name a file whose basename starts with 'journal'; only
    the first occurrence of 'journal' in the basename is replaced.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(directory, basename.replace('journal', 'undo', 1))
2232
2232
def instance(ui, path, create):
    # repository-factory entry point: strip any local-URL wrapping from
    # `path` and open (or create, when `create` is true) the repo there
    return localrepository(ui, util.urllocalpath(path), create)
2235
2235
def islocal(path):
    # part of the repo-type module protocol; a localrepository path is,
    # by definition, always local
    return True
@@ -1,919 +1,929 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases, parsers
10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob
13 import os, errno, re, glob
14
14
15 if os.name == 'nt':
15 if os.name == 'nt':
16 import scmwindows as scmplatform
16 import scmwindows as scmplatform
17 else:
17 else:
18 import scmposix as scmplatform
18 import scmposix as scmplatform
19
19
20 systemrcpath = scmplatform.systemrcpath
20 systemrcpath = scmplatform.systemrcpath
21 userrcpath = scmplatform.userrcpath
21 userrcpath = scmplatform.userrcpath
22
22
23 def itersubrepos(ctx1, ctx2):
24 """find subrepos in ctx1 or ctx2"""
25 # Create a (subpath, ctx) mapping where we prefer subpaths from
26 # ctx1. The subpaths from ctx2 are important when the .hgsub file
27 # has been modified (in ctx2) but not yet committed (in ctx1).
28 subpaths = dict.fromkeys(ctx2.substate, ctx2)
29 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
30 for subpath, ctx in sorted(subpaths.iteritems()):
31 yield subpath, ctx.sub(subpath)
32
23 def nochangesfound(ui, repo, excluded=None):
33 def nochangesfound(ui, repo, excluded=None):
24 '''Report no changes for push/pull, excluded is None or a list of
34 '''Report no changes for push/pull, excluded is None or a list of
25 nodes excluded from the push/pull.
35 nodes excluded from the push/pull.
26 '''
36 '''
27 secretlist = []
37 secretlist = []
28 if excluded:
38 if excluded:
29 for n in excluded:
39 for n in excluded:
30 if n not in repo:
40 if n not in repo:
31 # discovery should not have included the filtered revision,
41 # discovery should not have included the filtered revision,
32 # we have to explicitly exclude it until discovery is cleanup.
42 # we have to explicitly exclude it until discovery is cleanup.
33 continue
43 continue
34 ctx = repo[n]
44 ctx = repo[n]
35 if ctx.phase() >= phases.secret and not ctx.extinct():
45 if ctx.phase() >= phases.secret and not ctx.extinct():
36 secretlist.append(n)
46 secretlist.append(n)
37
47
38 if secretlist:
48 if secretlist:
39 ui.status(_("no changes found (ignored %d secret changesets)\n")
49 ui.status(_("no changes found (ignored %d secret changesets)\n")
40 % len(secretlist))
50 % len(secretlist))
41 else:
51 else:
42 ui.status(_("no changes found\n"))
52 ui.status(_("no changes found\n"))
43
53
44 def checknewlabel(repo, lbl, kind):
54 def checknewlabel(repo, lbl, kind):
45 # Do not use the "kind" parameter in ui output.
55 # Do not use the "kind" parameter in ui output.
46 # It makes strings difficult to translate.
56 # It makes strings difficult to translate.
47 if lbl in ['tip', '.', 'null']:
57 if lbl in ['tip', '.', 'null']:
48 raise util.Abort(_("the name '%s' is reserved") % lbl)
58 raise util.Abort(_("the name '%s' is reserved") % lbl)
49 for c in (':', '\0', '\n', '\r'):
59 for c in (':', '\0', '\n', '\r'):
50 if c in lbl:
60 if c in lbl:
51 raise util.Abort(_("%r cannot be used in a name") % c)
61 raise util.Abort(_("%r cannot be used in a name") % c)
52 try:
62 try:
53 int(lbl)
63 int(lbl)
54 raise util.Abort(_("cannot use an integer as a name"))
64 raise util.Abort(_("cannot use an integer as a name"))
55 except ValueError:
65 except ValueError:
56 pass
66 pass
57
67
58 def checkfilename(f):
68 def checkfilename(f):
59 '''Check that the filename f is an acceptable filename for a tracked file'''
69 '''Check that the filename f is an acceptable filename for a tracked file'''
60 if '\r' in f or '\n' in f:
70 if '\r' in f or '\n' in f:
61 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
71 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
62
72
63 def checkportable(ui, f):
73 def checkportable(ui, f):
64 '''Check if filename f is portable and warn or abort depending on config'''
74 '''Check if filename f is portable and warn or abort depending on config'''
65 checkfilename(f)
75 checkfilename(f)
66 abort, warn = checkportabilityalert(ui)
76 abort, warn = checkportabilityalert(ui)
67 if abort or warn:
77 if abort or warn:
68 msg = util.checkwinfilename(f)
78 msg = util.checkwinfilename(f)
69 if msg:
79 if msg:
70 msg = "%s: %r" % (msg, f)
80 msg = "%s: %r" % (msg, f)
71 if abort:
81 if abort:
72 raise util.Abort(msg)
82 raise util.Abort(msg)
73 ui.warn(_("warning: %s\n") % msg)
83 ui.warn(_("warning: %s\n") % msg)
74
84
75 def checkportabilityalert(ui):
85 def checkportabilityalert(ui):
76 '''check if the user's config requests nothing, a warning, or abort for
86 '''check if the user's config requests nothing, a warning, or abort for
77 non-portable filenames'''
87 non-portable filenames'''
78 val = ui.config('ui', 'portablefilenames', 'warn')
88 val = ui.config('ui', 'portablefilenames', 'warn')
79 lval = val.lower()
89 lval = val.lower()
80 bval = util.parsebool(val)
90 bval = util.parsebool(val)
81 abort = os.name == 'nt' or lval == 'abort'
91 abort = os.name == 'nt' or lval == 'abort'
82 warn = bval or lval == 'warn'
92 warn = bval or lval == 'warn'
83 if bval is None and not (warn or abort or lval == 'ignore'):
93 if bval is None and not (warn or abort or lval == 'ignore'):
84 raise error.ConfigError(
94 raise error.ConfigError(
85 _("ui.portablefilenames value is invalid ('%s')") % val)
95 _("ui.portablefilenames value is invalid ('%s')") % val)
86 return abort, warn
96 return abort, warn
87
97
88 class casecollisionauditor(object):
98 class casecollisionauditor(object):
89 def __init__(self, ui, abort, dirstate):
99 def __init__(self, ui, abort, dirstate):
90 self._ui = ui
100 self._ui = ui
91 self._abort = abort
101 self._abort = abort
92 allfiles = '\0'.join(dirstate._map)
102 allfiles = '\0'.join(dirstate._map)
93 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
103 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
94 self._dirstate = dirstate
104 self._dirstate = dirstate
95 # The purpose of _newfiles is so that we don't complain about
105 # The purpose of _newfiles is so that we don't complain about
96 # case collisions if someone were to call this object with the
106 # case collisions if someone were to call this object with the
97 # same filename twice.
107 # same filename twice.
98 self._newfiles = set()
108 self._newfiles = set()
99
109
100 def __call__(self, f):
110 def __call__(self, f):
101 if f in self._newfiles:
111 if f in self._newfiles:
102 return
112 return
103 fl = encoding.lower(f)
113 fl = encoding.lower(f)
104 if fl in self._loweredfiles and f not in self._dirstate:
114 if fl in self._loweredfiles and f not in self._dirstate:
105 msg = _('possible case-folding collision for %s') % f
115 msg = _('possible case-folding collision for %s') % f
106 if self._abort:
116 if self._abort:
107 raise util.Abort(msg)
117 raise util.Abort(msg)
108 self._ui.warn(_("warning: %s\n") % msg)
118 self._ui.warn(_("warning: %s\n") % msg)
109 self._loweredfiles.add(fl)
119 self._loweredfiles.add(fl)
110 self._newfiles.add(f)
120 self._newfiles.add(f)
111
121
112 class abstractvfs(object):
122 class abstractvfs(object):
113 """Abstract base class; cannot be instantiated"""
123 """Abstract base class; cannot be instantiated"""
114
124
115 def __init__(self, *args, **kwargs):
125 def __init__(self, *args, **kwargs):
116 '''Prevent instantiation; don't call this from subclasses.'''
126 '''Prevent instantiation; don't call this from subclasses.'''
117 raise NotImplementedError('attempted instantiating ' + str(type(self)))
127 raise NotImplementedError('attempted instantiating ' + str(type(self)))
118
128
119 def tryread(self, path):
129 def tryread(self, path):
120 '''gracefully return an empty string for missing files'''
130 '''gracefully return an empty string for missing files'''
121 try:
131 try:
122 return self.read(path)
132 return self.read(path)
123 except IOError, inst:
133 except IOError, inst:
124 if inst.errno != errno.ENOENT:
134 if inst.errno != errno.ENOENT:
125 raise
135 raise
126 return ""
136 return ""
127
137
128 def open(self, path, mode="r", text=False, atomictemp=False):
138 def open(self, path, mode="r", text=False, atomictemp=False):
129 self.open = self.__call__
139 self.open = self.__call__
130 return self.__call__(path, mode, text, atomictemp)
140 return self.__call__(path, mode, text, atomictemp)
131
141
132 def read(self, path):
142 def read(self, path):
133 fp = self(path, 'rb')
143 fp = self(path, 'rb')
134 try:
144 try:
135 return fp.read()
145 return fp.read()
136 finally:
146 finally:
137 fp.close()
147 fp.close()
138
148
139 def write(self, path, data):
149 def write(self, path, data):
140 fp = self(path, 'wb')
150 fp = self(path, 'wb')
141 try:
151 try:
142 return fp.write(data)
152 return fp.write(data)
143 finally:
153 finally:
144 fp.close()
154 fp.close()
145
155
146 def append(self, path, data):
156 def append(self, path, data):
147 fp = self(path, 'ab')
157 fp = self(path, 'ab')
148 try:
158 try:
149 return fp.write(data)
159 return fp.write(data)
150 finally:
160 finally:
151 fp.close()
161 fp.close()
152
162
153 def chmod(self, path, mode):
163 def chmod(self, path, mode):
154 return os.chmod(self.join(path), mode)
164 return os.chmod(self.join(path), mode)
155
165
156 def exists(self, path=None):
166 def exists(self, path=None):
157 return os.path.exists(self.join(path))
167 return os.path.exists(self.join(path))
158
168
159 def fstat(self, fp):
169 def fstat(self, fp):
160 return util.fstat(fp)
170 return util.fstat(fp)
161
171
162 def isdir(self, path=None):
172 def isdir(self, path=None):
163 return os.path.isdir(self.join(path))
173 return os.path.isdir(self.join(path))
164
174
165 def isfile(self, path=None):
175 def isfile(self, path=None):
166 return os.path.isfile(self.join(path))
176 return os.path.isfile(self.join(path))
167
177
168 def islink(self, path=None):
178 def islink(self, path=None):
169 return os.path.islink(self.join(path))
179 return os.path.islink(self.join(path))
170
180
171 def lstat(self, path=None):
181 def lstat(self, path=None):
172 return os.lstat(self.join(path))
182 return os.lstat(self.join(path))
173
183
174 def makedir(self, path=None, notindexed=True):
184 def makedir(self, path=None, notindexed=True):
175 return util.makedir(self.join(path), notindexed)
185 return util.makedir(self.join(path), notindexed)
176
186
177 def makedirs(self, path=None, mode=None):
187 def makedirs(self, path=None, mode=None):
178 return util.makedirs(self.join(path), mode)
188 return util.makedirs(self.join(path), mode)
179
189
180 def makelock(self, info, path):
190 def makelock(self, info, path):
181 return util.makelock(info, self.join(path))
191 return util.makelock(info, self.join(path))
182
192
183 def mkdir(self, path=None):
193 def mkdir(self, path=None):
184 return os.mkdir(self.join(path))
194 return os.mkdir(self.join(path))
185
195
186 def readdir(self, path=None, stat=None, skip=None):
196 def readdir(self, path=None, stat=None, skip=None):
187 return osutil.listdir(self.join(path), stat, skip)
197 return osutil.listdir(self.join(path), stat, skip)
188
198
189 def readlock(self, path):
199 def readlock(self, path):
190 return util.readlock(self.join(path))
200 return util.readlock(self.join(path))
191
201
192 def rename(self, src, dst):
202 def rename(self, src, dst):
193 return util.rename(self.join(src), self.join(dst))
203 return util.rename(self.join(src), self.join(dst))
194
204
195 def readlink(self, path):
205 def readlink(self, path):
196 return os.readlink(self.join(path))
206 return os.readlink(self.join(path))
197
207
198 def setflags(self, path, l, x):
208 def setflags(self, path, l, x):
199 return util.setflags(self.join(path), l, x)
209 return util.setflags(self.join(path), l, x)
200
210
201 def stat(self, path=None):
211 def stat(self, path=None):
202 return os.stat(self.join(path))
212 return os.stat(self.join(path))
203
213
204 def unlink(self, path=None):
214 def unlink(self, path=None):
205 return util.unlink(self.join(path))
215 return util.unlink(self.join(path))
206
216
207 def utime(self, path=None, t=None):
217 def utime(self, path=None, t=None):
208 return os.utime(self.join(path), t)
218 return os.utime(self.join(path), t)
209
219
210 class vfs(abstractvfs):
220 class vfs(abstractvfs):
211 '''Operate files relative to a base directory
221 '''Operate files relative to a base directory
212
222
213 This class is used to hide the details of COW semantics and
223 This class is used to hide the details of COW semantics and
214 remote file access from higher level code.
224 remote file access from higher level code.
215 '''
225 '''
216 def __init__(self, base, audit=True, expandpath=False, realpath=False):
226 def __init__(self, base, audit=True, expandpath=False, realpath=False):
217 if expandpath:
227 if expandpath:
218 base = util.expandpath(base)
228 base = util.expandpath(base)
219 if realpath:
229 if realpath:
220 base = os.path.realpath(base)
230 base = os.path.realpath(base)
221 self.base = base
231 self.base = base
222 self._setmustaudit(audit)
232 self._setmustaudit(audit)
223 self.createmode = None
233 self.createmode = None
224 self._trustnlink = None
234 self._trustnlink = None
225
235
226 def _getmustaudit(self):
236 def _getmustaudit(self):
227 return self._audit
237 return self._audit
228
238
229 def _setmustaudit(self, onoff):
239 def _setmustaudit(self, onoff):
230 self._audit = onoff
240 self._audit = onoff
231 if onoff:
241 if onoff:
232 self.audit = pathutil.pathauditor(self.base)
242 self.audit = pathutil.pathauditor(self.base)
233 else:
243 else:
234 self.audit = util.always
244 self.audit = util.always
235
245
236 mustaudit = property(_getmustaudit, _setmustaudit)
246 mustaudit = property(_getmustaudit, _setmustaudit)
237
247
238 @util.propertycache
248 @util.propertycache
239 def _cansymlink(self):
249 def _cansymlink(self):
240 return util.checklink(self.base)
250 return util.checklink(self.base)
241
251
242 @util.propertycache
252 @util.propertycache
243 def _chmod(self):
253 def _chmod(self):
244 return util.checkexec(self.base)
254 return util.checkexec(self.base)
245
255
246 def _fixfilemode(self, name):
256 def _fixfilemode(self, name):
247 if self.createmode is None or not self._chmod:
257 if self.createmode is None or not self._chmod:
248 return
258 return
249 os.chmod(name, self.createmode & 0666)
259 os.chmod(name, self.createmode & 0666)
250
260
251 def __call__(self, path, mode="r", text=False, atomictemp=False):
261 def __call__(self, path, mode="r", text=False, atomictemp=False):
252 if self._audit:
262 if self._audit:
253 r = util.checkosfilename(path)
263 r = util.checkosfilename(path)
254 if r:
264 if r:
255 raise util.Abort("%s: %r" % (r, path))
265 raise util.Abort("%s: %r" % (r, path))
256 self.audit(path)
266 self.audit(path)
257 f = self.join(path)
267 f = self.join(path)
258
268
259 if not text and "b" not in mode:
269 if not text and "b" not in mode:
260 mode += "b" # for that other OS
270 mode += "b" # for that other OS
261
271
262 nlink = -1
272 nlink = -1
263 if mode not in ('r', 'rb'):
273 if mode not in ('r', 'rb'):
264 dirname, basename = util.split(f)
274 dirname, basename = util.split(f)
265 # If basename is empty, then the path is malformed because it points
275 # If basename is empty, then the path is malformed because it points
266 # to a directory. Let the posixfile() call below raise IOError.
276 # to a directory. Let the posixfile() call below raise IOError.
267 if basename:
277 if basename:
268 if atomictemp:
278 if atomictemp:
269 util.ensuredirs(dirname, self.createmode)
279 util.ensuredirs(dirname, self.createmode)
270 return util.atomictempfile(f, mode, self.createmode)
280 return util.atomictempfile(f, mode, self.createmode)
271 try:
281 try:
272 if 'w' in mode:
282 if 'w' in mode:
273 util.unlink(f)
283 util.unlink(f)
274 nlink = 0
284 nlink = 0
275 else:
285 else:
276 # nlinks() may behave differently for files on Windows
286 # nlinks() may behave differently for files on Windows
277 # shares if the file is open.
287 # shares if the file is open.
278 fd = util.posixfile(f)
288 fd = util.posixfile(f)
279 nlink = util.nlinks(f)
289 nlink = util.nlinks(f)
280 if nlink < 1:
290 if nlink < 1:
281 nlink = 2 # force mktempcopy (issue1922)
291 nlink = 2 # force mktempcopy (issue1922)
282 fd.close()
292 fd.close()
283 except (OSError, IOError), e:
293 except (OSError, IOError), e:
284 if e.errno != errno.ENOENT:
294 if e.errno != errno.ENOENT:
285 raise
295 raise
286 nlink = 0
296 nlink = 0
287 util.ensuredirs(dirname, self.createmode)
297 util.ensuredirs(dirname, self.createmode)
288 if nlink > 0:
298 if nlink > 0:
289 if self._trustnlink is None:
299 if self._trustnlink is None:
290 self._trustnlink = nlink > 1 or util.checknlink(f)
300 self._trustnlink = nlink > 1 or util.checknlink(f)
291 if nlink > 1 or not self._trustnlink:
301 if nlink > 1 or not self._trustnlink:
292 util.rename(util.mktempcopy(f), f)
302 util.rename(util.mktempcopy(f), f)
293 fp = util.posixfile(f, mode)
303 fp = util.posixfile(f, mode)
294 if nlink == 0:
304 if nlink == 0:
295 self._fixfilemode(f)
305 self._fixfilemode(f)
296 return fp
306 return fp
297
307
298 def symlink(self, src, dst):
308 def symlink(self, src, dst):
299 self.audit(dst)
309 self.audit(dst)
300 linkname = self.join(dst)
310 linkname = self.join(dst)
301 try:
311 try:
302 os.unlink(linkname)
312 os.unlink(linkname)
303 except OSError:
313 except OSError:
304 pass
314 pass
305
315
306 util.ensuredirs(os.path.dirname(linkname), self.createmode)
316 util.ensuredirs(os.path.dirname(linkname), self.createmode)
307
317
308 if self._cansymlink:
318 if self._cansymlink:
309 try:
319 try:
310 os.symlink(src, linkname)
320 os.symlink(src, linkname)
311 except OSError, err:
321 except OSError, err:
312 raise OSError(err.errno, _('could not symlink to %r: %s') %
322 raise OSError(err.errno, _('could not symlink to %r: %s') %
313 (src, err.strerror), linkname)
323 (src, err.strerror), linkname)
314 else:
324 else:
315 self.write(dst, src)
325 self.write(dst, src)
316
326
317 def join(self, path):
327 def join(self, path):
318 if path:
328 if path:
319 return os.path.join(self.base, path)
329 return os.path.join(self.base, path)
320 else:
330 else:
321 return self.base
331 return self.base
322
332
323 opener = vfs
333 opener = vfs
324
334
325 class auditvfs(object):
335 class auditvfs(object):
326 def __init__(self, vfs):
336 def __init__(self, vfs):
327 self.vfs = vfs
337 self.vfs = vfs
328
338
329 def _getmustaudit(self):
339 def _getmustaudit(self):
330 return self.vfs.mustaudit
340 return self.vfs.mustaudit
331
341
332 def _setmustaudit(self, onoff):
342 def _setmustaudit(self, onoff):
333 self.vfs.mustaudit = onoff
343 self.vfs.mustaudit = onoff
334
344
335 mustaudit = property(_getmustaudit, _setmustaudit)
345 mustaudit = property(_getmustaudit, _setmustaudit)
336
346
337 class filtervfs(abstractvfs, auditvfs):
347 class filtervfs(abstractvfs, auditvfs):
338 '''Wrapper vfs for filtering filenames with a function.'''
348 '''Wrapper vfs for filtering filenames with a function.'''
339
349
340 def __init__(self, vfs, filter):
350 def __init__(self, vfs, filter):
341 auditvfs.__init__(self, vfs)
351 auditvfs.__init__(self, vfs)
342 self._filter = filter
352 self._filter = filter
343
353
344 def __call__(self, path, *args, **kwargs):
354 def __call__(self, path, *args, **kwargs):
345 return self.vfs(self._filter(path), *args, **kwargs)
355 return self.vfs(self._filter(path), *args, **kwargs)
346
356
347 def join(self, path):
357 def join(self, path):
348 if path:
358 if path:
349 return self.vfs.join(self._filter(path))
359 return self.vfs.join(self._filter(path))
350 else:
360 else:
351 return self.vfs.join(path)
361 return self.vfs.join(path)
352
362
353 filteropener = filtervfs
363 filteropener = filtervfs
354
364
355 class readonlyvfs(abstractvfs, auditvfs):
365 class readonlyvfs(abstractvfs, auditvfs):
356 '''Wrapper vfs preventing any writing.'''
366 '''Wrapper vfs preventing any writing.'''
357
367
358 def __init__(self, vfs):
368 def __init__(self, vfs):
359 auditvfs.__init__(self, vfs)
369 auditvfs.__init__(self, vfs)
360
370
361 def __call__(self, path, mode='r', *args, **kw):
371 def __call__(self, path, mode='r', *args, **kw):
362 if mode not in ('r', 'rb'):
372 if mode not in ('r', 'rb'):
363 raise util.Abort('this vfs is read only')
373 raise util.Abort('this vfs is read only')
364 return self.vfs(path, mode, *args, **kw)
374 return self.vfs(path, mode, *args, **kw)
365
375
366
376
367 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
377 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
368 '''yield every hg repository under path, always recursively.
378 '''yield every hg repository under path, always recursively.
369 The recurse flag will only control recursion into repo working dirs'''
379 The recurse flag will only control recursion into repo working dirs'''
370 def errhandler(err):
380 def errhandler(err):
371 if err.filename == path:
381 if err.filename == path:
372 raise err
382 raise err
373 samestat = getattr(os.path, 'samestat', None)
383 samestat = getattr(os.path, 'samestat', None)
374 if followsym and samestat is not None:
384 if followsym and samestat is not None:
375 def adddir(dirlst, dirname):
385 def adddir(dirlst, dirname):
376 match = False
386 match = False
377 dirstat = os.stat(dirname)
387 dirstat = os.stat(dirname)
378 for lstdirstat in dirlst:
388 for lstdirstat in dirlst:
379 if samestat(dirstat, lstdirstat):
389 if samestat(dirstat, lstdirstat):
380 match = True
390 match = True
381 break
391 break
382 if not match:
392 if not match:
383 dirlst.append(dirstat)
393 dirlst.append(dirstat)
384 return not match
394 return not match
385 else:
395 else:
386 followsym = False
396 followsym = False
387
397
388 if (seen_dirs is None) and followsym:
398 if (seen_dirs is None) and followsym:
389 seen_dirs = []
399 seen_dirs = []
390 adddir(seen_dirs, path)
400 adddir(seen_dirs, path)
391 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
401 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
392 dirs.sort()
402 dirs.sort()
393 if '.hg' in dirs:
403 if '.hg' in dirs:
394 yield root # found a repository
404 yield root # found a repository
395 qroot = os.path.join(root, '.hg', 'patches')
405 qroot = os.path.join(root, '.hg', 'patches')
396 if os.path.isdir(os.path.join(qroot, '.hg')):
406 if os.path.isdir(os.path.join(qroot, '.hg')):
397 yield qroot # we have a patch queue repo here
407 yield qroot # we have a patch queue repo here
398 if recurse:
408 if recurse:
399 # avoid recursing inside the .hg directory
409 # avoid recursing inside the .hg directory
400 dirs.remove('.hg')
410 dirs.remove('.hg')
401 else:
411 else:
402 dirs[:] = [] # don't descend further
412 dirs[:] = [] # don't descend further
403 elif followsym:
413 elif followsym:
404 newdirs = []
414 newdirs = []
405 for d in dirs:
415 for d in dirs:
406 fname = os.path.join(root, d)
416 fname = os.path.join(root, d)
407 if adddir(seen_dirs, fname):
417 if adddir(seen_dirs, fname):
408 if os.path.islink(fname):
418 if os.path.islink(fname):
409 for hgname in walkrepos(fname, True, seen_dirs):
419 for hgname in walkrepos(fname, True, seen_dirs):
410 yield hgname
420 yield hgname
411 else:
421 else:
412 newdirs.append(d)
422 newdirs.append(d)
413 dirs[:] = newdirs
423 dirs[:] = newdirs
414
424
415 def osrcpath():
425 def osrcpath():
416 '''return default os-specific hgrc search path'''
426 '''return default os-specific hgrc search path'''
417 path = systemrcpath()
427 path = systemrcpath()
418 path.extend(userrcpath())
428 path.extend(userrcpath())
419 path = [os.path.normpath(f) for f in path]
429 path = [os.path.normpath(f) for f in path]
420 return path
430 return path
421
431
422 _rcpath = None
432 _rcpath = None
423
433
424 def rcpath():
434 def rcpath():
425 '''return hgrc search path. if env var HGRCPATH is set, use it.
435 '''return hgrc search path. if env var HGRCPATH is set, use it.
426 for each item in path, if directory, use files ending in .rc,
436 for each item in path, if directory, use files ending in .rc,
427 else use item.
437 else use item.
428 make HGRCPATH empty to only look in .hg/hgrc of current repo.
438 make HGRCPATH empty to only look in .hg/hgrc of current repo.
429 if no HGRCPATH, use default os-specific path.'''
439 if no HGRCPATH, use default os-specific path.'''
430 global _rcpath
440 global _rcpath
431 if _rcpath is None:
441 if _rcpath is None:
432 if 'HGRCPATH' in os.environ:
442 if 'HGRCPATH' in os.environ:
433 _rcpath = []
443 _rcpath = []
434 for p in os.environ['HGRCPATH'].split(os.pathsep):
444 for p in os.environ['HGRCPATH'].split(os.pathsep):
435 if not p:
445 if not p:
436 continue
446 continue
437 p = util.expandpath(p)
447 p = util.expandpath(p)
438 if os.path.isdir(p):
448 if os.path.isdir(p):
439 for f, kind in osutil.listdir(p):
449 for f, kind in osutil.listdir(p):
440 if f.endswith('.rc'):
450 if f.endswith('.rc'):
441 _rcpath.append(os.path.join(p, f))
451 _rcpath.append(os.path.join(p, f))
442 else:
452 else:
443 _rcpath.append(p)
453 _rcpath.append(p)
444 else:
454 else:
445 _rcpath = osrcpath()
455 _rcpath = osrcpath()
446 return _rcpath
456 return _rcpath
447
457
448 def revsingle(repo, revspec, default='.'):
458 def revsingle(repo, revspec, default='.'):
449 if not revspec and revspec != 0:
459 if not revspec and revspec != 0:
450 return repo[default]
460 return repo[default]
451
461
452 l = revrange(repo, [revspec])
462 l = revrange(repo, [revspec])
453 if len(l) < 1:
463 if len(l) < 1:
454 raise util.Abort(_('empty revision set'))
464 raise util.Abort(_('empty revision set'))
455 return repo[l[-1]]
465 return repo[l[-1]]
456
466
457 def revpair(repo, revs):
467 def revpair(repo, revs):
458 if not revs:
468 if not revs:
459 return repo.dirstate.p1(), None
469 return repo.dirstate.p1(), None
460
470
461 l = revrange(repo, revs)
471 l = revrange(repo, revs)
462
472
463 if len(l) == 0:
473 if len(l) == 0:
464 if revs:
474 if revs:
465 raise util.Abort(_('empty revision range'))
475 raise util.Abort(_('empty revision range'))
466 return repo.dirstate.p1(), None
476 return repo.dirstate.p1(), None
467
477
468 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
478 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
469 return repo.lookup(l[0]), None
479 return repo.lookup(l[0]), None
470
480
471 return repo.lookup(l[0]), repo.lookup(l[-1])
481 return repo.lookup(l[0]), repo.lookup(l[-1])
472
482
473 _revrangesep = ':'
483 _revrangesep = ':'
474
484
475 def revrange(repo, revs):
485 def revrange(repo, revs):
476 """Yield revision as strings from a list of revision specifications."""
486 """Yield revision as strings from a list of revision specifications."""
477
487
478 def revfix(repo, val, defval):
488 def revfix(repo, val, defval):
479 if not val and val != 0 and defval is not None:
489 if not val and val != 0 and defval is not None:
480 return defval
490 return defval
481 return repo[val].rev()
491 return repo[val].rev()
482
492
483 seen, l = set(), []
493 seen, l = set(), []
484 for spec in revs:
494 for spec in revs:
485 if l and not seen:
495 if l and not seen:
486 seen = set(l)
496 seen = set(l)
487 # attempt to parse old-style ranges first to deal with
497 # attempt to parse old-style ranges first to deal with
488 # things like old-tag which contain query metacharacters
498 # things like old-tag which contain query metacharacters
489 try:
499 try:
490 if isinstance(spec, int):
500 if isinstance(spec, int):
491 seen.add(spec)
501 seen.add(spec)
492 l.append(spec)
502 l.append(spec)
493 continue
503 continue
494
504
495 if _revrangesep in spec:
505 if _revrangesep in spec:
496 start, end = spec.split(_revrangesep, 1)
506 start, end = spec.split(_revrangesep, 1)
497 start = revfix(repo, start, 0)
507 start = revfix(repo, start, 0)
498 end = revfix(repo, end, len(repo) - 1)
508 end = revfix(repo, end, len(repo) - 1)
499 if end == nullrev and start <= 0:
509 if end == nullrev and start <= 0:
500 start = nullrev
510 start = nullrev
501 rangeiter = repo.changelog.revs(start, end)
511 rangeiter = repo.changelog.revs(start, end)
502 if not seen and not l:
512 if not seen and not l:
503 # by far the most common case: revs = ["-1:0"]
513 # by far the most common case: revs = ["-1:0"]
504 l = list(rangeiter)
514 l = list(rangeiter)
505 # defer syncing seen until next iteration
515 # defer syncing seen until next iteration
506 continue
516 continue
507 newrevs = set(rangeiter)
517 newrevs = set(rangeiter)
508 if seen:
518 if seen:
509 newrevs.difference_update(seen)
519 newrevs.difference_update(seen)
510 seen.update(newrevs)
520 seen.update(newrevs)
511 else:
521 else:
512 seen = newrevs
522 seen = newrevs
513 l.extend(sorted(newrevs, reverse=start > end))
523 l.extend(sorted(newrevs, reverse=start > end))
514 continue
524 continue
515 elif spec and spec in repo: # single unquoted rev
525 elif spec and spec in repo: # single unquoted rev
516 rev = revfix(repo, spec, None)
526 rev = revfix(repo, spec, None)
517 if rev in seen:
527 if rev in seen:
518 continue
528 continue
519 seen.add(rev)
529 seen.add(rev)
520 l.append(rev)
530 l.append(rev)
521 continue
531 continue
522 except error.RepoLookupError:
532 except error.RepoLookupError:
523 pass
533 pass
524
534
525 # fall through to new-style queries if old-style fails
535 # fall through to new-style queries if old-style fails
526 m = revset.match(repo.ui, spec)
536 m = revset.match(repo.ui, spec)
527 dl = [r for r in m(repo, revset.baseset(repo)) if r not in seen]
537 dl = [r for r in m(repo, revset.baseset(repo)) if r not in seen]
528 l.extend(dl)
538 l.extend(dl)
529 seen.update(dl)
539 seen.update(dl)
530
540
531 return revset.baseset(l)
541 return revset.baseset(l)
532
542
533 def expandpats(pats):
543 def expandpats(pats):
534 if not util.expandglobs:
544 if not util.expandglobs:
535 return list(pats)
545 return list(pats)
536 ret = []
546 ret = []
537 for p in pats:
547 for p in pats:
538 kind, name = matchmod._patsplit(p, None)
548 kind, name = matchmod._patsplit(p, None)
539 if kind is None:
549 if kind is None:
540 try:
550 try:
541 globbed = glob.glob(name)
551 globbed = glob.glob(name)
542 except re.error:
552 except re.error:
543 globbed = [name]
553 globbed = [name]
544 if globbed:
554 if globbed:
545 ret.extend(globbed)
555 ret.extend(globbed)
546 continue
556 continue
547 ret.append(p)
557 ret.append(p)
548 return ret
558 return ret
549
559
550 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
560 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
551 if pats == ("",):
561 if pats == ("",):
552 pats = []
562 pats = []
553 if not globbed and default == 'relpath':
563 if not globbed and default == 'relpath':
554 pats = expandpats(pats or [])
564 pats = expandpats(pats or [])
555
565
556 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
566 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
557 default)
567 default)
558 def badfn(f, msg):
568 def badfn(f, msg):
559 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
569 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
560 m.bad = badfn
570 m.bad = badfn
561 return m, pats
571 return m, pats
562
572
563 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
573 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
564 return matchandpats(ctx, pats, opts, globbed, default)[0]
574 return matchandpats(ctx, pats, opts, globbed, default)[0]
565
575
566 def matchall(repo):
576 def matchall(repo):
567 return matchmod.always(repo.root, repo.getcwd())
577 return matchmod.always(repo.root, repo.getcwd())
568
578
569 def matchfiles(repo, files):
579 def matchfiles(repo, files):
570 return matchmod.exact(repo.root, repo.getcwd(), files)
580 return matchmod.exact(repo.root, repo.getcwd(), files)
571
581
572 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
582 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
573 if dry_run is None:
583 if dry_run is None:
574 dry_run = opts.get('dry_run')
584 dry_run = opts.get('dry_run')
575 if similarity is None:
585 if similarity is None:
576 similarity = float(opts.get('similarity') or 0)
586 similarity = float(opts.get('similarity') or 0)
577 # we'd use status here, except handling of symlinks and ignore is tricky
587 # we'd use status here, except handling of symlinks and ignore is tricky
578 m = match(repo[None], pats, opts)
588 m = match(repo[None], pats, opts)
579 rejected = []
589 rejected = []
580 m.bad = lambda x, y: rejected.append(x)
590 m.bad = lambda x, y: rejected.append(x)
581
591
582 added, unknown, deleted, removed = _interestingfiles(repo, m)
592 added, unknown, deleted, removed = _interestingfiles(repo, m)
583
593
584 unknownset = set(unknown)
594 unknownset = set(unknown)
585 toprint = unknownset.copy()
595 toprint = unknownset.copy()
586 toprint.update(deleted)
596 toprint.update(deleted)
587 for abs in sorted(toprint):
597 for abs in sorted(toprint):
588 if repo.ui.verbose or not m.exact(abs):
598 if repo.ui.verbose or not m.exact(abs):
589 rel = m.rel(abs)
599 rel = m.rel(abs)
590 if abs in unknownset:
600 if abs in unknownset:
591 status = _('adding %s\n') % ((pats and rel) or abs)
601 status = _('adding %s\n') % ((pats and rel) or abs)
592 else:
602 else:
593 status = _('removing %s\n') % ((pats and rel) or abs)
603 status = _('removing %s\n') % ((pats and rel) or abs)
594 repo.ui.status(status)
604 repo.ui.status(status)
595
605
596 renames = _findrenames(repo, m, added + unknown, removed + deleted,
606 renames = _findrenames(repo, m, added + unknown, removed + deleted,
597 similarity)
607 similarity)
598
608
599 if not dry_run:
609 if not dry_run:
600 _markchanges(repo, unknown, deleted, renames)
610 _markchanges(repo, unknown, deleted, renames)
601
611
602 for f in rejected:
612 for f in rejected:
603 if f in m.files():
613 if f in m.files():
604 return 1
614 return 1
605 return 0
615 return 0
606
616
607 def marktouched(repo, files, similarity=0.0):
617 def marktouched(repo, files, similarity=0.0):
608 '''Assert that files have somehow been operated upon. files are relative to
618 '''Assert that files have somehow been operated upon. files are relative to
609 the repo root.'''
619 the repo root.'''
610 m = matchfiles(repo, files)
620 m = matchfiles(repo, files)
611 rejected = []
621 rejected = []
612 m.bad = lambda x, y: rejected.append(x)
622 m.bad = lambda x, y: rejected.append(x)
613
623
614 added, unknown, deleted, removed = _interestingfiles(repo, m)
624 added, unknown, deleted, removed = _interestingfiles(repo, m)
615
625
616 if repo.ui.verbose:
626 if repo.ui.verbose:
617 unknownset = set(unknown)
627 unknownset = set(unknown)
618 toprint = unknownset.copy()
628 toprint = unknownset.copy()
619 toprint.update(deleted)
629 toprint.update(deleted)
620 for abs in sorted(toprint):
630 for abs in sorted(toprint):
621 if abs in unknownset:
631 if abs in unknownset:
622 status = _('adding %s\n') % abs
632 status = _('adding %s\n') % abs
623 else:
633 else:
624 status = _('removing %s\n') % abs
634 status = _('removing %s\n') % abs
625 repo.ui.status(status)
635 repo.ui.status(status)
626
636
627 renames = _findrenames(repo, m, added + unknown, removed + deleted,
637 renames = _findrenames(repo, m, added + unknown, removed + deleted,
628 similarity)
638 similarity)
629
639
630 _markchanges(repo, unknown, deleted, renames)
640 _markchanges(repo, unknown, deleted, renames)
631
641
632 for f in rejected:
642 for f in rejected:
633 if f in m.files():
643 if f in m.files():
634 return 1
644 return 1
635 return 0
645 return 0
636
646
637 def _interestingfiles(repo, matcher):
647 def _interestingfiles(repo, matcher):
638 '''Walk dirstate with matcher, looking for files that addremove would care
648 '''Walk dirstate with matcher, looking for files that addremove would care
639 about.
649 about.
640
650
641 This is different from dirstate.status because it doesn't care about
651 This is different from dirstate.status because it doesn't care about
642 whether files are modified or clean.'''
652 whether files are modified or clean.'''
643 added, unknown, deleted, removed = [], [], [], []
653 added, unknown, deleted, removed = [], [], [], []
644 audit_path = pathutil.pathauditor(repo.root)
654 audit_path = pathutil.pathauditor(repo.root)
645
655
646 ctx = repo[None]
656 ctx = repo[None]
647 dirstate = repo.dirstate
657 dirstate = repo.dirstate
648 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
658 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
649 full=False)
659 full=False)
650 for abs, st in walkresults.iteritems():
660 for abs, st in walkresults.iteritems():
651 dstate = dirstate[abs]
661 dstate = dirstate[abs]
652 if dstate == '?' and audit_path.check(abs):
662 if dstate == '?' and audit_path.check(abs):
653 unknown.append(abs)
663 unknown.append(abs)
654 elif dstate != 'r' and not st:
664 elif dstate != 'r' and not st:
655 deleted.append(abs)
665 deleted.append(abs)
656 # for finding renames
666 # for finding renames
657 elif dstate == 'r':
667 elif dstate == 'r':
658 removed.append(abs)
668 removed.append(abs)
659 elif dstate == 'a':
669 elif dstate == 'a':
660 added.append(abs)
670 added.append(abs)
661
671
662 return added, unknown, deleted, removed
672 return added, unknown, deleted, removed
663
673
664 def _findrenames(repo, matcher, added, removed, similarity):
674 def _findrenames(repo, matcher, added, removed, similarity):
665 '''Find renames from removed files to added ones.'''
675 '''Find renames from removed files to added ones.'''
666 renames = {}
676 renames = {}
667 if similarity > 0:
677 if similarity > 0:
668 for old, new, score in similar.findrenames(repo, added, removed,
678 for old, new, score in similar.findrenames(repo, added, removed,
669 similarity):
679 similarity):
670 if (repo.ui.verbose or not matcher.exact(old)
680 if (repo.ui.verbose or not matcher.exact(old)
671 or not matcher.exact(new)):
681 or not matcher.exact(new)):
672 repo.ui.status(_('recording removal of %s as rename to %s '
682 repo.ui.status(_('recording removal of %s as rename to %s '
673 '(%d%% similar)\n') %
683 '(%d%% similar)\n') %
674 (matcher.rel(old), matcher.rel(new),
684 (matcher.rel(old), matcher.rel(new),
675 score * 100))
685 score * 100))
676 renames[new] = old
686 renames[new] = old
677 return renames
687 return renames
678
688
679 def _markchanges(repo, unknown, deleted, renames):
689 def _markchanges(repo, unknown, deleted, renames):
680 '''Marks the files in unknown as added, the files in deleted as removed,
690 '''Marks the files in unknown as added, the files in deleted as removed,
681 and the files in renames as copied.'''
691 and the files in renames as copied.'''
682 wctx = repo[None]
692 wctx = repo[None]
683 wlock = repo.wlock()
693 wlock = repo.wlock()
684 try:
694 try:
685 wctx.forget(deleted)
695 wctx.forget(deleted)
686 wctx.add(unknown)
696 wctx.add(unknown)
687 for new, old in renames.iteritems():
697 for new, old in renames.iteritems():
688 wctx.copy(old, new)
698 wctx.copy(old, new)
689 finally:
699 finally:
690 wlock.release()
700 wlock.release()
691
701
692 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
702 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
693 """Update the dirstate to reflect the intent of copying src to dst. For
703 """Update the dirstate to reflect the intent of copying src to dst. For
694 different reasons it might not end with dst being marked as copied from src.
704 different reasons it might not end with dst being marked as copied from src.
695 """
705 """
696 origsrc = repo.dirstate.copied(src) or src
706 origsrc = repo.dirstate.copied(src) or src
697 if dst == origsrc: # copying back a copy?
707 if dst == origsrc: # copying back a copy?
698 if repo.dirstate[dst] not in 'mn' and not dryrun:
708 if repo.dirstate[dst] not in 'mn' and not dryrun:
699 repo.dirstate.normallookup(dst)
709 repo.dirstate.normallookup(dst)
700 else:
710 else:
701 if repo.dirstate[origsrc] == 'a' and origsrc == src:
711 if repo.dirstate[origsrc] == 'a' and origsrc == src:
702 if not ui.quiet:
712 if not ui.quiet:
703 ui.warn(_("%s has not been committed yet, so no copy "
713 ui.warn(_("%s has not been committed yet, so no copy "
704 "data will be stored for %s.\n")
714 "data will be stored for %s.\n")
705 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
715 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
706 if repo.dirstate[dst] in '?r' and not dryrun:
716 if repo.dirstate[dst] in '?r' and not dryrun:
707 wctx.add([dst])
717 wctx.add([dst])
708 elif not dryrun:
718 elif not dryrun:
709 wctx.copy(origsrc, dst)
719 wctx.copy(origsrc, dst)
710
720
711 def readrequires(opener, supported):
721 def readrequires(opener, supported):
712 '''Reads and parses .hg/requires and checks if all entries found
722 '''Reads and parses .hg/requires and checks if all entries found
713 are in the list of supported features.'''
723 are in the list of supported features.'''
714 requirements = set(opener.read("requires").splitlines())
724 requirements = set(opener.read("requires").splitlines())
715 missings = []
725 missings = []
716 for r in requirements:
726 for r in requirements:
717 if r not in supported:
727 if r not in supported:
718 if not r or not r[0].isalnum():
728 if not r or not r[0].isalnum():
719 raise error.RequirementError(_(".hg/requires file is corrupt"))
729 raise error.RequirementError(_(".hg/requires file is corrupt"))
720 missings.append(r)
730 missings.append(r)
721 missings.sort()
731 missings.sort()
722 if missings:
732 if missings:
723 raise error.RequirementError(
733 raise error.RequirementError(
724 _("unknown repository format: requires features '%s' (upgrade "
734 _("unknown repository format: requires features '%s' (upgrade "
725 "Mercurial)") % "', '".join(missings))
735 "Mercurial)") % "', '".join(missings))
726 return requirements
736 return requirements
727
737
728 class filecachesubentry(object):
738 class filecachesubentry(object):
729 def __init__(self, path, stat):
739 def __init__(self, path, stat):
730 self.path = path
740 self.path = path
731 self.cachestat = None
741 self.cachestat = None
732 self._cacheable = None
742 self._cacheable = None
733
743
734 if stat:
744 if stat:
735 self.cachestat = filecachesubentry.stat(self.path)
745 self.cachestat = filecachesubentry.stat(self.path)
736
746
737 if self.cachestat:
747 if self.cachestat:
738 self._cacheable = self.cachestat.cacheable()
748 self._cacheable = self.cachestat.cacheable()
739 else:
749 else:
740 # None means we don't know yet
750 # None means we don't know yet
741 self._cacheable = None
751 self._cacheable = None
742
752
743 def refresh(self):
753 def refresh(self):
744 if self.cacheable():
754 if self.cacheable():
745 self.cachestat = filecachesubentry.stat(self.path)
755 self.cachestat = filecachesubentry.stat(self.path)
746
756
747 def cacheable(self):
757 def cacheable(self):
748 if self._cacheable is not None:
758 if self._cacheable is not None:
749 return self._cacheable
759 return self._cacheable
750
760
751 # we don't know yet, assume it is for now
761 # we don't know yet, assume it is for now
752 return True
762 return True
753
763
754 def changed(self):
764 def changed(self):
755 # no point in going further if we can't cache it
765 # no point in going further if we can't cache it
756 if not self.cacheable():
766 if not self.cacheable():
757 return True
767 return True
758
768
759 newstat = filecachesubentry.stat(self.path)
769 newstat = filecachesubentry.stat(self.path)
760
770
761 # we may not know if it's cacheable yet, check again now
771 # we may not know if it's cacheable yet, check again now
762 if newstat and self._cacheable is None:
772 if newstat and self._cacheable is None:
763 self._cacheable = newstat.cacheable()
773 self._cacheable = newstat.cacheable()
764
774
765 # check again
775 # check again
766 if not self._cacheable:
776 if not self._cacheable:
767 return True
777 return True
768
778
769 if self.cachestat != newstat:
779 if self.cachestat != newstat:
770 self.cachestat = newstat
780 self.cachestat = newstat
771 return True
781 return True
772 else:
782 else:
773 return False
783 return False
774
784
775 @staticmethod
785 @staticmethod
776 def stat(path):
786 def stat(path):
777 try:
787 try:
778 return util.cachestat(path)
788 return util.cachestat(path)
779 except OSError, e:
789 except OSError, e:
780 if e.errno != errno.ENOENT:
790 if e.errno != errno.ENOENT:
781 raise
791 raise
782
792
783 class filecacheentry(object):
793 class filecacheentry(object):
784 def __init__(self, paths, stat=True):
794 def __init__(self, paths, stat=True):
785 self._entries = []
795 self._entries = []
786 for path in paths:
796 for path in paths:
787 self._entries.append(filecachesubentry(path, stat))
797 self._entries.append(filecachesubentry(path, stat))
788
798
789 def changed(self):
799 def changed(self):
790 '''true if any entry has changed'''
800 '''true if any entry has changed'''
791 for entry in self._entries:
801 for entry in self._entries:
792 if entry.changed():
802 if entry.changed():
793 return True
803 return True
794 return False
804 return False
795
805
796 def refresh(self):
806 def refresh(self):
797 for entry in self._entries:
807 for entry in self._entries:
798 entry.refresh()
808 entry.refresh()
799
809
800 class filecache(object):
810 class filecache(object):
801 '''A property like decorator that tracks files under .hg/ for updates.
811 '''A property like decorator that tracks files under .hg/ for updates.
802
812
803 Records stat info when called in _filecache.
813 Records stat info when called in _filecache.
804
814
805 On subsequent calls, compares old stat info with new info, and recreates the
815 On subsequent calls, compares old stat info with new info, and recreates the
806 object when any of the files changes, updating the new stat info in
816 object when any of the files changes, updating the new stat info in
807 _filecache.
817 _filecache.
808
818
809 Mercurial either atomic renames or appends for files under .hg,
819 Mercurial either atomic renames or appends for files under .hg,
810 so to ensure the cache is reliable we need the filesystem to be able
820 so to ensure the cache is reliable we need the filesystem to be able
811 to tell us if a file has been replaced. If it can't, we fallback to
821 to tell us if a file has been replaced. If it can't, we fallback to
812 recreating the object on every call (essentially the same behaviour as
822 recreating the object on every call (essentially the same behaviour as
813 propertycache).
823 propertycache).
814
824
815 '''
825 '''
816 def __init__(self, *paths):
826 def __init__(self, *paths):
817 self.paths = paths
827 self.paths = paths
818
828
819 def join(self, obj, fname):
829 def join(self, obj, fname):
820 """Used to compute the runtime path of a cached file.
830 """Used to compute the runtime path of a cached file.
821
831
822 Users should subclass filecache and provide their own version of this
832 Users should subclass filecache and provide their own version of this
823 function to call the appropriate join function on 'obj' (an instance
833 function to call the appropriate join function on 'obj' (an instance
824 of the class that its member function was decorated).
834 of the class that its member function was decorated).
825 """
835 """
826 return obj.join(fname)
836 return obj.join(fname)
827
837
828 def __call__(self, func):
838 def __call__(self, func):
829 self.func = func
839 self.func = func
830 self.name = func.__name__
840 self.name = func.__name__
831 return self
841 return self
832
842
833 def __get__(self, obj, type=None):
843 def __get__(self, obj, type=None):
834 # do we need to check if the file changed?
844 # do we need to check if the file changed?
835 if self.name in obj.__dict__:
845 if self.name in obj.__dict__:
836 assert self.name in obj._filecache, self.name
846 assert self.name in obj._filecache, self.name
837 return obj.__dict__[self.name]
847 return obj.__dict__[self.name]
838
848
839 entry = obj._filecache.get(self.name)
849 entry = obj._filecache.get(self.name)
840
850
841 if entry:
851 if entry:
842 if entry.changed():
852 if entry.changed():
843 entry.obj = self.func(obj)
853 entry.obj = self.func(obj)
844 else:
854 else:
845 paths = [self.join(obj, path) for path in self.paths]
855 paths = [self.join(obj, path) for path in self.paths]
846
856
847 # We stat -before- creating the object so our cache doesn't lie if
857 # We stat -before- creating the object so our cache doesn't lie if
848 # a writer modified between the time we read and stat
858 # a writer modified between the time we read and stat
849 entry = filecacheentry(paths, True)
859 entry = filecacheentry(paths, True)
850 entry.obj = self.func(obj)
860 entry.obj = self.func(obj)
851
861
852 obj._filecache[self.name] = entry
862 obj._filecache[self.name] = entry
853
863
854 obj.__dict__[self.name] = entry.obj
864 obj.__dict__[self.name] = entry.obj
855 return entry.obj
865 return entry.obj
856
866
857 def __set__(self, obj, value):
867 def __set__(self, obj, value):
858 if self.name not in obj._filecache:
868 if self.name not in obj._filecache:
859 # we add an entry for the missing value because X in __dict__
869 # we add an entry for the missing value because X in __dict__
860 # implies X in _filecache
870 # implies X in _filecache
861 paths = [self.join(obj, path) for path in self.paths]
871 paths = [self.join(obj, path) for path in self.paths]
862 ce = filecacheentry(paths, False)
872 ce = filecacheentry(paths, False)
863 obj._filecache[self.name] = ce
873 obj._filecache[self.name] = ce
864 else:
874 else:
865 ce = obj._filecache[self.name]
875 ce = obj._filecache[self.name]
866
876
867 ce.obj = value # update cached copy
877 ce.obj = value # update cached copy
868 obj.__dict__[self.name] = value # update copy returned by obj.x
878 obj.__dict__[self.name] = value # update copy returned by obj.x
869
879
870 def __delete__(self, obj):
880 def __delete__(self, obj):
871 try:
881 try:
872 del obj.__dict__[self.name]
882 del obj.__dict__[self.name]
873 except KeyError:
883 except KeyError:
874 raise AttributeError(self.name)
884 raise AttributeError(self.name)
875
885
876 class dirs(object):
886 class dirs(object):
877 '''a multiset of directory names from a dirstate or manifest'''
887 '''a multiset of directory names from a dirstate or manifest'''
878
888
879 def __init__(self, map, skip=None):
889 def __init__(self, map, skip=None):
880 self._dirs = {}
890 self._dirs = {}
881 addpath = self.addpath
891 addpath = self.addpath
882 if util.safehasattr(map, 'iteritems') and skip is not None:
892 if util.safehasattr(map, 'iteritems') and skip is not None:
883 for f, s in map.iteritems():
893 for f, s in map.iteritems():
884 if s[0] != skip:
894 if s[0] != skip:
885 addpath(f)
895 addpath(f)
886 else:
896 else:
887 for f in map:
897 for f in map:
888 addpath(f)
898 addpath(f)
889
899
890 def addpath(self, path):
900 def addpath(self, path):
891 dirs = self._dirs
901 dirs = self._dirs
892 for base in finddirs(path):
902 for base in finddirs(path):
893 if base in dirs:
903 if base in dirs:
894 dirs[base] += 1
904 dirs[base] += 1
895 return
905 return
896 dirs[base] = 1
906 dirs[base] = 1
897
907
898 def delpath(self, path):
908 def delpath(self, path):
899 dirs = self._dirs
909 dirs = self._dirs
900 for base in finddirs(path):
910 for base in finddirs(path):
901 if dirs[base] > 1:
911 if dirs[base] > 1:
902 dirs[base] -= 1
912 dirs[base] -= 1
903 return
913 return
904 del dirs[base]
914 del dirs[base]
905
915
906 def __iter__(self):
916 def __iter__(self):
907 return self._dirs.iterkeys()
917 return self._dirs.iterkeys()
908
918
909 def __contains__(self, d):
919 def __contains__(self, d):
910 return d in self._dirs
920 return d in self._dirs
911
921
912 if util.safehasattr(parsers, 'dirs'):
922 if util.safehasattr(parsers, 'dirs'):
913 dirs = parsers.dirs
923 dirs = parsers.dirs
914
924
915 def finddirs(path):
925 def finddirs(path):
916 pos = path.rfind('/')
926 pos = path.rfind('/')
917 while pos != -1:
927 while pos != -1:
918 yield path[:pos]
928 yield path[:pos]
919 pos = path.rfind('/', 0, pos)
929 pos = path.rfind('/', 0, pos)
@@ -1,1537 +1,1527 b''
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, shutil, posixpath, sys
8 import errno, os, re, shutil, posixpath, sys
9 import xml.dom.minidom
9 import xml.dom.minidom
10 import stat, subprocess, tarfile
10 import stat, subprocess, tarfile
11 from i18n import _
11 from i18n import _
12 import config, util, node, error, cmdutil, bookmarks, match as matchmod
12 import config, util, node, error, cmdutil, bookmarks, match as matchmod
13 import phases
13 import phases
14 import pathutil
14 import pathutil
15 hg = None
15 hg = None
16 propertycache = util.propertycache
16 propertycache = util.propertycache
17
17
18 nullstate = ('', '', 'empty')
18 nullstate = ('', '', 'empty')
19
19
20 def _expandedabspath(path):
20 def _expandedabspath(path):
21 '''
21 '''
22 get a path or url and if it is a path expand it and return an absolute path
22 get a path or url and if it is a path expand it and return an absolute path
23 '''
23 '''
24 expandedpath = util.urllocalpath(util.expandpath(path))
24 expandedpath = util.urllocalpath(util.expandpath(path))
25 u = util.url(expandedpath)
25 u = util.url(expandedpath)
26 if not u.scheme:
26 if not u.scheme:
27 path = util.normpath(os.path.abspath(u.path))
27 path = util.normpath(os.path.abspath(u.path))
28 return path
28 return path
29
29
30 def _getstorehashcachename(remotepath):
30 def _getstorehashcachename(remotepath):
31 '''get a unique filename for the store hash cache of a remote repository'''
31 '''get a unique filename for the store hash cache of a remote repository'''
32 return util.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]
32 return util.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]
33
33
34 def _calcfilehash(filename):
34 def _calcfilehash(filename):
35 data = ''
35 data = ''
36 if os.path.exists(filename):
36 if os.path.exists(filename):
37 fd = open(filename, 'rb')
37 fd = open(filename, 'rb')
38 data = fd.read()
38 data = fd.read()
39 fd.close()
39 fd.close()
40 return util.sha1(data).hexdigest()
40 return util.sha1(data).hexdigest()
41
41
42 class SubrepoAbort(error.Abort):
42 class SubrepoAbort(error.Abort):
43 """Exception class used to avoid handling a subrepo error more than once"""
43 """Exception class used to avoid handling a subrepo error more than once"""
44 def __init__(self, *args, **kw):
44 def __init__(self, *args, **kw):
45 error.Abort.__init__(self, *args, **kw)
45 error.Abort.__init__(self, *args, **kw)
46 self.subrepo = kw.get('subrepo')
46 self.subrepo = kw.get('subrepo')
47 self.cause = kw.get('cause')
47 self.cause = kw.get('cause')
48
48
49 def annotatesubrepoerror(func):
49 def annotatesubrepoerror(func):
50 def decoratedmethod(self, *args, **kargs):
50 def decoratedmethod(self, *args, **kargs):
51 try:
51 try:
52 res = func(self, *args, **kargs)
52 res = func(self, *args, **kargs)
53 except SubrepoAbort, ex:
53 except SubrepoAbort, ex:
54 # This exception has already been handled
54 # This exception has already been handled
55 raise ex
55 raise ex
56 except error.Abort, ex:
56 except error.Abort, ex:
57 subrepo = subrelpath(self)
57 subrepo = subrelpath(self)
58 errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
58 errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
59 # avoid handling this exception by raising a SubrepoAbort exception
59 # avoid handling this exception by raising a SubrepoAbort exception
60 raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
60 raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
61 cause=sys.exc_info())
61 cause=sys.exc_info())
62 return res
62 return res
63 return decoratedmethod
63 return decoratedmethod
64
64
65 def state(ctx, ui):
65 def state(ctx, ui):
66 """return a state dict, mapping subrepo paths configured in .hgsub
66 """return a state dict, mapping subrepo paths configured in .hgsub
67 to tuple: (source from .hgsub, revision from .hgsubstate, kind
67 to tuple: (source from .hgsub, revision from .hgsubstate, kind
68 (key in types dict))
68 (key in types dict))
69 """
69 """
70 p = config.config()
70 p = config.config()
71 def read(f, sections=None, remap=None):
71 def read(f, sections=None, remap=None):
72 if f in ctx:
72 if f in ctx:
73 try:
73 try:
74 data = ctx[f].data()
74 data = ctx[f].data()
75 except IOError, err:
75 except IOError, err:
76 if err.errno != errno.ENOENT:
76 if err.errno != errno.ENOENT:
77 raise
77 raise
78 # handle missing subrepo spec files as removed
78 # handle missing subrepo spec files as removed
79 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
79 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
80 return
80 return
81 p.parse(f, data, sections, remap, read)
81 p.parse(f, data, sections, remap, read)
82 else:
82 else:
83 raise util.Abort(_("subrepo spec file %s not found") % f)
83 raise util.Abort(_("subrepo spec file %s not found") % f)
84
84
85 if '.hgsub' in ctx:
85 if '.hgsub' in ctx:
86 read('.hgsub')
86 read('.hgsub')
87
87
88 for path, src in ui.configitems('subpaths'):
88 for path, src in ui.configitems('subpaths'):
89 p.set('subpaths', path, src, ui.configsource('subpaths', path))
89 p.set('subpaths', path, src, ui.configsource('subpaths', path))
90
90
91 rev = {}
91 rev = {}
92 if '.hgsubstate' in ctx:
92 if '.hgsubstate' in ctx:
93 try:
93 try:
94 for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
94 for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
95 l = l.lstrip()
95 l = l.lstrip()
96 if not l:
96 if not l:
97 continue
97 continue
98 try:
98 try:
99 revision, path = l.split(" ", 1)
99 revision, path = l.split(" ", 1)
100 except ValueError:
100 except ValueError:
101 raise util.Abort(_("invalid subrepository revision "
101 raise util.Abort(_("invalid subrepository revision "
102 "specifier in .hgsubstate line %d")
102 "specifier in .hgsubstate line %d")
103 % (i + 1))
103 % (i + 1))
104 rev[path] = revision
104 rev[path] = revision
105 except IOError, err:
105 except IOError, err:
106 if err.errno != errno.ENOENT:
106 if err.errno != errno.ENOENT:
107 raise
107 raise
108
108
109 def remap(src):
109 def remap(src):
110 for pattern, repl in p.items('subpaths'):
110 for pattern, repl in p.items('subpaths'):
111 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
111 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
112 # does a string decode.
112 # does a string decode.
113 repl = repl.encode('string-escape')
113 repl = repl.encode('string-escape')
114 # However, we still want to allow back references to go
114 # However, we still want to allow back references to go
115 # through unharmed, so we turn r'\\1' into r'\1'. Again,
115 # through unharmed, so we turn r'\\1' into r'\1'. Again,
116 # extra escapes are needed because re.sub string decodes.
116 # extra escapes are needed because re.sub string decodes.
117 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
117 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
118 try:
118 try:
119 src = re.sub(pattern, repl, src, 1)
119 src = re.sub(pattern, repl, src, 1)
120 except re.error, e:
120 except re.error, e:
121 raise util.Abort(_("bad subrepository pattern in %s: %s")
121 raise util.Abort(_("bad subrepository pattern in %s: %s")
122 % (p.source('subpaths', pattern), e))
122 % (p.source('subpaths', pattern), e))
123 return src
123 return src
124
124
125 state = {}
125 state = {}
126 for path, src in p[''].items():
126 for path, src in p[''].items():
127 kind = 'hg'
127 kind = 'hg'
128 if src.startswith('['):
128 if src.startswith('['):
129 if ']' not in src:
129 if ']' not in src:
130 raise util.Abort(_('missing ] in subrepo source'))
130 raise util.Abort(_('missing ] in subrepo source'))
131 kind, src = src.split(']', 1)
131 kind, src = src.split(']', 1)
132 kind = kind[1:]
132 kind = kind[1:]
133 src = src.lstrip() # strip any extra whitespace after ']'
133 src = src.lstrip() # strip any extra whitespace after ']'
134
134
135 if not util.url(src).isabs():
135 if not util.url(src).isabs():
136 parent = _abssource(ctx._repo, abort=False)
136 parent = _abssource(ctx._repo, abort=False)
137 if parent:
137 if parent:
138 parent = util.url(parent)
138 parent = util.url(parent)
139 parent.path = posixpath.join(parent.path or '', src)
139 parent.path = posixpath.join(parent.path or '', src)
140 parent.path = posixpath.normpath(parent.path)
140 parent.path = posixpath.normpath(parent.path)
141 joined = str(parent)
141 joined = str(parent)
142 # Remap the full joined path and use it if it changes,
142 # Remap the full joined path and use it if it changes,
143 # else remap the original source.
143 # else remap the original source.
144 remapped = remap(joined)
144 remapped = remap(joined)
145 if remapped == joined:
145 if remapped == joined:
146 src = remap(src)
146 src = remap(src)
147 else:
147 else:
148 src = remapped
148 src = remapped
149
149
150 src = remap(src)
150 src = remap(src)
151 state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
151 state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
152
152
153 return state
153 return state
154
154
155 def writestate(repo, state):
155 def writestate(repo, state):
156 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
156 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
157 lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
157 lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
158 repo.wwrite('.hgsubstate', ''.join(lines), '')
158 repo.wwrite('.hgsubstate', ''.join(lines), '')
159
159
160 def submerge(repo, wctx, mctx, actx, overwrite):
160 def submerge(repo, wctx, mctx, actx, overwrite):
161 """delegated from merge.applyupdates: merging of .hgsubstate file
161 """delegated from merge.applyupdates: merging of .hgsubstate file
162 in working context, merging context and ancestor context"""
162 in working context, merging context and ancestor context"""
163 if mctx == actx: # backwards?
163 if mctx == actx: # backwards?
164 actx = wctx.p1()
164 actx = wctx.p1()
165 s1 = wctx.substate
165 s1 = wctx.substate
166 s2 = mctx.substate
166 s2 = mctx.substate
167 sa = actx.substate
167 sa = actx.substate
168 sm = {}
168 sm = {}
169
169
170 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
170 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
171
171
172 def debug(s, msg, r=""):
172 def debug(s, msg, r=""):
173 if r:
173 if r:
174 r = "%s:%s:%s" % r
174 r = "%s:%s:%s" % r
175 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
175 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
176
176
177 for s, l in sorted(s1.iteritems()):
177 for s, l in sorted(s1.iteritems()):
178 a = sa.get(s, nullstate)
178 a = sa.get(s, nullstate)
179 ld = l # local state with possible dirty flag for compares
179 ld = l # local state with possible dirty flag for compares
180 if wctx.sub(s).dirty():
180 if wctx.sub(s).dirty():
181 ld = (l[0], l[1] + "+")
181 ld = (l[0], l[1] + "+")
182 if wctx == actx: # overwrite
182 if wctx == actx: # overwrite
183 a = ld
183 a = ld
184
184
185 if s in s2:
185 if s in s2:
186 r = s2[s]
186 r = s2[s]
187 if ld == r or r == a: # no change or local is newer
187 if ld == r or r == a: # no change or local is newer
188 sm[s] = l
188 sm[s] = l
189 continue
189 continue
190 elif ld == a: # other side changed
190 elif ld == a: # other side changed
191 debug(s, "other changed, get", r)
191 debug(s, "other changed, get", r)
192 wctx.sub(s).get(r, overwrite)
192 wctx.sub(s).get(r, overwrite)
193 sm[s] = r
193 sm[s] = r
194 elif ld[0] != r[0]: # sources differ
194 elif ld[0] != r[0]: # sources differ
195 if repo.ui.promptchoice(
195 if repo.ui.promptchoice(
196 _(' subrepository sources for %s differ\n'
196 _(' subrepository sources for %s differ\n'
197 'use (l)ocal source (%s) or (r)emote source (%s)?'
197 'use (l)ocal source (%s) or (r)emote source (%s)?'
198 '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
198 '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
199 debug(s, "prompt changed, get", r)
199 debug(s, "prompt changed, get", r)
200 wctx.sub(s).get(r, overwrite)
200 wctx.sub(s).get(r, overwrite)
201 sm[s] = r
201 sm[s] = r
202 elif ld[1] == a[1]: # local side is unchanged
202 elif ld[1] == a[1]: # local side is unchanged
203 debug(s, "other side changed, get", r)
203 debug(s, "other side changed, get", r)
204 wctx.sub(s).get(r, overwrite)
204 wctx.sub(s).get(r, overwrite)
205 sm[s] = r
205 sm[s] = r
206 else:
206 else:
207 debug(s, "both sides changed")
207 debug(s, "both sides changed")
208 option = repo.ui.promptchoice(
208 option = repo.ui.promptchoice(
209 _(' subrepository %s diverged (local revision: %s, '
209 _(' subrepository %s diverged (local revision: %s, '
210 'remote revision: %s)\n'
210 'remote revision: %s)\n'
211 '(M)erge, keep (l)ocal or keep (r)emote?'
211 '(M)erge, keep (l)ocal or keep (r)emote?'
212 '$$ &Merge $$ &Local $$ &Remote')
212 '$$ &Merge $$ &Local $$ &Remote')
213 % (s, l[1][:12], r[1][:12]), 0)
213 % (s, l[1][:12], r[1][:12]), 0)
214 if option == 0:
214 if option == 0:
215 wctx.sub(s).merge(r)
215 wctx.sub(s).merge(r)
216 sm[s] = l
216 sm[s] = l
217 debug(s, "merge with", r)
217 debug(s, "merge with", r)
218 elif option == 1:
218 elif option == 1:
219 sm[s] = l
219 sm[s] = l
220 debug(s, "keep local subrepo revision", l)
220 debug(s, "keep local subrepo revision", l)
221 else:
221 else:
222 wctx.sub(s).get(r, overwrite)
222 wctx.sub(s).get(r, overwrite)
223 sm[s] = r
223 sm[s] = r
224 debug(s, "get remote subrepo revision", r)
224 debug(s, "get remote subrepo revision", r)
225 elif ld == a: # remote removed, local unchanged
225 elif ld == a: # remote removed, local unchanged
226 debug(s, "remote removed, remove")
226 debug(s, "remote removed, remove")
227 wctx.sub(s).remove()
227 wctx.sub(s).remove()
228 elif a == nullstate: # not present in remote or ancestor
228 elif a == nullstate: # not present in remote or ancestor
229 debug(s, "local added, keep")
229 debug(s, "local added, keep")
230 sm[s] = l
230 sm[s] = l
231 continue
231 continue
232 else:
232 else:
233 if repo.ui.promptchoice(
233 if repo.ui.promptchoice(
234 _(' local changed subrepository %s which remote removed\n'
234 _(' local changed subrepository %s which remote removed\n'
235 'use (c)hanged version or (d)elete?'
235 'use (c)hanged version or (d)elete?'
236 '$$ &Changed $$ &Delete') % s, 0):
236 '$$ &Changed $$ &Delete') % s, 0):
237 debug(s, "prompt remove")
237 debug(s, "prompt remove")
238 wctx.sub(s).remove()
238 wctx.sub(s).remove()
239
239
240 for s, r in sorted(s2.items()):
240 for s, r in sorted(s2.items()):
241 if s in s1:
241 if s in s1:
242 continue
242 continue
243 elif s not in sa:
243 elif s not in sa:
244 debug(s, "remote added, get", r)
244 debug(s, "remote added, get", r)
245 mctx.sub(s).get(r)
245 mctx.sub(s).get(r)
246 sm[s] = r
246 sm[s] = r
247 elif r != sa[s]:
247 elif r != sa[s]:
248 if repo.ui.promptchoice(
248 if repo.ui.promptchoice(
249 _(' remote changed subrepository %s which local removed\n'
249 _(' remote changed subrepository %s which local removed\n'
250 'use (c)hanged version or (d)elete?'
250 'use (c)hanged version or (d)elete?'
251 '$$ &Changed $$ &Delete') % s, 0) == 0:
251 '$$ &Changed $$ &Delete') % s, 0) == 0:
252 debug(s, "prompt recreate", r)
252 debug(s, "prompt recreate", r)
253 wctx.sub(s).get(r)
253 wctx.sub(s).get(r)
254 sm[s] = r
254 sm[s] = r
255
255
256 # record merged .hgsubstate
256 # record merged .hgsubstate
257 writestate(repo, sm)
257 writestate(repo, sm)
258 return sm
258 return sm
259
259
260 def _updateprompt(ui, sub, dirty, local, remote):
260 def _updateprompt(ui, sub, dirty, local, remote):
261 if dirty:
261 if dirty:
262 msg = (_(' subrepository sources for %s differ\n'
262 msg = (_(' subrepository sources for %s differ\n'
263 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
263 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
264 '$$ &Local $$ &Remote')
264 '$$ &Local $$ &Remote')
265 % (subrelpath(sub), local, remote))
265 % (subrelpath(sub), local, remote))
266 else:
266 else:
267 msg = (_(' subrepository sources for %s differ (in checked out '
267 msg = (_(' subrepository sources for %s differ (in checked out '
268 'version)\n'
268 'version)\n'
269 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
269 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
270 '$$ &Local $$ &Remote')
270 '$$ &Local $$ &Remote')
271 % (subrelpath(sub), local, remote))
271 % (subrelpath(sub), local, remote))
272 return ui.promptchoice(msg, 0)
272 return ui.promptchoice(msg, 0)
273
273
274 def reporelpath(repo):
274 def reporelpath(repo):
275 """return path to this (sub)repo as seen from outermost repo"""
275 """return path to this (sub)repo as seen from outermost repo"""
276 parent = repo
276 parent = repo
277 while util.safehasattr(parent, '_subparent'):
277 while util.safehasattr(parent, '_subparent'):
278 parent = parent._subparent
278 parent = parent._subparent
279 p = parent.root.rstrip(os.sep)
279 p = parent.root.rstrip(os.sep)
280 return repo.root[len(p) + 1:]
280 return repo.root[len(p) + 1:]
281
281
282 def subrelpath(sub):
282 def subrelpath(sub):
283 """return path to this subrepo as seen from outermost repo"""
283 """return path to this subrepo as seen from outermost repo"""
284 if util.safehasattr(sub, '_relpath'):
284 if util.safehasattr(sub, '_relpath'):
285 return sub._relpath
285 return sub._relpath
286 if not util.safehasattr(sub, '_repo'):
286 if not util.safehasattr(sub, '_repo'):
287 return sub._path
287 return sub._path
288 return reporelpath(sub._repo)
288 return reporelpath(sub._repo)
289
289
290 def _abssource(repo, push=False, abort=True):
290 def _abssource(repo, push=False, abort=True):
291 """return pull/push path of repo - either based on parent repo .hgsub info
291 """return pull/push path of repo - either based on parent repo .hgsub info
292 or on the top repo config. Abort or return None if no source found."""
292 or on the top repo config. Abort or return None if no source found."""
293 if util.safehasattr(repo, '_subparent'):
293 if util.safehasattr(repo, '_subparent'):
294 source = util.url(repo._subsource)
294 source = util.url(repo._subsource)
295 if source.isabs():
295 if source.isabs():
296 return str(source)
296 return str(source)
297 source.path = posixpath.normpath(source.path)
297 source.path = posixpath.normpath(source.path)
298 parent = _abssource(repo._subparent, push, abort=False)
298 parent = _abssource(repo._subparent, push, abort=False)
299 if parent:
299 if parent:
300 parent = util.url(util.pconvert(parent))
300 parent = util.url(util.pconvert(parent))
301 parent.path = posixpath.join(parent.path or '', source.path)
301 parent.path = posixpath.join(parent.path or '', source.path)
302 parent.path = posixpath.normpath(parent.path)
302 parent.path = posixpath.normpath(parent.path)
303 return str(parent)
303 return str(parent)
304 else: # recursion reached top repo
304 else: # recursion reached top repo
305 if util.safehasattr(repo, '_subtoppath'):
305 if util.safehasattr(repo, '_subtoppath'):
306 return repo._subtoppath
306 return repo._subtoppath
307 if push and repo.ui.config('paths', 'default-push'):
307 if push and repo.ui.config('paths', 'default-push'):
308 return repo.ui.config('paths', 'default-push')
308 return repo.ui.config('paths', 'default-push')
309 if repo.ui.config('paths', 'default'):
309 if repo.ui.config('paths', 'default'):
310 return repo.ui.config('paths', 'default')
310 return repo.ui.config('paths', 'default')
311 if repo.sharedpath != repo.path:
311 if repo.sharedpath != repo.path:
312 # chop off the .hg component to get the default path form
312 # chop off the .hg component to get the default path form
313 return os.path.dirname(repo.sharedpath)
313 return os.path.dirname(repo.sharedpath)
314 if abort:
314 if abort:
315 raise util.Abort(_("default path for subrepository not found"))
315 raise util.Abort(_("default path for subrepository not found"))
316
316
317 def _sanitize(ui, path):
317 def _sanitize(ui, path):
318 def v(arg, dirname, names):
318 def v(arg, dirname, names):
319 if os.path.basename(dirname).lower() != '.hg':
319 if os.path.basename(dirname).lower() != '.hg':
320 return
320 return
321 for f in names:
321 for f in names:
322 if f.lower() == 'hgrc':
322 if f.lower() == 'hgrc':
323 ui.warn(
323 ui.warn(
324 _("warning: removing potentially hostile .hg/hgrc in '%s'"
324 _("warning: removing potentially hostile .hg/hgrc in '%s'"
325 % path))
325 % path))
326 os.unlink(os.path.join(dirname, f))
326 os.unlink(os.path.join(dirname, f))
327 os.walk(path, v, None)
327 os.walk(path, v, None)
328
328
329 def itersubrepos(ctx1, ctx2):
330 """find subrepos in ctx1 or ctx2"""
331 # Create a (subpath, ctx) mapping where we prefer subpaths from
332 # ctx1. The subpaths from ctx2 are important when the .hgsub file
333 # has been modified (in ctx2) but not yet committed (in ctx1).
334 subpaths = dict.fromkeys(ctx2.substate, ctx2)
335 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
336 for subpath, ctx in sorted(subpaths.iteritems()):
337 yield subpath, ctx.sub(subpath)
338
339 def subrepo(ctx, path):
329 def subrepo(ctx, path):
340 """return instance of the right subrepo class for subrepo in path"""
330 """return instance of the right subrepo class for subrepo in path"""
341 # subrepo inherently violates our import layering rules
331 # subrepo inherently violates our import layering rules
342 # because it wants to make repo objects from deep inside the stack
332 # because it wants to make repo objects from deep inside the stack
343 # so we manually delay the circular imports to not break
333 # so we manually delay the circular imports to not break
344 # scripts that don't use our demand-loading
334 # scripts that don't use our demand-loading
345 global hg
335 global hg
346 import hg as h
336 import hg as h
347 hg = h
337 hg = h
348
338
349 pathutil.pathauditor(ctx._repo.root)(path)
339 pathutil.pathauditor(ctx._repo.root)(path)
350 state = ctx.substate[path]
340 state = ctx.substate[path]
351 if state[2] not in types:
341 if state[2] not in types:
352 raise util.Abort(_('unknown subrepo type %s') % state[2])
342 raise util.Abort(_('unknown subrepo type %s') % state[2])
353 return types[state[2]](ctx, path, state[:2])
343 return types[state[2]](ctx, path, state[:2])
354
344
355 def newcommitphase(ui, ctx):
345 def newcommitphase(ui, ctx):
356 commitphase = phases.newcommitphase(ui)
346 commitphase = phases.newcommitphase(ui)
357 substate = getattr(ctx, "substate", None)
347 substate = getattr(ctx, "substate", None)
358 if not substate:
348 if not substate:
359 return commitphase
349 return commitphase
360 check = ui.config('phases', 'checksubrepos', 'follow')
350 check = ui.config('phases', 'checksubrepos', 'follow')
361 if check not in ('ignore', 'follow', 'abort'):
351 if check not in ('ignore', 'follow', 'abort'):
362 raise util.Abort(_('invalid phases.checksubrepos configuration: %s')
352 raise util.Abort(_('invalid phases.checksubrepos configuration: %s')
363 % (check))
353 % (check))
364 if check == 'ignore':
354 if check == 'ignore':
365 return commitphase
355 return commitphase
366 maxphase = phases.public
356 maxphase = phases.public
367 maxsub = None
357 maxsub = None
368 for s in sorted(substate):
358 for s in sorted(substate):
369 sub = ctx.sub(s)
359 sub = ctx.sub(s)
370 subphase = sub.phase(substate[s][1])
360 subphase = sub.phase(substate[s][1])
371 if maxphase < subphase:
361 if maxphase < subphase:
372 maxphase = subphase
362 maxphase = subphase
373 maxsub = s
363 maxsub = s
374 if commitphase < maxphase:
364 if commitphase < maxphase:
375 if check == 'abort':
365 if check == 'abort':
376 raise util.Abort(_("can't commit in %s phase"
366 raise util.Abort(_("can't commit in %s phase"
377 " conflicting %s from subrepository %s") %
367 " conflicting %s from subrepository %s") %
378 (phases.phasenames[commitphase],
368 (phases.phasenames[commitphase],
379 phases.phasenames[maxphase], maxsub))
369 phases.phasenames[maxphase], maxsub))
380 ui.warn(_("warning: changes are committed in"
370 ui.warn(_("warning: changes are committed in"
381 " %s phase from subrepository %s\n") %
371 " %s phase from subrepository %s\n") %
382 (phases.phasenames[maxphase], maxsub))
372 (phases.phasenames[maxphase], maxsub))
383 return maxphase
373 return maxphase
384 return commitphase
374 return commitphase
385
375
386 # subrepo classes need to implement the following abstract class:
376 # subrepo classes need to implement the following abstract class:
387
377
388 class abstractsubrepo(object):
378 class abstractsubrepo(object):
389
379
390 def storeclean(self, path):
380 def storeclean(self, path):
391 """
381 """
392 returns true if the repository has not changed since it was last
382 returns true if the repository has not changed since it was last
393 cloned from or pushed to a given repository.
383 cloned from or pushed to a given repository.
394 """
384 """
395 return False
385 return False
396
386
397 def dirty(self, ignoreupdate=False):
387 def dirty(self, ignoreupdate=False):
398 """returns true if the dirstate of the subrepo is dirty or does not
388 """returns true if the dirstate of the subrepo is dirty or does not
399 match current stored state. If ignoreupdate is true, only check
389 match current stored state. If ignoreupdate is true, only check
400 whether the subrepo has uncommitted changes in its dirstate.
390 whether the subrepo has uncommitted changes in its dirstate.
401 """
391 """
402 raise NotImplementedError
392 raise NotImplementedError
403
393
404 def basestate(self):
394 def basestate(self):
405 """current working directory base state, disregarding .hgsubstate
395 """current working directory base state, disregarding .hgsubstate
406 state and working directory modifications"""
396 state and working directory modifications"""
407 raise NotImplementedError
397 raise NotImplementedError
408
398
409 def checknested(self, path):
399 def checknested(self, path):
410 """check if path is a subrepository within this repository"""
400 """check if path is a subrepository within this repository"""
411 return False
401 return False
412
402
413 def commit(self, text, user, date):
403 def commit(self, text, user, date):
414 """commit the current changes to the subrepo with the given
404 """commit the current changes to the subrepo with the given
415 log message. Use given user and date if possible. Return the
405 log message. Use given user and date if possible. Return the
416 new state of the subrepo.
406 new state of the subrepo.
417 """
407 """
418 raise NotImplementedError
408 raise NotImplementedError
419
409
420 def phase(self, state):
410 def phase(self, state):
421 """returns phase of specified state in the subrepository.
411 """returns phase of specified state in the subrepository.
422 """
412 """
423 return phases.public
413 return phases.public
424
414
425 def remove(self):
415 def remove(self):
426 """remove the subrepo
416 """remove the subrepo
427
417
428 (should verify the dirstate is not dirty first)
418 (should verify the dirstate is not dirty first)
429 """
419 """
430 raise NotImplementedError
420 raise NotImplementedError
431
421
432 def get(self, state, overwrite=False):
422 def get(self, state, overwrite=False):
433 """run whatever commands are needed to put the subrepo into
423 """run whatever commands are needed to put the subrepo into
434 this state
424 this state
435 """
425 """
436 raise NotImplementedError
426 raise NotImplementedError
437
427
438 def merge(self, state):
428 def merge(self, state):
439 """merge currently-saved state with the new state."""
429 """merge currently-saved state with the new state."""
440 raise NotImplementedError
430 raise NotImplementedError
441
431
442 def push(self, opts):
432 def push(self, opts):
443 """perform whatever action is analogous to 'hg push'
433 """perform whatever action is analogous to 'hg push'
444
434
445 This may be a no-op on some systems.
435 This may be a no-op on some systems.
446 """
436 """
447 raise NotImplementedError
437 raise NotImplementedError
448
438
449 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
439 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
450 return []
440 return []
451
441
452 def status(self, rev2, **opts):
442 def status(self, rev2, **opts):
453 return [], [], [], [], [], [], []
443 return [], [], [], [], [], [], []
454
444
455 def diff(self, ui, diffopts, node2, match, prefix, **opts):
445 def diff(self, ui, diffopts, node2, match, prefix, **opts):
456 pass
446 pass
457
447
458 def outgoing(self, ui, dest, opts):
448 def outgoing(self, ui, dest, opts):
459 return 1
449 return 1
460
450
461 def incoming(self, ui, source, opts):
451 def incoming(self, ui, source, opts):
462 return 1
452 return 1
463
453
464 def files(self):
454 def files(self):
465 """return filename iterator"""
455 """return filename iterator"""
466 raise NotImplementedError
456 raise NotImplementedError
467
457
468 def filedata(self, name):
458 def filedata(self, name):
469 """return file data"""
459 """return file data"""
470 raise NotImplementedError
460 raise NotImplementedError
471
461
472 def fileflags(self, name):
462 def fileflags(self, name):
473 """return file flags"""
463 """return file flags"""
474 return ''
464 return ''
475
465
476 def archive(self, ui, archiver, prefix, match=None):
466 def archive(self, ui, archiver, prefix, match=None):
477 if match is not None:
467 if match is not None:
478 files = [f for f in self.files() if match(f)]
468 files = [f for f in self.files() if match(f)]
479 else:
469 else:
480 files = self.files()
470 files = self.files()
481 total = len(files)
471 total = len(files)
482 relpath = subrelpath(self)
472 relpath = subrelpath(self)
483 ui.progress(_('archiving (%s)') % relpath, 0,
473 ui.progress(_('archiving (%s)') % relpath, 0,
484 unit=_('files'), total=total)
474 unit=_('files'), total=total)
485 for i, name in enumerate(files):
475 for i, name in enumerate(files):
486 flags = self.fileflags(name)
476 flags = self.fileflags(name)
487 mode = 'x' in flags and 0755 or 0644
477 mode = 'x' in flags and 0755 or 0644
488 symlink = 'l' in flags
478 symlink = 'l' in flags
489 archiver.addfile(os.path.join(prefix, self._path, name),
479 archiver.addfile(os.path.join(prefix, self._path, name),
490 mode, symlink, self.filedata(name))
480 mode, symlink, self.filedata(name))
491 ui.progress(_('archiving (%s)') % relpath, i + 1,
481 ui.progress(_('archiving (%s)') % relpath, i + 1,
492 unit=_('files'), total=total)
482 unit=_('files'), total=total)
493 ui.progress(_('archiving (%s)') % relpath, None)
483 ui.progress(_('archiving (%s)') % relpath, None)
494 return total
484 return total
495
485
496 def walk(self, match):
486 def walk(self, match):
497 '''
487 '''
498 walk recursively through the directory tree, finding all files
488 walk recursively through the directory tree, finding all files
499 matched by the match function
489 matched by the match function
500 '''
490 '''
501 pass
491 pass
502
492
503 def forget(self, ui, match, prefix):
493 def forget(self, ui, match, prefix):
504 return ([], [])
494 return ([], [])
505
495
506 def revert(self, ui, substate, *pats, **opts):
496 def revert(self, ui, substate, *pats, **opts):
507 ui.warn('%s: reverting %s subrepos is unsupported\n' \
497 ui.warn('%s: reverting %s subrepos is unsupported\n' \
508 % (substate[0], substate[2]))
498 % (substate[0], substate[2]))
509 return []
499 return []
510
500
511 class hgsubrepo(abstractsubrepo):
501 class hgsubrepo(abstractsubrepo):
512 def __init__(self, ctx, path, state):
502 def __init__(self, ctx, path, state):
513 self._path = path
503 self._path = path
514 self._state = state
504 self._state = state
515 r = ctx._repo
505 r = ctx._repo
516 root = r.wjoin(path)
506 root = r.wjoin(path)
517 create = False
507 create = False
518 if not os.path.exists(os.path.join(root, '.hg')):
508 if not os.path.exists(os.path.join(root, '.hg')):
519 create = True
509 create = True
520 util.makedirs(root)
510 util.makedirs(root)
521 self._repo = hg.repository(r.baseui, root, create=create)
511 self._repo = hg.repository(r.baseui, root, create=create)
522 for s, k in [('ui', 'commitsubrepos')]:
512 for s, k in [('ui', 'commitsubrepos')]:
523 v = r.ui.config(s, k)
513 v = r.ui.config(s, k)
524 if v:
514 if v:
525 self._repo.ui.setconfig(s, k, v)
515 self._repo.ui.setconfig(s, k, v)
526 self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
516 self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
527 self._initrepo(r, state[0], create)
517 self._initrepo(r, state[0], create)
528
518
529 def storeclean(self, path):
519 def storeclean(self, path):
530 clean = True
520 clean = True
531 lock = self._repo.lock()
521 lock = self._repo.lock()
532 itercache = self._calcstorehash(path)
522 itercache = self._calcstorehash(path)
533 try:
523 try:
534 for filehash in self._readstorehashcache(path):
524 for filehash in self._readstorehashcache(path):
535 if filehash != itercache.next():
525 if filehash != itercache.next():
536 clean = False
526 clean = False
537 break
527 break
538 except StopIteration:
528 except StopIteration:
539 # the cached and current pull states have a different size
529 # the cached and current pull states have a different size
540 clean = False
530 clean = False
541 if clean:
531 if clean:
542 try:
532 try:
543 itercache.next()
533 itercache.next()
544 # the cached and current pull states have a different size
534 # the cached and current pull states have a different size
545 clean = False
535 clean = False
546 except StopIteration:
536 except StopIteration:
547 pass
537 pass
548 lock.release()
538 lock.release()
549 return clean
539 return clean
550
540
551 def _calcstorehash(self, remotepath):
541 def _calcstorehash(self, remotepath):
552 '''calculate a unique "store hash"
542 '''calculate a unique "store hash"
553
543
554 This method is used to to detect when there are changes that may
544 This method is used to to detect when there are changes that may
555 require a push to a given remote path.'''
545 require a push to a given remote path.'''
556 # sort the files that will be hashed in increasing (likely) file size
546 # sort the files that will be hashed in increasing (likely) file size
557 filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
547 filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
558 yield '# %s\n' % _expandedabspath(remotepath)
548 yield '# %s\n' % _expandedabspath(remotepath)
559 for relname in filelist:
549 for relname in filelist:
560 absname = os.path.normpath(self._repo.join(relname))
550 absname = os.path.normpath(self._repo.join(relname))
561 yield '%s = %s\n' % (relname, _calcfilehash(absname))
551 yield '%s = %s\n' % (relname, _calcfilehash(absname))
562
552
563 def _getstorehashcachepath(self, remotepath):
553 def _getstorehashcachepath(self, remotepath):
564 '''get a unique path for the store hash cache'''
554 '''get a unique path for the store hash cache'''
565 return self._repo.join(os.path.join(
555 return self._repo.join(os.path.join(
566 'cache', 'storehash', _getstorehashcachename(remotepath)))
556 'cache', 'storehash', _getstorehashcachename(remotepath)))
567
557
568 def _readstorehashcache(self, remotepath):
558 def _readstorehashcache(self, remotepath):
569 '''read the store hash cache for a given remote repository'''
559 '''read the store hash cache for a given remote repository'''
570 cachefile = self._getstorehashcachepath(remotepath)
560 cachefile = self._getstorehashcachepath(remotepath)
571 if not os.path.exists(cachefile):
561 if not os.path.exists(cachefile):
572 return ''
562 return ''
573 fd = open(cachefile, 'r')
563 fd = open(cachefile, 'r')
574 pullstate = fd.readlines()
564 pullstate = fd.readlines()
575 fd.close()
565 fd.close()
576 return pullstate
566 return pullstate
577
567
578 def _cachestorehash(self, remotepath):
568 def _cachestorehash(self, remotepath):
579 '''cache the current store hash
569 '''cache the current store hash
580
570
581 Each remote repo requires its own store hash cache, because a subrepo
571 Each remote repo requires its own store hash cache, because a subrepo
582 store may be "clean" versus a given remote repo, but not versus another
572 store may be "clean" versus a given remote repo, but not versus another
583 '''
573 '''
584 cachefile = self._getstorehashcachepath(remotepath)
574 cachefile = self._getstorehashcachepath(remotepath)
585 lock = self._repo.lock()
575 lock = self._repo.lock()
586 storehash = list(self._calcstorehash(remotepath))
576 storehash = list(self._calcstorehash(remotepath))
587 cachedir = os.path.dirname(cachefile)
577 cachedir = os.path.dirname(cachefile)
588 if not os.path.exists(cachedir):
578 if not os.path.exists(cachedir):
589 util.makedirs(cachedir, notindexed=True)
579 util.makedirs(cachedir, notindexed=True)
590 fd = open(cachefile, 'w')
580 fd = open(cachefile, 'w')
591 fd.writelines(storehash)
581 fd.writelines(storehash)
592 fd.close()
582 fd.close()
593 lock.release()
583 lock.release()
594
584
595 @annotatesubrepoerror
585 @annotatesubrepoerror
596 def _initrepo(self, parentrepo, source, create):
586 def _initrepo(self, parentrepo, source, create):
597 self._repo._subparent = parentrepo
587 self._repo._subparent = parentrepo
598 self._repo._subsource = source
588 self._repo._subsource = source
599
589
600 if create:
590 if create:
601 fp = self._repo.opener("hgrc", "w", text=True)
591 fp = self._repo.opener("hgrc", "w", text=True)
602 fp.write('[paths]\n')
592 fp.write('[paths]\n')
603
593
604 def addpathconfig(key, value):
594 def addpathconfig(key, value):
605 if value:
595 if value:
606 fp.write('%s = %s\n' % (key, value))
596 fp.write('%s = %s\n' % (key, value))
607 self._repo.ui.setconfig('paths', key, value)
597 self._repo.ui.setconfig('paths', key, value)
608
598
609 defpath = _abssource(self._repo, abort=False)
599 defpath = _abssource(self._repo, abort=False)
610 defpushpath = _abssource(self._repo, True, abort=False)
600 defpushpath = _abssource(self._repo, True, abort=False)
611 addpathconfig('default', defpath)
601 addpathconfig('default', defpath)
612 if defpath != defpushpath:
602 if defpath != defpushpath:
613 addpathconfig('default-push', defpushpath)
603 addpathconfig('default-push', defpushpath)
614 fp.close()
604 fp.close()
615
605
616 @annotatesubrepoerror
606 @annotatesubrepoerror
617 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
607 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
618 return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
608 return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
619 os.path.join(prefix, self._path), explicitonly)
609 os.path.join(prefix, self._path), explicitonly)
620
610
621 @annotatesubrepoerror
611 @annotatesubrepoerror
622 def status(self, rev2, **opts):
612 def status(self, rev2, **opts):
623 try:
613 try:
624 rev1 = self._state[1]
614 rev1 = self._state[1]
625 ctx1 = self._repo[rev1]
615 ctx1 = self._repo[rev1]
626 ctx2 = self._repo[rev2]
616 ctx2 = self._repo[rev2]
627 return self._repo.status(ctx1, ctx2, **opts)
617 return self._repo.status(ctx1, ctx2, **opts)
628 except error.RepoLookupError, inst:
618 except error.RepoLookupError, inst:
629 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
619 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
630 % (inst, subrelpath(self)))
620 % (inst, subrelpath(self)))
631 return [], [], [], [], [], [], []
621 return [], [], [], [], [], [], []
632
622
633 @annotatesubrepoerror
623 @annotatesubrepoerror
634 def diff(self, ui, diffopts, node2, match, prefix, **opts):
624 def diff(self, ui, diffopts, node2, match, prefix, **opts):
635 try:
625 try:
636 node1 = node.bin(self._state[1])
626 node1 = node.bin(self._state[1])
637 # We currently expect node2 to come from substate and be
627 # We currently expect node2 to come from substate and be
638 # in hex format
628 # in hex format
639 if node2 is not None:
629 if node2 is not None:
640 node2 = node.bin(node2)
630 node2 = node.bin(node2)
641 cmdutil.diffordiffstat(ui, self._repo, diffopts,
631 cmdutil.diffordiffstat(ui, self._repo, diffopts,
642 node1, node2, match,
632 node1, node2, match,
643 prefix=posixpath.join(prefix, self._path),
633 prefix=posixpath.join(prefix, self._path),
644 listsubrepos=True, **opts)
634 listsubrepos=True, **opts)
645 except error.RepoLookupError, inst:
635 except error.RepoLookupError, inst:
646 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
636 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
647 % (inst, subrelpath(self)))
637 % (inst, subrelpath(self)))
648
638
649 @annotatesubrepoerror
639 @annotatesubrepoerror
650 def archive(self, ui, archiver, prefix, match=None):
640 def archive(self, ui, archiver, prefix, match=None):
651 self._get(self._state + ('hg',))
641 self._get(self._state + ('hg',))
652 total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
642 total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
653 rev = self._state[1]
643 rev = self._state[1]
654 ctx = self._repo[rev]
644 ctx = self._repo[rev]
655 for subpath in ctx.substate:
645 for subpath in ctx.substate:
656 s = subrepo(ctx, subpath)
646 s = subrepo(ctx, subpath)
657 submatch = matchmod.narrowmatcher(subpath, match)
647 submatch = matchmod.narrowmatcher(subpath, match)
658 total += s.archive(
648 total += s.archive(
659 ui, archiver, os.path.join(prefix, self._path), submatch)
649 ui, archiver, os.path.join(prefix, self._path), submatch)
660 return total
650 return total
661
651
662 @annotatesubrepoerror
652 @annotatesubrepoerror
663 def dirty(self, ignoreupdate=False):
653 def dirty(self, ignoreupdate=False):
664 r = self._state[1]
654 r = self._state[1]
665 if r == '' and not ignoreupdate: # no state recorded
655 if r == '' and not ignoreupdate: # no state recorded
666 return True
656 return True
667 w = self._repo[None]
657 w = self._repo[None]
668 if r != w.p1().hex() and not ignoreupdate:
658 if r != w.p1().hex() and not ignoreupdate:
669 # different version checked out
659 # different version checked out
670 return True
660 return True
671 return w.dirty() # working directory changed
661 return w.dirty() # working directory changed
672
662
673 def basestate(self):
663 def basestate(self):
674 return self._repo['.'].hex()
664 return self._repo['.'].hex()
675
665
676 def checknested(self, path):
666 def checknested(self, path):
677 return self._repo._checknested(self._repo.wjoin(path))
667 return self._repo._checknested(self._repo.wjoin(path))
678
668
    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit pending subrepo changes; return the resulting hex node.

        If nothing changed, or the commit turns out empty, the hex of the
        currently checked-out revision is returned instead.
        """
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)
690
680
691 @annotatesubrepoerror
681 @annotatesubrepoerror
692 def phase(self, state):
682 def phase(self, state):
693 return self._repo[state].phase()
683 return self._repo[state].phase()
694
684
    @annotatesubrepoerror
    def remove(self):
        """Empty the subrepo working directory, keeping its history."""
        # we can't fully delete the repository as it may contain
        # local-only history
        self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)
701
691
    def _get(self, state):
        """Make sure state's revision is present locally, cloning or
        pulling from the subrepo source as needed.

        Returns True if the revision was already present (no network
        access performed), False otherwise.
        """
        source, revision, kind = state
        if revision in self._repo.unfiltered():
            return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            # empty stub repo: replace it with a real clone of the source
            self._repo.ui.status(_('cloning subrepo %s from %s\n')
                                 % (subrelpath(self), srcurl))
            parentrepo = self._repo._subparent
            shutil.rmtree(self._repo.path)
            other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                     other, self._repo.root,
                                     update=False)
            self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self._repo.ui.status(_('pulling subrepo %s from %s\n')
                                 % (subrelpath(self), srcurl))
            # remember whether the store was clean before pulling so the
            # store-hash cache can be refreshed afterwards
            cleansub = self.storeclean(srcurl)
            remotebookmarks = other.listkeys('bookmarks')
            self._repo.pull(other)
            bookmarks.updatefromremote(self._repo.ui, self._repo,
                                       remotebookmarks, srcurl)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False
732
722
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the subrepo working directory to state's revision."""
        # inrepo: revision was already available locally (see _get)
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                # updating to an obsolete/hidden revision: warn and use
                # the unfiltered repo so the update can still proceed
                urepo.ui.warn(
                    _('revision %s in subrepo %s is hidden\n') \
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)
748
738
    @annotatesubrepoerror
    def merge(self, state):
        """Merge (or fast-forward) the subrepo to state's revision,
        prompting the user when local changes could be clobbered.
        """
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            # fast-forward update when dst is a descendant on our branch;
            # no-op when we're already ahead; real merge otherwise
            if anc == cur and dst.branch() == cur.branch():
                self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # dirty and diverged: ask before touching the checkout
                if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()
775
765
    @annotatesubrepoerror
    def push(self, opts):
        """Push this subrepo (and its committed subrepos) to its source.

        Returns False if a nested push failed, None if there was nothing
        to push, otherwise the result of repo.push().
        """
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            # skip the push entirely if the store is unchanged since the
            # last push to this destination
            if self.storeclean(dsturl):
                self._repo.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                             (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = self._repo.push(other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res
804
794
805 @annotatesubrepoerror
795 @annotatesubrepoerror
806 def outgoing(self, ui, dest, opts):
796 def outgoing(self, ui, dest, opts):
807 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
797 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
808
798
809 @annotatesubrepoerror
799 @annotatesubrepoerror
810 def incoming(self, ui, source, opts):
800 def incoming(self, ui, source, opts):
811 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
801 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
812
802
813 @annotatesubrepoerror
803 @annotatesubrepoerror
814 def files(self):
804 def files(self):
815 rev = self._state[1]
805 rev = self._state[1]
816 ctx = self._repo[rev]
806 ctx = self._repo[rev]
817 return ctx.manifest()
807 return ctx.manifest()
818
808
819 def filedata(self, name):
809 def filedata(self, name):
820 rev = self._state[1]
810 rev = self._state[1]
821 return self._repo[rev][name].data()
811 return self._repo[rev][name].data()
822
812
823 def fileflags(self, name):
813 def fileflags(self, name):
824 rev = self._state[1]
814 rev = self._state[1]
825 ctx = self._repo[rev]
815 ctx = self._repo[rev]
826 return ctx.flags(name)
816 return ctx.flags(name)
827
817
828 def walk(self, match):
818 def walk(self, match):
829 ctx = self._repo[None]
819 ctx = self._repo[None]
830 return ctx.walk(match)
820 return ctx.walk(match)
831
821
832 @annotatesubrepoerror
822 @annotatesubrepoerror
833 def forget(self, ui, match, prefix):
823 def forget(self, ui, match, prefix):
834 return cmdutil.forget(ui, self._repo, match,
824 return cmdutil.forget(ui, self._repo, match,
835 os.path.join(prefix, self._path), True)
825 os.path.join(prefix, self._path), True)
836
826
    @annotatesubrepoerror
    def revert(self, ui, substate, *pats, **opts):
        """Revert the subrepo to the state recorded in *substate*."""
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            pats = []
            if not opts.get('all'):
                # without --all, only touch modified files
                pats = ['set:modified()']
            self.filerevert(ui, *pats, **opts)

        # Update the repo to the revision specified in the given substate
        self.get(substate, overwrite=True)
860
850
861 def filerevert(self, ui, *pats, **opts):
851 def filerevert(self, ui, *pats, **opts):
862 ctx = self._repo[opts['rev']]
852 ctx = self._repo[opts['rev']]
863 parents = self._repo.dirstate.parents()
853 parents = self._repo.dirstate.parents()
864 if opts.get('all'):
854 if opts.get('all'):
865 pats = ['set:modified()']
855 pats = ['set:modified()']
866 else:
856 else:
867 pats = []
857 pats = []
868 cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)
858 cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)
869
859
class svnsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        """Bind a Subversion subrepo at *path* for changectx *ctx*.

        Raises util.Abort when no 'svn' executable is available.
        """
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)
880
870
    def _svncommand(self, commands, filename='', failok=False):
        """Run svn with *commands* against *filename* in the checkout.

        Returns (stdout, stderr).  Unless failok, aborts on a non-zero
        exit code and echoes any stderr as a warning.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self._ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self._ui.warn(stderr + '\n')
        return stdout, stderr
916
906
917 @propertycache
907 @propertycache
918 def _svnversion(self):
908 def _svnversion(self):
919 output, err = self._svncommand(['--version', '--quiet'], filename=None)
909 output, err = self._svncommand(['--version', '--quiet'], filename=None)
920 m = re.search(r'^(\d+)\.(\d+)', output)
910 m = re.search(r'^(\d+)\.(\d+)', output)
921 if not m:
911 if not m:
922 raise util.Abort(_('cannot retrieve svn tool version'))
912 raise util.Abort(_('cannot retrieve svn tool version'))
923 return (int(m.group(1)), int(m.group(2)))
913 return (int(m.group(1)), int(m.group(2)))
924
914
    def _wcrevs(self):
        """Return (last-committed revision, working-copy revision) as
        strings, defaulting to '0' when the info is unavailable."""
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)
939
929
940 def _wcrev(self):
930 def _wcrev(self):
941 return self._wcrevs()[0]
931 return self._wcrevs()[0]
942
932
    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            # anything other than a pristine/external/unversioned status
            # (either item or property wise) counts as a change
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # a change under an external entry marks the whole wc external-dirty
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)
971
961
972 def dirty(self, ignoreupdate=False):
962 def dirty(self, ignoreupdate=False):
973 if not self._wcchanged()[0]:
963 if not self._wcchanged()[0]:
974 if self._state[1] in self._wcrevs() or ignoreupdate:
964 if self._state[1] in self._wcrevs() or ignoreupdate:
975 return False
965 return False
976 return True
966 return True
977
967
    def basestate(self):
        """Revision string to record for the current working copy."""
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev
991
981
    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit working-copy changes to svn; return the new revision.

        *user* and *date* are ignored (see comment below).  Aborts on
        external or missing entries, or when svn reports no commit.
        """
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise util.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise util.Abort(_('failed to commit svn changes'))
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        # sync the working copy metadata to the revision just committed
        self._ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev
1019
1009
1020 @annotatesubrepoerror
1010 @annotatesubrepoerror
1021 def remove(self):
1011 def remove(self):
1022 if self.dirty():
1012 if self.dirty():
1023 self._ui.warn(_('not removing repo %s because '
1013 self._ui.warn(_('not removing repo %s because '
1024 'it has changes.\n' % self._path))
1014 'it has changes.\n' % self._path))
1025 return
1015 return
1026 self._ui.note(_('removing subrepo %s\n') % self._path)
1016 self._ui.note(_('removing subrepo %s\n') % self._path)
1027
1017
1028 def onerror(function, path, excinfo):
1018 def onerror(function, path, excinfo):
1029 if function is not os.remove:
1019 if function is not os.remove:
1030 raise
1020 raise
1031 # read-only files cannot be unlinked under Windows
1021 # read-only files cannot be unlinked under Windows
1032 s = os.stat(path)
1022 s = os.stat(path)
1033 if (s.st_mode & stat.S_IWRITE) != 0:
1023 if (s.st_mode & stat.S_IWRITE) != 0:
1034 raise
1024 raise
1035 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
1025 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
1036 os.remove(path)
1026 os.remove(path)
1037
1027
1038 path = self._ctx._repo.wjoin(self._path)
1028 path = self._ctx._repo.wjoin(self._path)
1039 shutil.rmtree(path, onerror=onerror)
1029 shutil.rmtree(path, onerror=onerror)
1040 try:
1030 try:
1041 os.removedirs(os.path.dirname(path))
1031 os.removedirs(os.path.dirname(path))
1042 except OSError:
1032 except OSError:
1043 pass
1033 pass
1044
1034
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Check out state's URL@revision into the working copy.

        With overwrite, local modifications are reverted first.
        """
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            # --force only exists from svn 1.5 on
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        _sanitize(self._ui, self._path)
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise util.Abort((status or err).splitlines()[-1])
        self._ui.status(status)
1066
1056
    @annotatesubrepoerror
    def merge(self, state):
        """'Merge' by checking out state's revision, prompting the user
        when the working copy looks dirty (svn has no real merge here)."""
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            # treat an un-updated wc (still at the old recorded rev) or
            # local modifications as dirty for the prompt
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self._ui, self, dirty, wcrev, new):
                self.get(state, False)
1076
1066
    def push(self, opts):
        """Always succeed: there is nothing to push for centralized svn."""
        # push is a no-op for SVN
        return True
1080
1070
    @annotatesubrepoerror
    def files(self):
        """List of tracked file paths (UTF-8 bytes) via 'svn list'."""
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            # concatenate the text nodes of the <name> element
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths
1095
1085
1096 def filedata(self, name):
1086 def filedata(self, name):
1097 return self._svncommand(['cat'], name)[0]
1087 return self._svncommand(['cat'], name)[0]
1098
1088
1099
1089
class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        """Bind a git subrepo at *path* for changectx *ctx*.

        Verifies a usable git executable via _ensuregit().
        """
        self._state = state
        self._ctx = ctx
        self._path = path
        self._relpath = os.path.join(reporelpath(ctx._repo), path)
        self._abspath = ctx._repo.wjoin(path)
        self._subparent = ctx._repo
        self._ui = ctx._repo.ui
        self._ensuregit()
1110
1100
1111 def _ensuregit(self):
1101 def _ensuregit(self):
1112 try:
1102 try:
1113 self._gitexecutable = 'git'
1103 self._gitexecutable = 'git'
1114 out, err = self._gitnodir(['--version'])
1104 out, err = self._gitnodir(['--version'])
1115 except OSError, e:
1105 except OSError, e:
1116 if e.errno != 2 or os.name != 'nt':
1106 if e.errno != 2 or os.name != 'nt':
1117 raise
1107 raise
1118 self._gitexecutable = 'git.cmd'
1108 self._gitexecutable = 'git.cmd'
1119 out, err = self._gitnodir(['--version'])
1109 out, err = self._gitnodir(['--version'])
1120 m = re.search(r'^git version (\d+)\.(\d+)\.(\d+)', out)
1110 m = re.search(r'^git version (\d+)\.(\d+)\.(\d+)', out)
1121 if not m:
1111 if not m:
1122 self._ui.warn(_('cannot retrieve git version'))
1112 self._ui.warn(_('cannot retrieve git version'))
1123 return
1113 return
1124 version = (int(m.group(1)), m.group(2), m.group(3))
1114 version = (int(m.group(1)), m.group(2), m.group(3))
1125 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1115 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1126 # despite the docstring comment. For now, error on 1.4.0, warn on
1116 # despite the docstring comment. For now, error on 1.4.0, warn on
1127 # 1.5.0 but attempt to continue.
1117 # 1.5.0 but attempt to continue.
1128 if version < (1, 5, 0):
1118 if version < (1, 5, 0):
1129 raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
1119 raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
1130 elif version < (1, 6, 0):
1120 elif version < (1, 6, 0):
1131 self._ui.warn(_('git subrepo requires at least 1.6.0 or later'))
1121 self._ui.warn(_('git subrepo requires at least 1.6.0 or later'))
1132
1122
1133 def _gitcommand(self, commands, env=None, stream=False):
1123 def _gitcommand(self, commands, env=None, stream=False):
1134 return self._gitdir(commands, env=env, stream=stream)[0]
1124 return self._gitdir(commands, env=env, stream=stream)[0]
1135
1125
    def _gitdir(self, commands, env=None, stream=False):
        """Run git with cwd set to the subrepo checkout directory."""
        return self._gitnodir(commands, env=env, stream=stream,
                              cwd=self._abspath)
1139
1129
    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Call the git command with *commands* in directory *cwd*.

        Returns (stdout-data, returncode), or (stdout-pipe, None) when
        stream is set.  git versions prior to 1.6.0 are not supported
        and very probably fail.
        """
        self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
        # unless ui.quiet is set, print git's stderr,
        # which is mostly progress and useful info
        errpipe = None
        if self._ui.quiet:
            errpipe = open(os.devnull, 'w')
        p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                             cwd=cwd, env=env, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=errpipe)
        if stream:
            # caller consumes the pipe itself; no exit-code checking
            return p.stdout, None

        retdata = p.stdout.read().strip()
        # wait for the child to exit to avoid race condition.
        p.wait()

        if p.returncode != 0 and p.returncode != 1:
            # there are certain error codes that are ok
            command = commands[0]
            if command in ('cat-file', 'symbolic-ref'):
                return retdata, p.returncode
            # for all others, abort
            raise util.Abort('git %s error %d in %s' %
                             (command, p.returncode, self._relpath))

        return retdata, p.returncode
1172
1162
1173 def _gitmissing(self):
1163 def _gitmissing(self):
1174 return not os.path.exists(os.path.join(self._abspath, '.git'))
1164 return not os.path.exists(os.path.join(self._abspath, '.git'))
1175
1165
1176 def _gitstate(self):
1166 def _gitstate(self):
1177 return self._gitcommand(['rev-parse', 'HEAD'])
1167 return self._gitcommand(['rev-parse', 'HEAD'])
1178
1168
1179 def _gitcurrentbranch(self):
1169 def _gitcurrentbranch(self):
1180 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1170 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1181 if err:
1171 if err:
1182 current = None
1172 current = None
1183 return current
1173 return current
1184
1174
1185 def _gitremote(self, remote):
1175 def _gitremote(self, remote):
1186 out = self._gitcommand(['remote', 'show', '-n', remote])
1176 out = self._gitcommand(['remote', 'show', '-n', remote])
1187 line = out.split('\n')[1]
1177 line = out.split('\n')[1]
1188 i = line.index('URL: ') + len('URL: ')
1178 i = line.index('URL: ') + len('URL: ')
1189 return line[i:]
1179 return line[i:]
1190
1180
1191 def _githavelocally(self, revision):
1181 def _githavelocally(self, revision):
1192 out, code = self._gitdir(['cat-file', '-e', revision])
1182 out, code = self._gitdir(['cat-file', '-e', revision])
1193 return code == 0
1183 return code == 0
1194
1184
1195 def _gitisancestor(self, r1, r2):
1185 def _gitisancestor(self, r1, r2):
1196 base = self._gitcommand(['merge-base', r1, r2])
1186 base = self._gitcommand(['merge-base', r1, r2])
1197 return base == r1
1187 return base == r1
1198
1188
1199 def _gitisbare(self):
1189 def _gitisbare(self):
1200 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1190 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1201
1191
1202 def _gitupdatestat(self):
1192 def _gitupdatestat(self):
1203 """This must be run before git diff-index.
1193 """This must be run before git diff-index.
1204 diff-index only looks at changes to file stat;
1194 diff-index only looks at changes to file stat;
1205 this command looks at file contents and updates the stat."""
1195 this command looks at file contents and updates the stat."""
1206 self._gitcommand(['update-index', '-q', '--refresh'])
1196 self._gitcommand(['update-index', '-q', '--refresh'])
1207
1197
1208 def _gitbranchmap(self):
1198 def _gitbranchmap(self):
1209 '''returns 2 things:
1199 '''returns 2 things:
1210 a map from git branch to revision
1200 a map from git branch to revision
1211 a map from revision to branches'''
1201 a map from revision to branches'''
1212 branch2rev = {}
1202 branch2rev = {}
1213 rev2branch = {}
1203 rev2branch = {}
1214
1204
1215 out = self._gitcommand(['for-each-ref', '--format',
1205 out = self._gitcommand(['for-each-ref', '--format',
1216 '%(objectname) %(refname)'])
1206 '%(objectname) %(refname)'])
1217 for line in out.split('\n'):
1207 for line in out.split('\n'):
1218 revision, ref = line.split(' ')
1208 revision, ref = line.split(' ')
1219 if (not ref.startswith('refs/heads/') and
1209 if (not ref.startswith('refs/heads/') and
1220 not ref.startswith('refs/remotes/')):
1210 not ref.startswith('refs/remotes/')):
1221 continue
1211 continue
1222 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1212 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1223 continue # ignore remote/HEAD redirects
1213 continue # ignore remote/HEAD redirects
1224 branch2rev[ref] = revision
1214 branch2rev[ref] = revision
1225 rev2branch.setdefault(revision, []).append(ref)
1215 rev2branch.setdefault(revision, []).append(ref)
1226 return branch2rev, rev2branch
1216 return branch2rev, rev2branch
1227
1217
1228 def _gittracking(self, branches):
1218 def _gittracking(self, branches):
1229 'return map of remote branch to local tracking branch'
1219 'return map of remote branch to local tracking branch'
1230 # assumes no more than one local tracking branch for each remote
1220 # assumes no more than one local tracking branch for each remote
1231 tracking = {}
1221 tracking = {}
1232 for b in branches:
1222 for b in branches:
1233 if b.startswith('refs/remotes/'):
1223 if b.startswith('refs/remotes/'):
1234 continue
1224 continue
1235 bname = b.split('/', 2)[2]
1225 bname = b.split('/', 2)[2]
1236 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1226 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1237 if remote:
1227 if remote:
1238 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1228 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1239 tracking['refs/remotes/%s/%s' %
1229 tracking['refs/remotes/%s/%s' %
1240 (remote, ref.split('/', 2)[2])] = b
1230 (remote, ref.split('/', 2)[2])] = b
1241 return tracking
1231 return tracking
1242
1232
1243 def _abssource(self, source):
1233 def _abssource(self, source):
1244 if '://' not in source:
1234 if '://' not in source:
1245 # recognize the scp syntax as an absolute source
1235 # recognize the scp syntax as an absolute source
1246 colon = source.find(':')
1236 colon = source.find(':')
1247 if colon != -1 and '/' not in source[:colon]:
1237 if colon != -1 and '/' not in source[:colon]:
1248 return source
1238 return source
1249 self._subsource = source
1239 self._subsource = source
1250 return _abssource(self)
1240 return _abssource(self)
1251
1241
1252 def _fetch(self, source, revision):
1242 def _fetch(self, source, revision):
1253 if self._gitmissing():
1243 if self._gitmissing():
1254 source = self._abssource(source)
1244 source = self._abssource(source)
1255 self._ui.status(_('cloning subrepo %s from %s\n') %
1245 self._ui.status(_('cloning subrepo %s from %s\n') %
1256 (self._relpath, source))
1246 (self._relpath, source))
1257 self._gitnodir(['clone', source, self._abspath])
1247 self._gitnodir(['clone', source, self._abspath])
1258 if self._githavelocally(revision):
1248 if self._githavelocally(revision):
1259 return
1249 return
1260 self._ui.status(_('pulling subrepo %s from %s\n') %
1250 self._ui.status(_('pulling subrepo %s from %s\n') %
1261 (self._relpath, self._gitremote('origin')))
1251 (self._relpath, self._gitremote('origin')))
1262 # try only origin: the originally cloned repo
1252 # try only origin: the originally cloned repo
1263 self._gitcommand(['fetch'])
1253 self._gitcommand(['fetch'])
1264 if not self._githavelocally(revision):
1254 if not self._githavelocally(revision):
1265 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
1255 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
1266 (revision, self._relpath))
1256 (revision, self._relpath))
1267
1257
1268 @annotatesubrepoerror
1258 @annotatesubrepoerror
1269 def dirty(self, ignoreupdate=False):
1259 def dirty(self, ignoreupdate=False):
1270 if self._gitmissing():
1260 if self._gitmissing():
1271 return self._state[1] != ''
1261 return self._state[1] != ''
1272 if self._gitisbare():
1262 if self._gitisbare():
1273 return True
1263 return True
1274 if not ignoreupdate and self._state[1] != self._gitstate():
1264 if not ignoreupdate and self._state[1] != self._gitstate():
1275 # different version checked out
1265 # different version checked out
1276 return True
1266 return True
1277 # check for staged changes or modified files; ignore untracked files
1267 # check for staged changes or modified files; ignore untracked files
1278 self._gitupdatestat()
1268 self._gitupdatestat()
1279 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1269 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1280 return code == 1
1270 return code == 1
1281
1271
1282 def basestate(self):
1272 def basestate(self):
1283 return self._gitstate()
1273 return self._gitstate()
1284
1274
1285 @annotatesubrepoerror
1275 @annotatesubrepoerror
1286 def get(self, state, overwrite=False):
1276 def get(self, state, overwrite=False):
1287 source, revision, kind = state
1277 source, revision, kind = state
1288 if not revision:
1278 if not revision:
1289 self.remove()
1279 self.remove()
1290 return
1280 return
1291 self._fetch(source, revision)
1281 self._fetch(source, revision)
1292 # if the repo was set to be bare, unbare it
1282 # if the repo was set to be bare, unbare it
1293 if self._gitisbare():
1283 if self._gitisbare():
1294 self._gitcommand(['config', 'core.bare', 'false'])
1284 self._gitcommand(['config', 'core.bare', 'false'])
1295 if self._gitstate() == revision:
1285 if self._gitstate() == revision:
1296 self._gitcommand(['reset', '--hard', 'HEAD'])
1286 self._gitcommand(['reset', '--hard', 'HEAD'])
1297 return
1287 return
1298 elif self._gitstate() == revision:
1288 elif self._gitstate() == revision:
1299 if overwrite:
1289 if overwrite:
1300 # first reset the index to unmark new files for commit, because
1290 # first reset the index to unmark new files for commit, because
1301 # reset --hard will otherwise throw away files added for commit,
1291 # reset --hard will otherwise throw away files added for commit,
1302 # not just unmark them.
1292 # not just unmark them.
1303 self._gitcommand(['reset', 'HEAD'])
1293 self._gitcommand(['reset', 'HEAD'])
1304 self._gitcommand(['reset', '--hard', 'HEAD'])
1294 self._gitcommand(['reset', '--hard', 'HEAD'])
1305 return
1295 return
1306 branch2rev, rev2branch = self._gitbranchmap()
1296 branch2rev, rev2branch = self._gitbranchmap()
1307
1297
1308 def checkout(args):
1298 def checkout(args):
1309 cmd = ['checkout']
1299 cmd = ['checkout']
1310 if overwrite:
1300 if overwrite:
1311 # first reset the index to unmark new files for commit, because
1301 # first reset the index to unmark new files for commit, because
1312 # the -f option will otherwise throw away files added for
1302 # the -f option will otherwise throw away files added for
1313 # commit, not just unmark them.
1303 # commit, not just unmark them.
1314 self._gitcommand(['reset', 'HEAD'])
1304 self._gitcommand(['reset', 'HEAD'])
1315 cmd.append('-f')
1305 cmd.append('-f')
1316 self._gitcommand(cmd + args)
1306 self._gitcommand(cmd + args)
1317 _sanitize(self._ui, self._path)
1307 _sanitize(self._ui, self._path)
1318
1308
1319 def rawcheckout():
1309 def rawcheckout():
1320 # no branch to checkout, check it out with no branch
1310 # no branch to checkout, check it out with no branch
1321 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
1311 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
1322 self._relpath)
1312 self._relpath)
1323 self._ui.warn(_('check out a git branch if you intend '
1313 self._ui.warn(_('check out a git branch if you intend '
1324 'to make changes\n'))
1314 'to make changes\n'))
1325 checkout(['-q', revision])
1315 checkout(['-q', revision])
1326
1316
1327 if revision not in rev2branch:
1317 if revision not in rev2branch:
1328 rawcheckout()
1318 rawcheckout()
1329 return
1319 return
1330 branches = rev2branch[revision]
1320 branches = rev2branch[revision]
1331 firstlocalbranch = None
1321 firstlocalbranch = None
1332 for b in branches:
1322 for b in branches:
1333 if b == 'refs/heads/master':
1323 if b == 'refs/heads/master':
1334 # master trumps all other branches
1324 # master trumps all other branches
1335 checkout(['refs/heads/master'])
1325 checkout(['refs/heads/master'])
1336 return
1326 return
1337 if not firstlocalbranch and not b.startswith('refs/remotes/'):
1327 if not firstlocalbranch and not b.startswith('refs/remotes/'):
1338 firstlocalbranch = b
1328 firstlocalbranch = b
1339 if firstlocalbranch:
1329 if firstlocalbranch:
1340 checkout([firstlocalbranch])
1330 checkout([firstlocalbranch])
1341 return
1331 return
1342
1332
1343 tracking = self._gittracking(branch2rev.keys())
1333 tracking = self._gittracking(branch2rev.keys())
1344 # choose a remote branch already tracked if possible
1334 # choose a remote branch already tracked if possible
1345 remote = branches[0]
1335 remote = branches[0]
1346 if remote not in tracking:
1336 if remote not in tracking:
1347 for b in branches:
1337 for b in branches:
1348 if b in tracking:
1338 if b in tracking:
1349 remote = b
1339 remote = b
1350 break
1340 break
1351
1341
1352 if remote not in tracking:
1342 if remote not in tracking:
1353 # create a new local tracking branch
1343 # create a new local tracking branch
1354 local = remote.split('/', 3)[3]
1344 local = remote.split('/', 3)[3]
1355 checkout(['-b', local, remote])
1345 checkout(['-b', local, remote])
1356 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
1346 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
1357 # When updating to a tracked remote branch,
1347 # When updating to a tracked remote branch,
1358 # if the local tracking branch is downstream of it,
1348 # if the local tracking branch is downstream of it,
1359 # a normal `git pull` would have performed a "fast-forward merge"
1349 # a normal `git pull` would have performed a "fast-forward merge"
1360 # which is equivalent to updating the local branch to the remote.
1350 # which is equivalent to updating the local branch to the remote.
1361 # Since we are only looking at branching at update, we need to
1351 # Since we are only looking at branching at update, we need to
1362 # detect this situation and perform this action lazily.
1352 # detect this situation and perform this action lazily.
1363 if tracking[remote] != self._gitcurrentbranch():
1353 if tracking[remote] != self._gitcurrentbranch():
1364 checkout([tracking[remote]])
1354 checkout([tracking[remote]])
1365 self._gitcommand(['merge', '--ff', remote])
1355 self._gitcommand(['merge', '--ff', remote])
1366 else:
1356 else:
1367 # a real merge would be required, just checkout the revision
1357 # a real merge would be required, just checkout the revision
1368 rawcheckout()
1358 rawcheckout()
1369
1359
1370 @annotatesubrepoerror
1360 @annotatesubrepoerror
1371 def commit(self, text, user, date):
1361 def commit(self, text, user, date):
1372 if self._gitmissing():
1362 if self._gitmissing():
1373 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1363 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1374 cmd = ['commit', '-a', '-m', text]
1364 cmd = ['commit', '-a', '-m', text]
1375 env = os.environ.copy()
1365 env = os.environ.copy()
1376 if user:
1366 if user:
1377 cmd += ['--author', user]
1367 cmd += ['--author', user]
1378 if date:
1368 if date:
1379 # git's date parser silently ignores when seconds < 1e9
1369 # git's date parser silently ignores when seconds < 1e9
1380 # convert to ISO8601
1370 # convert to ISO8601
1381 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1371 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1382 '%Y-%m-%dT%H:%M:%S %1%2')
1372 '%Y-%m-%dT%H:%M:%S %1%2')
1383 self._gitcommand(cmd, env=env)
1373 self._gitcommand(cmd, env=env)
1384 # make sure commit works otherwise HEAD might not exist under certain
1374 # make sure commit works otherwise HEAD might not exist under certain
1385 # circumstances
1375 # circumstances
1386 return self._gitstate()
1376 return self._gitstate()
1387
1377
1388 @annotatesubrepoerror
1378 @annotatesubrepoerror
1389 def merge(self, state):
1379 def merge(self, state):
1390 source, revision, kind = state
1380 source, revision, kind = state
1391 self._fetch(source, revision)
1381 self._fetch(source, revision)
1392 base = self._gitcommand(['merge-base', revision, self._state[1]])
1382 base = self._gitcommand(['merge-base', revision, self._state[1]])
1393 self._gitupdatestat()
1383 self._gitupdatestat()
1394 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1384 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1395
1385
1396 def mergefunc():
1386 def mergefunc():
1397 if base == revision:
1387 if base == revision:
1398 self.get(state) # fast forward merge
1388 self.get(state) # fast forward merge
1399 elif base != self._state[1]:
1389 elif base != self._state[1]:
1400 self._gitcommand(['merge', '--no-commit', revision])
1390 self._gitcommand(['merge', '--no-commit', revision])
1401 _sanitize(self._ui, self._path)
1391 _sanitize(self._ui, self._path)
1402
1392
1403 if self.dirty():
1393 if self.dirty():
1404 if self._gitstate() != revision:
1394 if self._gitstate() != revision:
1405 dirty = self._gitstate() == self._state[1] or code != 0
1395 dirty = self._gitstate() == self._state[1] or code != 0
1406 if _updateprompt(self._ui, self, dirty,
1396 if _updateprompt(self._ui, self, dirty,
1407 self._state[1][:7], revision[:7]):
1397 self._state[1][:7], revision[:7]):
1408 mergefunc()
1398 mergefunc()
1409 else:
1399 else:
1410 mergefunc()
1400 mergefunc()
1411
1401
1412 @annotatesubrepoerror
1402 @annotatesubrepoerror
1413 def push(self, opts):
1403 def push(self, opts):
1414 force = opts.get('force')
1404 force = opts.get('force')
1415
1405
1416 if not self._state[1]:
1406 if not self._state[1]:
1417 return True
1407 return True
1418 if self._gitmissing():
1408 if self._gitmissing():
1419 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1409 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1420 # if a branch in origin contains the revision, nothing to do
1410 # if a branch in origin contains the revision, nothing to do
1421 branch2rev, rev2branch = self._gitbranchmap()
1411 branch2rev, rev2branch = self._gitbranchmap()
1422 if self._state[1] in rev2branch:
1412 if self._state[1] in rev2branch:
1423 for b in rev2branch[self._state[1]]:
1413 for b in rev2branch[self._state[1]]:
1424 if b.startswith('refs/remotes/origin/'):
1414 if b.startswith('refs/remotes/origin/'):
1425 return True
1415 return True
1426 for b, revision in branch2rev.iteritems():
1416 for b, revision in branch2rev.iteritems():
1427 if b.startswith('refs/remotes/origin/'):
1417 if b.startswith('refs/remotes/origin/'):
1428 if self._gitisancestor(self._state[1], revision):
1418 if self._gitisancestor(self._state[1], revision):
1429 return True
1419 return True
1430 # otherwise, try to push the currently checked out branch
1420 # otherwise, try to push the currently checked out branch
1431 cmd = ['push']
1421 cmd = ['push']
1432 if force:
1422 if force:
1433 cmd.append('--force')
1423 cmd.append('--force')
1434
1424
1435 current = self._gitcurrentbranch()
1425 current = self._gitcurrentbranch()
1436 if current:
1426 if current:
1437 # determine if the current branch is even useful
1427 # determine if the current branch is even useful
1438 if not self._gitisancestor(self._state[1], current):
1428 if not self._gitisancestor(self._state[1], current):
1439 self._ui.warn(_('unrelated git branch checked out '
1429 self._ui.warn(_('unrelated git branch checked out '
1440 'in subrepo %s\n') % self._relpath)
1430 'in subrepo %s\n') % self._relpath)
1441 return False
1431 return False
1442 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1432 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1443 (current.split('/', 2)[2], self._relpath))
1433 (current.split('/', 2)[2], self._relpath))
1444 self._gitcommand(cmd + ['origin', current])
1434 self._gitcommand(cmd + ['origin', current])
1445 return True
1435 return True
1446 else:
1436 else:
1447 self._ui.warn(_('no branch checked out in subrepo %s\n'
1437 self._ui.warn(_('no branch checked out in subrepo %s\n'
1448 'cannot push revision %s\n') %
1438 'cannot push revision %s\n') %
1449 (self._relpath, self._state[1]))
1439 (self._relpath, self._state[1]))
1450 return False
1440 return False
1451
1441
1452 @annotatesubrepoerror
1442 @annotatesubrepoerror
1453 def remove(self):
1443 def remove(self):
1454 if self._gitmissing():
1444 if self._gitmissing():
1455 return
1445 return
1456 if self.dirty():
1446 if self.dirty():
1457 self._ui.warn(_('not removing repo %s because '
1447 self._ui.warn(_('not removing repo %s because '
1458 'it has changes.\n') % self._relpath)
1448 'it has changes.\n') % self._relpath)
1459 return
1449 return
1460 # we can't fully delete the repository as it may contain
1450 # we can't fully delete the repository as it may contain
1461 # local-only history
1451 # local-only history
1462 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1452 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1463 self._gitcommand(['config', 'core.bare', 'true'])
1453 self._gitcommand(['config', 'core.bare', 'true'])
1464 for f in os.listdir(self._abspath):
1454 for f in os.listdir(self._abspath):
1465 if f == '.git':
1455 if f == '.git':
1466 continue
1456 continue
1467 path = os.path.join(self._abspath, f)
1457 path = os.path.join(self._abspath, f)
1468 if os.path.isdir(path) and not os.path.islink(path):
1458 if os.path.isdir(path) and not os.path.islink(path):
1469 shutil.rmtree(path)
1459 shutil.rmtree(path)
1470 else:
1460 else:
1471 os.remove(path)
1461 os.remove(path)
1472
1462
1473 def archive(self, ui, archiver, prefix, match=None):
1463 def archive(self, ui, archiver, prefix, match=None):
1474 total = 0
1464 total = 0
1475 source, revision = self._state
1465 source, revision = self._state
1476 if not revision:
1466 if not revision:
1477 return total
1467 return total
1478 self._fetch(source, revision)
1468 self._fetch(source, revision)
1479
1469
1480 # Parse git's native archive command.
1470 # Parse git's native archive command.
1481 # This should be much faster than manually traversing the trees
1471 # This should be much faster than manually traversing the trees
1482 # and objects with many subprocess calls.
1472 # and objects with many subprocess calls.
1483 tarstream = self._gitcommand(['archive', revision], stream=True)
1473 tarstream = self._gitcommand(['archive', revision], stream=True)
1484 tar = tarfile.open(fileobj=tarstream, mode='r|')
1474 tar = tarfile.open(fileobj=tarstream, mode='r|')
1485 relpath = subrelpath(self)
1475 relpath = subrelpath(self)
1486 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1476 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1487 for i, info in enumerate(tar):
1477 for i, info in enumerate(tar):
1488 if info.isdir():
1478 if info.isdir():
1489 continue
1479 continue
1490 if match and not match(info.name):
1480 if match and not match(info.name):
1491 continue
1481 continue
1492 if info.issym():
1482 if info.issym():
1493 data = info.linkname
1483 data = info.linkname
1494 else:
1484 else:
1495 data = tar.extractfile(info).read()
1485 data = tar.extractfile(info).read()
1496 archiver.addfile(os.path.join(prefix, self._path, info.name),
1486 archiver.addfile(os.path.join(prefix, self._path, info.name),
1497 info.mode, info.issym(), data)
1487 info.mode, info.issym(), data)
1498 total += 1
1488 total += 1
1499 ui.progress(_('archiving (%s)') % relpath, i + 1,
1489 ui.progress(_('archiving (%s)') % relpath, i + 1,
1500 unit=_('files'))
1490 unit=_('files'))
1501 ui.progress(_('archiving (%s)') % relpath, None)
1491 ui.progress(_('archiving (%s)') % relpath, None)
1502 return total
1492 return total
1503
1493
1504
1494
1505 @annotatesubrepoerror
1495 @annotatesubrepoerror
1506 def status(self, rev2, **opts):
1496 def status(self, rev2, **opts):
1507 rev1 = self._state[1]
1497 rev1 = self._state[1]
1508 if self._gitmissing() or not rev1:
1498 if self._gitmissing() or not rev1:
1509 # if the repo is missing, return no results
1499 # if the repo is missing, return no results
1510 return [], [], [], [], [], [], []
1500 return [], [], [], [], [], [], []
1511 modified, added, removed = [], [], []
1501 modified, added, removed = [], [], []
1512 self._gitupdatestat()
1502 self._gitupdatestat()
1513 if rev2:
1503 if rev2:
1514 command = ['diff-tree', rev1, rev2]
1504 command = ['diff-tree', rev1, rev2]
1515 else:
1505 else:
1516 command = ['diff-index', rev1]
1506 command = ['diff-index', rev1]
1517 out = self._gitcommand(command)
1507 out = self._gitcommand(command)
1518 for line in out.split('\n'):
1508 for line in out.split('\n'):
1519 tab = line.find('\t')
1509 tab = line.find('\t')
1520 if tab == -1:
1510 if tab == -1:
1521 continue
1511 continue
1522 status, f = line[tab - 1], line[tab + 1:]
1512 status, f = line[tab - 1], line[tab + 1:]
1523 if status == 'M':
1513 if status == 'M':
1524 modified.append(f)
1514 modified.append(f)
1525 elif status == 'A':
1515 elif status == 'A':
1526 added.append(f)
1516 added.append(f)
1527 elif status == 'D':
1517 elif status == 'D':
1528 removed.append(f)
1518 removed.append(f)
1529
1519
1530 deleted = unknown = ignored = clean = []
1520 deleted = unknown = ignored = clean = []
1531 return modified, added, removed, deleted, unknown, ignored, clean
1521 return modified, added, removed, deleted, unknown, ignored, clean
1532
1522
1533 types = {
1523 types = {
1534 'hg': hgsubrepo,
1524 'hg': hgsubrepo,
1535 'svn': svnsubrepo,
1525 'svn': svnsubrepo,
1536 'git': gitsubrepo,
1526 'git': gitsubrepo,
1537 }
1527 }
@@ -1,44 +1,44 b''
1 This code uses the ast module, which was new in 2.6, so we'll skip
1 This code uses the ast module, which was new in 2.6, so we'll skip
2 this test on anything earlier.
2 this test on anything earlier.
3 $ python -c 'import sys ; assert sys.version_info >= (2, 6)' || exit 80
3 $ python -c 'import sys ; assert sys.version_info >= (2, 6)' || exit 80
4
4
5 $ import_checker="$TESTDIR"/../contrib/import-checker.py
5 $ import_checker="$TESTDIR"/../contrib/import-checker.py
6 Run the doctests from the import checker, and make sure
6 Run the doctests from the import checker, and make sure
7 it's working correctly.
7 it's working correctly.
8 $ TERM=dumb
8 $ TERM=dumb
9 $ export TERM
9 $ export TERM
10 $ python -m doctest $import_checker
10 $ python -m doctest $import_checker
11
11
12 $ cd "$TESTDIR"/..
12 $ cd "$TESTDIR"/..
13 $ if hg identify -q > /dev/null 2>&1; then :
13 $ if hg identify -q > /dev/null 2>&1; then :
14 > else
14 > else
15 > echo "skipped: not a Mercurial working dir" >&2
15 > echo "skipped: not a Mercurial working dir" >&2
16 > exit 80
16 > exit 80
17 > fi
17 > fi
18
18
19 There are a handful of cases here that require renaming a module so it
19 There are a handful of cases here that require renaming a module so it
20 doesn't overlap with a stdlib module name. There are also some cycles
20 doesn't overlap with a stdlib module name. There are also some cycles
21 here that we should still endeavor to fix, and some cycles will be
21 here that we should still endeavor to fix, and some cycles will be
22 hidden by deduplication algorithm in the cycle detector, so fixing
22 hidden by deduplication algorithm in the cycle detector, so fixing
23 these may expose other cycles.
23 these may expose other cycles.
24
24
25 $ hg locate 'mercurial/**.py' | xargs python "$import_checker"
25 $ hg locate 'mercurial/**.py' | xargs python "$import_checker"
26 mercurial/dispatch.py mixed imports
26 mercurial/dispatch.py mixed imports
27 stdlib: commands
27 stdlib: commands
28 relative: error, extensions, fancyopts, hg, hook, util
28 relative: error, extensions, fancyopts, hg, hook, util
29 mercurial/fileset.py mixed imports
29 mercurial/fileset.py mixed imports
30 stdlib: parser
30 stdlib: parser
31 relative: error, merge, util
31 relative: error, merge, util
32 mercurial/revset.py mixed imports
32 mercurial/revset.py mixed imports
33 stdlib: parser
33 stdlib: parser
34 relative: discovery, error, hbisect, phases, util
34 relative: discovery, error, hbisect, phases, util
35 mercurial/templater.py mixed imports
35 mercurial/templater.py mixed imports
36 stdlib: parser
36 stdlib: parser
37 relative: config, error, templatefilters, util
37 relative: config, error, templatefilters, util
38 mercurial/ui.py mixed imports
38 mercurial/ui.py mixed imports
39 stdlib: formatter
39 stdlib: formatter
40 relative: config, error, scmutil, util
40 relative: config, error, scmutil, util
41 Import cycle: mercurial.cmdutil -> mercurial.subrepo -> mercurial.cmdutil
42 Import cycle: mercurial.repoview -> mercurial.revset -> mercurial.repoview
41 Import cycle: mercurial.repoview -> mercurial.revset -> mercurial.repoview
43 Import cycle: mercurial.fileset -> mercurial.merge -> mercurial.subrepo -> mercurial.match -> mercurial.fileset
42 Import cycle: mercurial.fileset -> mercurial.merge -> mercurial.subrepo -> mercurial.match -> mercurial.fileset
43 Import cycle: mercurial.cmdutil -> mercurial.context -> mercurial.subrepo -> mercurial.cmdutil -> mercurial.cmdutil
44 Import cycle: mercurial.filemerge -> mercurial.match -> mercurial.fileset -> mercurial.merge -> mercurial.filemerge
44 Import cycle: mercurial.filemerge -> mercurial.match -> mercurial.fileset -> mercurial.merge -> mercurial.filemerge
General Comments 0
You need to be logged in to leave comments. Login now