##// END OF EJS Templates
subrepos: add function for iterating over ctx subrepos
Martin Geisler -
r12176:ecab1082 default
parent child Browse files
Show More
@@ -1,1300 +1,1293 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, bin, nullid, nullrev, short
8 from node import hex, bin, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, glob, tempfile
10 import os, sys, errno, re, glob, tempfile
11 import util, templater, patch, error, encoding, templatekw
11 import util, templater, patch, error, encoding, templatekw
12 import match as matchmod
12 import match as matchmod
13 import similar, revset
13 import similar, revset, subrepo
14
14
# separator character for old-style "START:END" revision range specs
# (parsed by revpair() and revrange() below)
revrangesep = ':'
16
16
def parsealiases(cmd):
    """Split a command-table key into its list of alias names.

    The key may carry a leading '^' marker (used to flag common
    commands in the help output); it is stripped before splitting
    on '|'.
    """
    names = cmd.lstrip("^")
    return names.split("|")
19
19
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry) for each matching command.

    Debug commands (or their aliases) are returned only when no normal
    command matches.
    """
    choice = {}
    debugchoice = {}
    for key in table:
        aliases = parsealiases(key)
        found = None
        if cmd in aliases:
            # exact alias match
            found = cmd
        elif not strict:
            # otherwise accept a prefix of any alias
            for alias in aliases:
                if alias.startswith(cmd):
                    found = alias
                    break
        if found is None:
            continue
        entry = (aliases, table[key])
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = entry
        else:
            choice[found] = entry

    # fall back to debug commands only when nothing else matched
    return choice or debugchoice
48
48
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string *cmd*.

    Raises error.AmbiguousCommand when several commands match, and
    error.UnknownCommand when none does.
    """
    matches = findpossible(cmd, table, strict)

    # an exact command name wins outright
    if cmd in matches:
        return matches[cmd]

    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, sorted(matches))

    # a single prefix match is unambiguous
    for entry in matches.values():
        return entry

    raise error.UnknownCommand(cmd)
65
65
def findrepo(p):
    """Walk up from directory *p* looking for a '.hg' directory.

    Return the repository root, or None when the filesystem root is
    reached without finding one.
    """
    while True:
        if os.path.isdir(os.path.join(p, ".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            # reached the filesystem root: no repository here
            return None
        p = parent
73
73
def bail_if_changed(repo):
    """Abort when the working directory is not clean.

    Raises util.Abort if a merge is in progress (second dirstate
    parent set) or if any file is modified, added, removed or deleted.
    """
    if repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    # only the first four status fields matter here; unknown/ignored
    # files do not block the operation
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise util.Abort(_("outstanding uncommitted changes"))
80
80
def logmessage(opts):
    """Get the commit message from the -m/--message or -l/--logfile option.

    Returns None when neither option was given.  Raises util.Abort when
    both options are present or the log file cannot be read.
    """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                # '-' means read the message from standard input
                message = sys.stdin.read()
            else:
                message = open(logfile).read()
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    return message
99
99
def loglimit(opts):
    """Parse the -l/--limit option and return it as an int, or None.

    A missing or empty limit yields None; a non-numeric or non-positive
    limit raises util.Abort.
    """
    raw = opts.get('limit')
    if not raw:
        # no limit requested
        return None
    try:
        value = int(raw)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if value <= 0:
        raise util.Abort(_('limit must be positive'))
    return value
113
113
def revpair(repo, revs):
    '''return pair of nodes, given list of revisions. second item can
    be None, meaning use working dir.'''

    def revfix(repo, val, defval):
        # substitute the default for an empty spec half ("" but not "0"),
        # then resolve the spec to a node
        if not val and val != 0 and defval is not None:
            val = defval
        return repo.lookup(val)

    if not revs:
        # no revisions given: first working-dir parent vs. working dir
        return repo.dirstate.parents()[0], None
    end = None
    if len(revs) == 1:
        if revrangesep in revs[0]:
            # "START:END" form; empty halves default to rev 0 / tip
            start, end = revs[0].split(revrangesep, 1)
            start = revfix(repo, start, 0)
            end = revfix(repo, end, len(repo) - 1)
        else:
            start = revfix(repo, revs[0], None)
    elif len(revs) == 2:
        # two explicit revisions; range syntax is not allowed here
        if revrangesep in revs[0] or revrangesep in revs[1]:
            raise util.Abort(_('too many revisions specified'))
        start = revfix(repo, revs[0], None)
        end = revfix(repo, revs[1], None)
    else:
        raise util.Abort(_('too many revisions specified'))
    return start, end
141
141
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications.

    Each spec may be an old-style "START:END" range, a single revision,
    or a revset query; duplicates across specs are filtered out while
    preserving first-seen order.
    """

    def revfix(repo, val, defval):
        # substitute the default for an empty spec half ("" but not "0"),
        # then resolve to a revision number
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen, l = set(), []
    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if revrangesep in spec:
                start, end = spec.split(revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                # iterate downwards when the range is reversed
                step = start > end and -1 or 1
                for rev in xrange(start, end + step, step):
                    if rev in seen:
                        continue
                    seen.add(rev)
                    l.append(rev)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            # not an old-style spec; fall through to revset parsing
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(spec)
        for r in m(repo, range(len(repo))):
            if r not in seen:
                l.append(r)
        seen.update(l)

    return l
184
184
def make_filename(repo, pat, node,
                  total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-format specifiers in the filename pattern *pat*.

    Supported specifiers (each only when the corresponding argument is
    given): %% literal percent, %b basename of the repo root, %H/%h/%R
    full hex / short hex / rev number of *node*, %r zero-padded rev
    number, %N total patch count, %n zero-padded sequence number,
    %s/%d/%p basename / dirname / full path of *pathname*.

    Raises util.Abort on a specifier that is not available.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        # node-dependent specifiers are only valid when a node is given
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        # with both total and seqno, pad the sequence number to the
        # width of the total count
        if total is not None and seqno is not None:
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        # scan the pattern, replacing each %X pair via the expander table
        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
229
229
def make_file(repo, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Return an open file-like object for the pattern *pat*.

    An empty pattern or '-' selects stdout (when *mode* opens for
    writing or appending) or stdin.  A file-like object passed as
    *pat* is returned unchanged when its direction matches *mode*.
    Anything else is treated as a filename pattern, expanded through
    make_filename() and opened with *mode*.
    """
    writable = 'w' in mode or 'a' in mode

    if not pat or pat == '-':
        if writable:
            return sys.stdout
        return sys.stdin
    if writable and hasattr(pat, 'write'):
        return pat
    if 'r' in mode and hasattr(pat, 'read'):
        return pat

    fname = make_filename(repo, pat, node, total, seqno, revwidth, pathname)
    return open(fname, mode)
244
244
def expandpats(pats):
    """Expand shell glob patterns on platforms where the shell does not.

    Patterns with an explicit kind prefix, invalid glob syntax, or no
    matches are passed through unchanged.
    """
    if not util.expandglobs:
        # the shell already expanded the patterns for us
        return list(pats)

    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is None:
            try:
                matches = glob.glob(name)
            except re.error:
                # the pattern is not valid glob syntax; keep it verbatim
                matches = [name]
            if matches:
                expanded.extend(matches)
                continue
        expanded.append(pat)
    return expanded
261
261
def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Build a match object for *pats*/*opts* rooted at the repository.

    Glob expansion is applied unless the caller already expanded the
    patterns (globbed=True) or a non-'relpath' default kind is in use.
    Bad-file reports are routed through the repo's ui as warnings.

    NOTE(review): the mutable default arguments are never mutated here,
    so they are harmless, but callers should not rely on them.
    """
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])
    m = matchmod.match(repo.root, repo.getcwd(), pats,
                       opts.get('include'), opts.get('exclude'), default,
                       auditor=repo.auditor)
    def badfn(f, msg):
        # report problem files relative to the current working directory
        repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m
272
272
def matchall(repo):
    """Return a matcher that matches every file in the repository."""
    return matchmod.always(repo.root, repo.getcwd())
275
275
def matchfiles(repo, files):
    """Return a matcher that matches exactly the given list of files."""
    return matchmod.exact(repo.root, repo.getcwd(), files)
278
278
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    """Add unknown files and remove missing ones from the dirstate.

    With *similarity* > 0, removals paired with additions of similar
    content are recorded as renames.  *dry_run* and *similarity* fall
    back to the corresponding entries in *opts* when not given.
    """
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = util.path_auditor(repo.root)
    m = match(repo, pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        # NOTE(review): bare except silently treats any failure (not just
        # audit errors) as a bad path — consider narrowing it
        except:
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            # file on disk but not tracked: schedule for add
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good
              or not os.path.lexists(target)
              or (os.path.isdir(target) and not os.path.islink(target))):
            # tracked but gone (or replaced by a directory): schedule
            # for removal
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        # pair up (removed+deleted, added+unknown) files by content
        # similarity and record them as copies
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.remove(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()
331
331
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matching *pats* to the
    destination given as the last element of *pats*.

    Returns True when at least one individual copy failed (so the
    command exits non-zero).
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}          # abstarget -> abssrc, for collision detection
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # collect copyable sources matching pat as (abs, rel, exact)
        # tuples, warning about unmanaged or to-be-removed files
        srcs = []
        badstates = after and '?' or '?r'
        m = match(repo, [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy one file and record the copy in the dirstate; returns
        # True on failure
        abstarget = util.canonpath(repo.root, cwd, otarget)
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.exists(target)
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records copies whose target already exists
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                util.copyfile(src, target)
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        origsrc = repo.dirstate.copied(abssrc) or abssrc
        if abstarget == origsrc: # copying back a copy?
            if state not in 'mn' and not dryrun:
                repo.dirstate.normallookup(abstarget)
        else:
            if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
                if not ui.quiet:
                    ui.warn(_("%s has not been committed yet, so no copy "
                              "data will be stored for %s.\n")
                            % (repo.pathto(origsrc, cwd), reltarget))
                if repo.dirstate[abstarget] in '?r' and not dryrun:
                    wctx.add([abstarget])
            elif not dryrun:
                wctx.copy(origsrc, abstarget)

        if rename and not dryrun:
            wctx.remove([abssrc], not after)

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = util.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = util.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped source paths already exist
                    # under dest; used to pick the likelier strip length
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.exists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    # the last pattern is the destination; the rest are sources
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
544
544
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    opts - command options; 'daemon', 'daemon_pipefds' and 'pid_file'
        are consulted here.
    parentfn - if given, called in the parent process with the child's
        pid after a successful daemonized start; its result is returned.
    initfn - if given, called once before the service body runs.
    runfn - the service body; its return value is returned.
    logfile - if set, stdout/stderr of the detached process are
        redirected (appending) to this file.
    runargs - argument vector used to re-spawn ourselves as the daemon
        child; defaults to the current hg command line.
    appendpid - append to (rather than overwrite) the pid file.
    '''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Parent side of daemonization: spawn a detached child and wait
        # for it to signal successful startup.
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            # The child recognizes this flag and removes lockpath once
            # it is ready (see the 'daemon_pipefds' branch below).
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # separate-argument form: drop the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child startup is complete once it has removed lockpath
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                # the child normally removed it already; only a missing
                # file is expected here
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if opts['pid_file']:
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # Child side of daemonization: detach from the controlling
        # terminal, signal the parent, and redirect stdio.
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # not available on this platform (e.g. Windows)
            pass
        # removing the lock file tells the waiting parent we are up
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(util.nulldev, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        # stdin from the null device; stdout/stderr to the log (or null)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
616
616
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    Writes one patch per revision in revs, either to fp (when given)
    or to per-revision files named via template (expanded by
    make_file). Each patch consists of the "# HG changeset patch"
    header block, the changeset description, and the diff against the
    selected parent. With switch_parent, the diff is taken against the
    second parent instead of the first.
    '''

    total = len(revs)
    # widest revision number, used to pad %r in the filename template
    revwidth = max([len(str(rev)) for rev in revs])

    def single(rev, seqno, fp):
        # write one patch; seqno is 1-based and feeds %n in the template
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        if not fp:
            fp = make_file(repo, template, node, total=total, seqno=seqno,
                           revwidth=revwidth, mode='ab')
        if fp != sys.stdout and hasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        fp.write("# HG changeset patch\n")
        fp.write("# User %s\n" % ctx.user())
        fp.write("# Date %d %d\n" % ctx.date())
        # the default branch is implicit and not recorded in the header
        if branch and branch != 'default':
            fp.write("# Branch %s\n" % branch)
        fp.write("# Node ID %s\n" % hex(node))
        fp.write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            fp.write("# Parent %s\n" % hex(parents[1]))
        fp.write(ctx.description().rstrip())
        fp.write("\n\n")

        for chunk in patch.diff(repo, prev, node, opts=opts):
            fp.write(chunk)

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
656
656
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.

    Writes the diff (or, with stat=True, a diffstat summary) between
    node1 and node2 for files selected by match. Output goes through
    ui.write unless fp is given, in which case labels are dropped and
    raw text is written to fp. With listsubrepos, the diff of each
    subrepository is appended as well.
    '''
    if fp is None:
        write = ui.write
    else:
        # a plain file object has no notion of labels; discard them
        def write(s, **kw):
            fp.write(s)

    if stat:
        # diffstat only counts changed lines, so drop context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = util.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            # Use a per-subrepo local instead of rebinding node2:
            # rebinding leaked one subrepo's node into the next
            # iteration's "node2 is not None" test.
            node2sub = node2
            if node2 is not None:
                try:
                    node2sub = bin(ctx2.substate[subpath][1])
                except KeyError:
                    # itersubrepos() also yields subpaths known only to
                    # ctx1 (e.g. a subrepo removed in ctx2), so ctx2 has
                    # no recorded state for them; diff with no target
                    # node instead of crashing.
                    node2sub = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(diffopts, node2sub, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
699
692
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        # when buffered, output is collected per-rev and emitted by flush()
        self.buffered = buffered
        # patch is a matcher (or False) selecting files to show diffs for
        self.patch = patch
        self.diffopts = diffopts
        self.header = {}        # rev -> buffered header text
        self.hunk = {}          # rev -> buffered changeset text
        self.lastheader = None  # last header written, to avoid repeats
        self.footer = None

    def flush(self, rev):
        # Emit any buffered header/hunk for rev.
        # Returns 1 if a hunk was written, 0 otherwise.
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        # Render ctx either directly or into the per-rev buffer,
        # preserving any labels applied during rendering.
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            # quiet mode: just "rev:shortnode"
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        # full hashes in debug mode, short hashes otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset')

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            branch = encoding.tolocal(branch)
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag,
                          label='log.tag')
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent,
                          label='log.parent')

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified/added/removed file lists relative to first parent
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                # full description in verbose mode
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # otherwise just the first line
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        # Show diff and/or diffstat for node against its first parent,
        # if patch output was requested.
        if not matchfn:
            matchfn = self.patch
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    # blank line between diffstat and the diff itself
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
853
846
854
847
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        # full node hashes in debug mode, 12-char short hashes otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            # a merge: both parents are meaningful
            return parents
        if self.ui.debugflag:
            # debug: always show both slots, padding with the null rev
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            # linear history: the preceding rev is implied
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        # later (mode, postfix) entries override earlier ones when the
        # corresponding '<type>_<postfix>' template exists in the map
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                # the footer is rendered once and emitted by close()
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                      **props))

        except KeyError, inst:
            # unknown template keyword: report which map file lacked it
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
956
949
957 def show_changeset(ui, repo, opts, buffered=False):
950 def show_changeset(ui, repo, opts, buffered=False):
958 """show one changeset using template or regular display.
951 """show one changeset using template or regular display.
959
952
960 Display format will be the first non-empty hit of:
953 Display format will be the first non-empty hit of:
961 1. option 'template'
954 1. option 'template'
962 2. option 'style'
955 2. option 'style'
963 3. [ui] setting 'logtemplate'
956 3. [ui] setting 'logtemplate'
964 4. [ui] setting 'style'
957 4. [ui] setting 'style'
965 If all of these values are either the unset or the empty string,
958 If all of these values are either the unset or the empty string,
966 regular display via changeset_printer() is done.
959 regular display via changeset_printer() is done.
967 """
960 """
968 # options
961 # options
969 patch = False
962 patch = False
970 if opts.get('patch') or opts.get('stat'):
963 if opts.get('patch') or opts.get('stat'):
971 patch = matchall(repo)
964 patch = matchall(repo)
972
965
973 tmpl = opts.get('template')
966 tmpl = opts.get('template')
974 style = None
967 style = None
975 if tmpl:
968 if tmpl:
976 tmpl = templater.parsestring(tmpl, quoted=False)
969 tmpl = templater.parsestring(tmpl, quoted=False)
977 else:
970 else:
978 style = opts.get('style')
971 style = opts.get('style')
979
972
980 # ui settings
973 # ui settings
981 if not (tmpl or style):
974 if not (tmpl or style):
982 tmpl = ui.config('ui', 'logtemplate')
975 tmpl = ui.config('ui', 'logtemplate')
983 if tmpl:
976 if tmpl:
984 tmpl = templater.parsestring(tmpl)
977 tmpl = templater.parsestring(tmpl)
985 else:
978 else:
986 style = util.expandpath(ui.config('ui', 'style', ''))
979 style = util.expandpath(ui.config('ui', 'style', ''))
987
980
988 if not (tmpl or style):
981 if not (tmpl or style):
989 return changeset_printer(ui, repo, patch, opts, buffered)
982 return changeset_printer(ui, repo, patch, opts, buffered)
990
983
991 mapfile = None
984 mapfile = None
992 if style and not tmpl:
985 if style and not tmpl:
993 mapfile = style
986 mapfile = style
994 if not os.path.split(mapfile)[0]:
987 if not os.path.split(mapfile)[0]:
995 mapname = (templater.templatepath('map-cmdline.' + mapfile)
988 mapname = (templater.templatepath('map-cmdline.' + mapfile)
996 or templater.templatepath(mapfile))
989 or templater.templatepath(mapfile))
997 if mapname:
990 if mapname:
998 mapfile = mapname
991 mapfile = mapname
999
992
1000 try:
993 try:
1001 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
994 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
1002 except SyntaxError, inst:
995 except SyntaxError, inst:
1003 raise util.Abort(inst.args[0])
996 raise util.Abort(inst.args[0])
1004 if tmpl:
997 if tmpl:
1005 t.use_template(tmpl)
998 t.use_template(tmpl)
1006 return t
999 return t
1007
1000
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matched = {}  # rev -> (unixtime, tzoffset) for revisions whose date fits

    def record(ctx, fns):
        # prepare hook: remember every revision whose date matches
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    # walkchangerevs yields in the default (tipmost-first) order, so the
    # first recorded hit we see is the answer.
    for ctx in walkchangerevs(repo, matchall(repo), {'rev': None}, record):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("Found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
1028
1021
1029 def walkchangerevs(repo, match, opts, prepare):
1022 def walkchangerevs(repo, match, opts, prepare):
1030 '''Iterate over files and the revs in which they changed.
1023 '''Iterate over files and the revs in which they changed.
1031
1024
1032 Callers most commonly need to iterate backwards over the history
1025 Callers most commonly need to iterate backwards over the history
1033 in which they are interested. Doing so has awful (quadratic-looking)
1026 in which they are interested. Doing so has awful (quadratic-looking)
1034 performance, so we use iterators in a "windowed" way.
1027 performance, so we use iterators in a "windowed" way.
1035
1028
1036 We walk a window of revisions in the desired order. Within the
1029 We walk a window of revisions in the desired order. Within the
1037 window, we first walk forwards to gather data, then in the desired
1030 window, we first walk forwards to gather data, then in the desired
1038 order (usually backwards) to display it.
1031 order (usually backwards) to display it.
1039
1032
1040 This function returns an iterator yielding contexts. Before
1033 This function returns an iterator yielding contexts. Before
1041 yielding each context, the iterator will first call the prepare
1034 yielding each context, the iterator will first call the prepare
1042 function on each context in the window in forward order.'''
1035 function on each context in the window in forward order.'''
1043
1036
1044 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1037 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1045 if start < end:
1038 if start < end:
1046 while start < end:
1039 while start < end:
1047 yield start, min(windowsize, end - start)
1040 yield start, min(windowsize, end - start)
1048 start += windowsize
1041 start += windowsize
1049 if windowsize < sizelimit:
1042 if windowsize < sizelimit:
1050 windowsize *= 2
1043 windowsize *= 2
1051 else:
1044 else:
1052 while start > end:
1045 while start > end:
1053 yield start, min(windowsize, start - end - 1)
1046 yield start, min(windowsize, start - end - 1)
1054 start -= windowsize
1047 start -= windowsize
1055 if windowsize < sizelimit:
1048 if windowsize < sizelimit:
1056 windowsize *= 2
1049 windowsize *= 2
1057
1050
1058 follow = opts.get('follow') or opts.get('follow_first')
1051 follow = opts.get('follow') or opts.get('follow_first')
1059
1052
1060 if not len(repo):
1053 if not len(repo):
1061 return []
1054 return []
1062
1055
1063 if follow:
1056 if follow:
1064 defrange = '%s:0' % repo['.'].rev()
1057 defrange = '%s:0' % repo['.'].rev()
1065 else:
1058 else:
1066 defrange = '-1:0'
1059 defrange = '-1:0'
1067 revs = revrange(repo, opts['rev'] or [defrange])
1060 revs = revrange(repo, opts['rev'] or [defrange])
1068 if not revs:
1061 if not revs:
1069 return []
1062 return []
1070 wanted = set()
1063 wanted = set()
1071 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1064 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1072 fncache = {}
1065 fncache = {}
1073 change = util.cachefunc(repo.changectx)
1066 change = util.cachefunc(repo.changectx)
1074
1067
1075 # First step is to fill wanted, the set of revisions that we want to yield.
1068 # First step is to fill wanted, the set of revisions that we want to yield.
1076 # When it does not induce extra cost, we also fill fncache for revisions in
1069 # When it does not induce extra cost, we also fill fncache for revisions in
1077 # wanted: a cache of filenames that were changed (ctx.files()) and that
1070 # wanted: a cache of filenames that were changed (ctx.files()) and that
1078 # match the file filtering conditions.
1071 # match the file filtering conditions.
1079
1072
1080 if not slowpath and not match.files():
1073 if not slowpath and not match.files():
1081 # No files, no patterns. Display all revs.
1074 # No files, no patterns. Display all revs.
1082 wanted = set(revs)
1075 wanted = set(revs)
1083 copies = []
1076 copies = []
1084
1077
1085 if not slowpath:
1078 if not slowpath:
1086 # We only have to read through the filelog to find wanted revisions
1079 # We only have to read through the filelog to find wanted revisions
1087
1080
1088 minrev, maxrev = min(revs), max(revs)
1081 minrev, maxrev = min(revs), max(revs)
1089 def filerevgen(filelog, last):
1082 def filerevgen(filelog, last):
1090 """
1083 """
1091 Only files, no patterns. Check the history of each file.
1084 Only files, no patterns. Check the history of each file.
1092
1085
1093 Examines filelog entries within minrev, maxrev linkrev range
1086 Examines filelog entries within minrev, maxrev linkrev range
1094 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1087 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1095 tuples in backwards order
1088 tuples in backwards order
1096 """
1089 """
1097 cl_count = len(repo)
1090 cl_count = len(repo)
1098 revs = []
1091 revs = []
1099 for j in xrange(0, last + 1):
1092 for j in xrange(0, last + 1):
1100 linkrev = filelog.linkrev(j)
1093 linkrev = filelog.linkrev(j)
1101 if linkrev < minrev:
1094 if linkrev < minrev:
1102 continue
1095 continue
1103 # only yield rev for which we have the changelog, it can
1096 # only yield rev for which we have the changelog, it can
1104 # happen while doing "hg log" during a pull or commit
1097 # happen while doing "hg log" during a pull or commit
1105 if linkrev > maxrev or linkrev >= cl_count:
1098 if linkrev > maxrev or linkrev >= cl_count:
1106 break
1099 break
1107
1100
1108 parentlinkrevs = []
1101 parentlinkrevs = []
1109 for p in filelog.parentrevs(j):
1102 for p in filelog.parentrevs(j):
1110 if p != nullrev:
1103 if p != nullrev:
1111 parentlinkrevs.append(filelog.linkrev(p))
1104 parentlinkrevs.append(filelog.linkrev(p))
1112 n = filelog.node(j)
1105 n = filelog.node(j)
1113 revs.append((linkrev, parentlinkrevs,
1106 revs.append((linkrev, parentlinkrevs,
1114 follow and filelog.renamed(n)))
1107 follow and filelog.renamed(n)))
1115
1108
1116 return reversed(revs)
1109 return reversed(revs)
1117 def iterfiles():
1110 def iterfiles():
1118 for filename in match.files():
1111 for filename in match.files():
1119 yield filename, None
1112 yield filename, None
1120 for filename_node in copies:
1113 for filename_node in copies:
1121 yield filename_node
1114 yield filename_node
1122 for file_, node in iterfiles():
1115 for file_, node in iterfiles():
1123 filelog = repo.file(file_)
1116 filelog = repo.file(file_)
1124 if not len(filelog):
1117 if not len(filelog):
1125 if node is None:
1118 if node is None:
1126 # A zero count may be a directory or deleted file, so
1119 # A zero count may be a directory or deleted file, so
1127 # try to find matching entries on the slow path.
1120 # try to find matching entries on the slow path.
1128 if follow:
1121 if follow:
1129 raise util.Abort(
1122 raise util.Abort(
1130 _('cannot follow nonexistent file: "%s"') % file_)
1123 _('cannot follow nonexistent file: "%s"') % file_)
1131 slowpath = True
1124 slowpath = True
1132 break
1125 break
1133 else:
1126 else:
1134 continue
1127 continue
1135
1128
1136 if node is None:
1129 if node is None:
1137 last = len(filelog) - 1
1130 last = len(filelog) - 1
1138 else:
1131 else:
1139 last = filelog.rev(node)
1132 last = filelog.rev(node)
1140
1133
1141
1134
1142 # keep track of all ancestors of the file
1135 # keep track of all ancestors of the file
1143 ancestors = set([filelog.linkrev(last)])
1136 ancestors = set([filelog.linkrev(last)])
1144
1137
1145 # iterate from latest to oldest revision
1138 # iterate from latest to oldest revision
1146 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1139 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1147 if rev not in ancestors:
1140 if rev not in ancestors:
1148 continue
1141 continue
1149 # XXX insert 1327 fix here
1142 # XXX insert 1327 fix here
1150 if flparentlinkrevs:
1143 if flparentlinkrevs:
1151 ancestors.update(flparentlinkrevs)
1144 ancestors.update(flparentlinkrevs)
1152
1145
1153 fncache.setdefault(rev, []).append(file_)
1146 fncache.setdefault(rev, []).append(file_)
1154 wanted.add(rev)
1147 wanted.add(rev)
1155 if copied:
1148 if copied:
1156 copies.append(copied)
1149 copies.append(copied)
1157 if slowpath:
1150 if slowpath:
1158 # We have to read the changelog to match filenames against
1151 # We have to read the changelog to match filenames against
1159 # changed files
1152 # changed files
1160
1153
1161 if follow:
1154 if follow:
1162 raise util.Abort(_('can only follow copies/renames for explicit '
1155 raise util.Abort(_('can only follow copies/renames for explicit '
1163 'filenames'))
1156 'filenames'))
1164
1157
1165 # The slow path checks files modified in every changeset.
1158 # The slow path checks files modified in every changeset.
1166 for i in sorted(revs):
1159 for i in sorted(revs):
1167 ctx = change(i)
1160 ctx = change(i)
1168 matches = filter(match, ctx.files())
1161 matches = filter(match, ctx.files())
1169 if matches:
1162 if matches:
1170 fncache[i] = matches
1163 fncache[i] = matches
1171 wanted.add(i)
1164 wanted.add(i)
1172
1165
1173 class followfilter(object):
1166 class followfilter(object):
1174 def __init__(self, onlyfirst=False):
1167 def __init__(self, onlyfirst=False):
1175 self.startrev = nullrev
1168 self.startrev = nullrev
1176 self.roots = set()
1169 self.roots = set()
1177 self.onlyfirst = onlyfirst
1170 self.onlyfirst = onlyfirst
1178
1171
1179 def match(self, rev):
1172 def match(self, rev):
1180 def realparents(rev):
1173 def realparents(rev):
1181 if self.onlyfirst:
1174 if self.onlyfirst:
1182 return repo.changelog.parentrevs(rev)[0:1]
1175 return repo.changelog.parentrevs(rev)[0:1]
1183 else:
1176 else:
1184 return filter(lambda x: x != nullrev,
1177 return filter(lambda x: x != nullrev,
1185 repo.changelog.parentrevs(rev))
1178 repo.changelog.parentrevs(rev))
1186
1179
1187 if self.startrev == nullrev:
1180 if self.startrev == nullrev:
1188 self.startrev = rev
1181 self.startrev = rev
1189 return True
1182 return True
1190
1183
1191 if rev > self.startrev:
1184 if rev > self.startrev:
1192 # forward: all descendants
1185 # forward: all descendants
1193 if not self.roots:
1186 if not self.roots:
1194 self.roots.add(self.startrev)
1187 self.roots.add(self.startrev)
1195 for parent in realparents(rev):
1188 for parent in realparents(rev):
1196 if parent in self.roots:
1189 if parent in self.roots:
1197 self.roots.add(rev)
1190 self.roots.add(rev)
1198 return True
1191 return True
1199 else:
1192 else:
1200 # backwards: all parents
1193 # backwards: all parents
1201 if not self.roots:
1194 if not self.roots:
1202 self.roots.update(realparents(self.startrev))
1195 self.roots.update(realparents(self.startrev))
1203 if rev in self.roots:
1196 if rev in self.roots:
1204 self.roots.remove(rev)
1197 self.roots.remove(rev)
1205 self.roots.update(realparents(rev))
1198 self.roots.update(realparents(rev))
1206 return True
1199 return True
1207
1200
1208 return False
1201 return False
1209
1202
1210 # it might be worthwhile to do this in the iterator if the rev range
1203 # it might be worthwhile to do this in the iterator if the rev range
1211 # is descending and the prune args are all within that range
1204 # is descending and the prune args are all within that range
1212 for rev in opts.get('prune', ()):
1205 for rev in opts.get('prune', ()):
1213 rev = repo.changelog.rev(repo.lookup(rev))
1206 rev = repo.changelog.rev(repo.lookup(rev))
1214 ff = followfilter()
1207 ff = followfilter()
1215 stop = min(revs[0], revs[-1])
1208 stop = min(revs[0], revs[-1])
1216 for x in xrange(rev, stop - 1, -1):
1209 for x in xrange(rev, stop - 1, -1):
1217 if ff.match(x):
1210 if ff.match(x):
1218 wanted.discard(x)
1211 wanted.discard(x)
1219
1212
1220 # Now that wanted is correctly initialized, we can iterate over the
1213 # Now that wanted is correctly initialized, we can iterate over the
1221 # revision range, yielding only revisions in wanted.
1214 # revision range, yielding only revisions in wanted.
1222 def iterate():
1215 def iterate():
1223 if follow and not match.files():
1216 if follow and not match.files():
1224 ff = followfilter(onlyfirst=opts.get('follow_first'))
1217 ff = followfilter(onlyfirst=opts.get('follow_first'))
1225 def want(rev):
1218 def want(rev):
1226 return ff.match(rev) and rev in wanted
1219 return ff.match(rev) and rev in wanted
1227 else:
1220 else:
1228 def want(rev):
1221 def want(rev):
1229 return rev in wanted
1222 return rev in wanted
1230
1223
1231 for i, window in increasing_windows(0, len(revs)):
1224 for i, window in increasing_windows(0, len(revs)):
1232 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1225 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1233 for rev in sorted(nrevs):
1226 for rev in sorted(nrevs):
1234 fns = fncache.get(rev)
1227 fns = fncache.get(rev)
1235 ctx = change(rev)
1228 ctx = change(rev)
1236 if not fns:
1229 if not fns:
1237 def fns_generator():
1230 def fns_generator():
1238 for f in ctx.files():
1231 for f in ctx.files():
1239 if match(f):
1232 if match(f):
1240 yield f
1233 yield f
1241 fns = fns_generator()
1234 fns = fns_generator()
1242 prepare(ctx, fns)
1235 prepare(ctx, fns)
1243 for rev in nrevs:
1236 for rev in nrevs:
1244 yield change(rev)
1237 yield change(rev)
1245 return iterate()
1238 return iterate()
1246
1239
1247 def commit(ui, repo, commitfunc, pats, opts):
1240 def commit(ui, repo, commitfunc, pats, opts):
1248 '''commit the specified files or all outstanding changes'''
1241 '''commit the specified files or all outstanding changes'''
1249 date = opts.get('date')
1242 date = opts.get('date')
1250 if date:
1243 if date:
1251 opts['date'] = util.parsedate(date)
1244 opts['date'] = util.parsedate(date)
1252 message = logmessage(opts)
1245 message = logmessage(opts)
1253
1246
1254 # extract addremove carefully -- this function can be called from a command
1247 # extract addremove carefully -- this function can be called from a command
1255 # that doesn't support addremove
1248 # that doesn't support addremove
1256 if opts.get('addremove'):
1249 if opts.get('addremove'):
1257 addremove(repo, pats, opts)
1250 addremove(repo, pats, opts)
1258
1251
1259 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1252 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1260
1253
1261 def commiteditor(repo, ctx, subs):
1254 def commiteditor(repo, ctx, subs):
1262 if ctx.description():
1255 if ctx.description():
1263 return ctx.description()
1256 return ctx.description()
1264 return commitforceeditor(repo, ctx, subs)
1257 return commitforceeditor(repo, ctx, subs)
1265
1258
1266 def commitforceeditor(repo, ctx, subs):
1259 def commitforceeditor(repo, ctx, subs):
1267 edittext = []
1260 edittext = []
1268 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1261 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1269 if ctx.description():
1262 if ctx.description():
1270 edittext.append(ctx.description())
1263 edittext.append(ctx.description())
1271 edittext.append("")
1264 edittext.append("")
1272 edittext.append("") # Empty line between message and comments.
1265 edittext.append("") # Empty line between message and comments.
1273 edittext.append(_("HG: Enter commit message."
1266 edittext.append(_("HG: Enter commit message."
1274 " Lines beginning with 'HG:' are removed."))
1267 " Lines beginning with 'HG:' are removed."))
1275 edittext.append(_("HG: Leave message empty to abort commit."))
1268 edittext.append(_("HG: Leave message empty to abort commit."))
1276 edittext.append("HG: --")
1269 edittext.append("HG: --")
1277 edittext.append(_("HG: user: %s") % ctx.user())
1270 edittext.append(_("HG: user: %s") % ctx.user())
1278 if ctx.p2():
1271 if ctx.p2():
1279 edittext.append(_("HG: branch merge"))
1272 edittext.append(_("HG: branch merge"))
1280 if ctx.branch():
1273 if ctx.branch():
1281 edittext.append(_("HG: branch '%s'")
1274 edittext.append(_("HG: branch '%s'")
1282 % encoding.tolocal(ctx.branch()))
1275 % encoding.tolocal(ctx.branch()))
1283 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1276 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1284 edittext.extend([_("HG: added %s") % f for f in added])
1277 edittext.extend([_("HG: added %s") % f for f in added])
1285 edittext.extend([_("HG: changed %s") % f for f in modified])
1278 edittext.extend([_("HG: changed %s") % f for f in modified])
1286 edittext.extend([_("HG: removed %s") % f for f in removed])
1279 edittext.extend([_("HG: removed %s") % f for f in removed])
1287 if not added and not modified and not removed:
1280 if not added and not modified and not removed:
1288 edittext.append(_("HG: no files changed"))
1281 edittext.append(_("HG: no files changed"))
1289 edittext.append("")
1282 edittext.append("")
1290 # run editor in the repository root
1283 # run editor in the repository root
1291 olddir = os.getcwd()
1284 olddir = os.getcwd()
1292 os.chdir(repo.root)
1285 os.chdir(repo.root)
1293 text = repo.ui.edit("\n".join(edittext), ctx.user())
1286 text = repo.ui.edit("\n".join(edittext), ctx.user())
1294 text = re.sub("(?m)^HG:.*\n", "", text)
1287 text = re.sub("(?m)^HG:.*\n", "", text)
1295 os.chdir(olddir)
1288 os.chdir(olddir)
1296
1289
1297 if not text.strip():
1290 if not text.strip():
1298 raise util.Abort(_("empty commit message"))
1291 raise util.Abort(_("empty commit message"))
1299
1292
1300 return text
1293 return text
@@ -1,1870 +1,1863 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
25
25
26 def __init__(self, baseui, path=None, create=0):
26 def __init__(self, baseui, path=None, create=0):
27 repo.repository.__init__(self)
27 repo.repository.__init__(self)
28 self.root = os.path.realpath(util.expandpath(path))
28 self.root = os.path.realpath(util.expandpath(path))
29 self.path = os.path.join(self.root, ".hg")
29 self.path = os.path.join(self.root, ".hg")
30 self.origroot = path
30 self.origroot = path
31 self.auditor = util.path_auditor(self.root, self._checknested)
31 self.auditor = util.path_auditor(self.root, self._checknested)
32 self.opener = util.opener(self.path)
32 self.opener = util.opener(self.path)
33 self.wopener = util.opener(self.root)
33 self.wopener = util.opener(self.root)
34 self.baseui = baseui
34 self.baseui = baseui
35 self.ui = baseui.copy()
35 self.ui = baseui.copy()
36
36
37 try:
37 try:
38 self.ui.readconfig(self.join("hgrc"), self.root)
38 self.ui.readconfig(self.join("hgrc"), self.root)
39 extensions.loadall(self.ui)
39 extensions.loadall(self.ui)
40 except IOError:
40 except IOError:
41 pass
41 pass
42
42
43 if not os.path.isdir(self.path):
43 if not os.path.isdir(self.path):
44 if create:
44 if create:
45 if not os.path.exists(path):
45 if not os.path.exists(path):
46 util.makedirs(path)
46 util.makedirs(path)
47 os.mkdir(self.path)
47 os.mkdir(self.path)
48 requirements = ["revlogv1"]
48 requirements = ["revlogv1"]
49 if self.ui.configbool('format', 'usestore', True):
49 if self.ui.configbool('format', 'usestore', True):
50 os.mkdir(os.path.join(self.path, "store"))
50 os.mkdir(os.path.join(self.path, "store"))
51 requirements.append("store")
51 requirements.append("store")
52 if self.ui.configbool('format', 'usefncache', True):
52 if self.ui.configbool('format', 'usefncache', True):
53 requirements.append("fncache")
53 requirements.append("fncache")
54 # create an invalid changelog
54 # create an invalid changelog
55 self.opener("00changelog.i", "a").write(
55 self.opener("00changelog.i", "a").write(
56 '\0\0\0\2' # represents revlogv2
56 '\0\0\0\2' # represents revlogv2
57 ' dummy changelog to prevent using the old repo layout'
57 ' dummy changelog to prevent using the old repo layout'
58 )
58 )
59 if self.ui.configbool('format', 'parentdelta', False):
59 if self.ui.configbool('format', 'parentdelta', False):
60 requirements.append("parentdelta")
60 requirements.append("parentdelta")
61 reqfile = self.opener("requires", "w")
61 reqfile = self.opener("requires", "w")
62 for r in requirements:
62 for r in requirements:
63 reqfile.write("%s\n" % r)
63 reqfile.write("%s\n" % r)
64 reqfile.close()
64 reqfile.close()
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self.sopener.options = {}
96 self.sopener.options = {}
97 if 'parentdelta' in requirements:
97 if 'parentdelta' in requirements:
98 self.sopener.options['parentdelta'] = 1
98 self.sopener.options['parentdelta'] = 1
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None # in UTF-8
108 self._branchcache = None # in UTF-8
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _checknested(self, path):
115 def _checknested(self, path):
116 """Determine if path is a legal nested repository."""
116 """Determine if path is a legal nested repository."""
117 if not path.startswith(self.root):
117 if not path.startswith(self.root):
118 return False
118 return False
119 subpath = path[len(self.root) + 1:]
119 subpath = path[len(self.root) + 1:]
120
120
121 # XXX: Checking against the current working copy is wrong in
121 # XXX: Checking against the current working copy is wrong in
122 # the sense that it can reject things like
122 # the sense that it can reject things like
123 #
123 #
124 # $ hg cat -r 10 sub/x.txt
124 # $ hg cat -r 10 sub/x.txt
125 #
125 #
126 # if sub/ is no longer a subrepository in the working copy
126 # if sub/ is no longer a subrepository in the working copy
127 # parent revision.
127 # parent revision.
128 #
128 #
129 # However, it can of course also allow things that would have
129 # However, it can of course also allow things that would have
130 # been rejected before, such as the above cat command if sub/
130 # been rejected before, such as the above cat command if sub/
131 # is a subrepository now, but was a normal directory before.
131 # is a subrepository now, but was a normal directory before.
132 # The old path auditor would have rejected by mistake since it
132 # The old path auditor would have rejected by mistake since it
133 # panics when it sees sub/.hg/.
133 # panics when it sees sub/.hg/.
134 #
134 #
135 # All in all, checking against the working copy seems sensible
135 # All in all, checking against the working copy seems sensible
136 # since we want to prevent access to nested repositories on
136 # since we want to prevent access to nested repositories on
137 # the filesystem *now*.
137 # the filesystem *now*.
138 ctx = self[None]
138 ctx = self[None]
139 parts = util.splitpath(subpath)
139 parts = util.splitpath(subpath)
140 while parts:
140 while parts:
141 prefix = os.sep.join(parts)
141 prefix = os.sep.join(parts)
142 if prefix in ctx.substate:
142 if prefix in ctx.substate:
143 if prefix == subpath:
143 if prefix == subpath:
144 return True
144 return True
145 else:
145 else:
146 sub = ctx.sub(prefix)
146 sub = ctx.sub(prefix)
147 return sub.checknested(subpath[len(prefix) + 1:])
147 return sub.checknested(subpath[len(prefix) + 1:])
148 else:
148 else:
149 parts.pop()
149 parts.pop()
150 return False
150 return False
151
151
152
152
153 @propertycache
153 @propertycache
154 def changelog(self):
154 def changelog(self):
155 c = changelog.changelog(self.sopener)
155 c = changelog.changelog(self.sopener)
156 if 'HG_PENDING' in os.environ:
156 if 'HG_PENDING' in os.environ:
157 p = os.environ['HG_PENDING']
157 p = os.environ['HG_PENDING']
158 if p.startswith(self.root):
158 if p.startswith(self.root):
159 c.readpending('00changelog.i.a')
159 c.readpending('00changelog.i.a')
160 self.sopener.options['defversion'] = c.version
160 self.sopener.options['defversion'] = c.version
161 return c
161 return c
162
162
163 @propertycache
163 @propertycache
164 def manifest(self):
164 def manifest(self):
165 return manifest.manifest(self.sopener)
165 return manifest.manifest(self.sopener)
166
166
167 @propertycache
167 @propertycache
168 def dirstate(self):
168 def dirstate(self):
169 return dirstate.dirstate(self.opener, self.ui, self.root)
169 return dirstate.dirstate(self.opener, self.ui, self.root)
170
170
171 def __getitem__(self, changeid):
171 def __getitem__(self, changeid):
172 if changeid is None:
172 if changeid is None:
173 return context.workingctx(self)
173 return context.workingctx(self)
174 return context.changectx(self, changeid)
174 return context.changectx(self, changeid)
175
175
176 def __contains__(self, changeid):
176 def __contains__(self, changeid):
177 try:
177 try:
178 return bool(self.lookup(changeid))
178 return bool(self.lookup(changeid))
179 except error.RepoLookupError:
179 except error.RepoLookupError:
180 return False
180 return False
181
181
182 def __nonzero__(self):
182 def __nonzero__(self):
183 return True
183 return True
184
184
185 def __len__(self):
185 def __len__(self):
186 return len(self.changelog)
186 return len(self.changelog)
187
187
188 def __iter__(self):
188 def __iter__(self):
189 for i in xrange(len(self)):
189 for i in xrange(len(self)):
190 yield i
190 yield i
191
191
192 def url(self):
192 def url(self):
193 return 'file:' + self.root
193 return 'file:' + self.root
194
194
195 def hook(self, name, throw=False, **args):
195 def hook(self, name, throw=False, **args):
196 return hook.hook(self.ui, self, name, throw, **args)
196 return hook.hook(self.ui, self, name, throw, **args)
197
197
198 tag_disallowed = ':\r\n'
198 tag_disallowed = ':\r\n'
199
199
200 def _tag(self, names, node, message, local, user, date, extra={}):
200 def _tag(self, names, node, message, local, user, date, extra={}):
201 if isinstance(names, str):
201 if isinstance(names, str):
202 allchars = names
202 allchars = names
203 names = (names,)
203 names = (names,)
204 else:
204 else:
205 allchars = ''.join(names)
205 allchars = ''.join(names)
206 for c in self.tag_disallowed:
206 for c in self.tag_disallowed:
207 if c in allchars:
207 if c in allchars:
208 raise util.Abort(_('%r cannot be used in a tag name') % c)
208 raise util.Abort(_('%r cannot be used in a tag name') % c)
209
209
210 branches = self.branchmap()
210 branches = self.branchmap()
211 for name in names:
211 for name in names:
212 self.hook('pretag', throw=True, node=hex(node), tag=name,
212 self.hook('pretag', throw=True, node=hex(node), tag=name,
213 local=local)
213 local=local)
214 if name in branches:
214 if name in branches:
215 self.ui.warn(_("warning: tag %s conflicts with existing"
215 self.ui.warn(_("warning: tag %s conflicts with existing"
216 " branch name\n") % name)
216 " branch name\n") % name)
217
217
218 def writetags(fp, names, munge, prevtags):
218 def writetags(fp, names, munge, prevtags):
219 fp.seek(0, 2)
219 fp.seek(0, 2)
220 if prevtags and prevtags[-1] != '\n':
220 if prevtags and prevtags[-1] != '\n':
221 fp.write('\n')
221 fp.write('\n')
222 for name in names:
222 for name in names:
223 m = munge and munge(name) or name
223 m = munge and munge(name) or name
224 if self._tagtypes and name in self._tagtypes:
224 if self._tagtypes and name in self._tagtypes:
225 old = self._tags.get(name, nullid)
225 old = self._tags.get(name, nullid)
226 fp.write('%s %s\n' % (hex(old), m))
226 fp.write('%s %s\n' % (hex(old), m))
227 fp.write('%s %s\n' % (hex(node), m))
227 fp.write('%s %s\n' % (hex(node), m))
228 fp.close()
228 fp.close()
229
229
230 prevtags = ''
230 prevtags = ''
231 if local:
231 if local:
232 try:
232 try:
233 fp = self.opener('localtags', 'r+')
233 fp = self.opener('localtags', 'r+')
234 except IOError:
234 except IOError:
235 fp = self.opener('localtags', 'a')
235 fp = self.opener('localtags', 'a')
236 else:
236 else:
237 prevtags = fp.read()
237 prevtags = fp.read()
238
238
239 # local tags are stored in the current charset
239 # local tags are stored in the current charset
240 writetags(fp, names, None, prevtags)
240 writetags(fp, names, None, prevtags)
241 for name in names:
241 for name in names:
242 self.hook('tag', node=hex(node), tag=name, local=local)
242 self.hook('tag', node=hex(node), tag=name, local=local)
243 return
243 return
244
244
245 try:
245 try:
246 fp = self.wfile('.hgtags', 'rb+')
246 fp = self.wfile('.hgtags', 'rb+')
247 except IOError:
247 except IOError:
248 fp = self.wfile('.hgtags', 'ab')
248 fp = self.wfile('.hgtags', 'ab')
249 else:
249 else:
250 prevtags = fp.read()
250 prevtags = fp.read()
251
251
252 # committed tags are stored in UTF-8
252 # committed tags are stored in UTF-8
253 writetags(fp, names, encoding.fromlocal, prevtags)
253 writetags(fp, names, encoding.fromlocal, prevtags)
254
254
255 if '.hgtags' not in self.dirstate:
255 if '.hgtags' not in self.dirstate:
256 self[None].add(['.hgtags'])
256 self[None].add(['.hgtags'])
257
257
258 m = matchmod.exact(self.root, '', ['.hgtags'])
258 m = matchmod.exact(self.root, '', ['.hgtags'])
259 tagnode = self.commit(message, user, date, extra=extra, match=m)
259 tagnode = self.commit(message, user, date, extra=extra, match=m)
260
260
261 for name in names:
261 for name in names:
262 self.hook('tag', node=hex(node), tag=name, local=local)
262 self.hook('tag', node=hex(node), tag=name, local=local)
263
263
264 return tagnode
264 return tagnode
265
265
266 def tag(self, names, node, message, local, user, date):
266 def tag(self, names, node, message, local, user, date):
267 '''tag a revision with one or more symbolic names.
267 '''tag a revision with one or more symbolic names.
268
268
269 names is a list of strings or, when adding a single tag, names may be a
269 names is a list of strings or, when adding a single tag, names may be a
270 string.
270 string.
271
271
272 if local is True, the tags are stored in a per-repository file.
272 if local is True, the tags are stored in a per-repository file.
273 otherwise, they are stored in the .hgtags file, and a new
273 otherwise, they are stored in the .hgtags file, and a new
274 changeset is committed with the change.
274 changeset is committed with the change.
275
275
276 keyword arguments:
276 keyword arguments:
277
277
278 local: whether to store tags in non-version-controlled file
278 local: whether to store tags in non-version-controlled file
279 (default False)
279 (default False)
280
280
281 message: commit message to use if committing
281 message: commit message to use if committing
282
282
283 user: name of user to use if committing
283 user: name of user to use if committing
284
284
285 date: date tuple to use if committing'''
285 date: date tuple to use if committing'''
286
286
287 for x in self.status()[:5]:
287 for x in self.status()[:5]:
288 if '.hgtags' in x:
288 if '.hgtags' in x:
289 raise util.Abort(_('working copy of .hgtags is changed '
289 raise util.Abort(_('working copy of .hgtags is changed '
290 '(please commit .hgtags manually)'))
290 '(please commit .hgtags manually)'))
291
291
292 self.tags() # instantiate the cache
292 self.tags() # instantiate the cache
293 self._tag(names, node, message, local, user, date)
293 self._tag(names, node, message, local, user, date)
294
294
295 def tags(self):
295 def tags(self):
296 '''return a mapping of tag to node'''
296 '''return a mapping of tag to node'''
297 if self._tags is None:
297 if self._tags is None:
298 (self._tags, self._tagtypes) = self._findtags()
298 (self._tags, self._tagtypes) = self._findtags()
299
299
300 return self._tags
300 return self._tags
301
301
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        # Both helpers mutate alltags/tagtypes in place.
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # a tag pointing at nullid is treated as removed
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
334
334
335 def tagtype(self, tagname):
335 def tagtype(self, tagname):
336 '''
336 '''
337 return the type of the given tag. result can be:
337 return the type of the given tag. result can be:
338
338
339 'local' : a local tag
339 'local' : a local tag
340 'global' : a global tag
340 'global' : a global tag
341 None : tag does not exist
341 None : tag does not exist
342 '''
342 '''
343
343
344 self.tags()
344 self.tags()
345
345
346 return self._tagtypes.get(tagname)
346 return self._tagtypes.get(tagname)
347
347
348 def tagslist(self):
348 def tagslist(self):
349 '''return a list of tags ordered by revision'''
349 '''return a list of tags ordered by revision'''
350 l = []
350 l = []
351 for t, n in self.tags().iteritems():
351 for t, n in self.tags().iteritems():
352 try:
352 try:
353 r = self.changelog.rev(n)
353 r = self.changelog.rev(n)
354 except:
354 except:
355 r = -2 # sort to the beginning of the list if unknown
355 r = -2 # sort to the beginning of the list if unknown
356 l.append((r, t, n))
356 l.append((r, t, n))
357 return [(t, n) for r, t, n in sorted(l)]
357 return [(t, n) for r, t, n in sorted(l)]
358
358
    def nodetags(self, node):
        '''return the tags associated with a node'''
        # Lazily build the inverse of the tag map: node -> [tag, ...].
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            # sort each tag list for deterministic output
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])
368
368
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring 'partial' (a branch -> heads map known to be valid up to
        # local revision 'lrev') up to date with the current tip, and
        # persist the refreshed map to the on-disk cache.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
378
378
379 def updatebranchcache(self):
379 def updatebranchcache(self):
380 tip = self.changelog.tip()
380 tip = self.changelog.tip()
381 if self._branchcache is not None and self._branchcachetip == tip:
381 if self._branchcache is not None and self._branchcachetip == tip:
382 return self._branchcache
382 return self._branchcache
383
383
384 oldtip = self._branchcachetip
384 oldtip = self._branchcachetip
385 self._branchcachetip = tip
385 self._branchcachetip = tip
386 if oldtip is None or oldtip not in self.changelog.nodemap:
386 if oldtip is None or oldtip not in self.changelog.nodemap:
387 partial, last, lrev = self._readbranchcache()
387 partial, last, lrev = self._readbranchcache()
388 else:
388 else:
389 lrev = self.changelog.rev(oldtip)
389 lrev = self.changelog.rev(oldtip)
390 partial = self._branchcache
390 partial = self._branchcache
391
391
392 self._branchtags(partial, lrev)
392 self._branchtags(partial, lrev)
393 # this private cache holds all heads (not just tips)
393 # this private cache holds all heads (not just tips)
394 self._branchcache = partial
394 self._branchcache = partial
395
395
396 def branchmap(self):
396 def branchmap(self):
397 '''returns a dictionary {branch: [branchheads]}'''
397 '''returns a dictionary {branch: [branchheads]}'''
398 self.updatebranchcache()
398 self.updatebranchcache()
399 return self._branchcache
399 return self._branchcache
400
400
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            # default to the last head; prefer the tipmost non-closed one
            tip = heads[-1]
            for h in reversed(heads):
                # NOTE(review): changelog.read(h)[5] appears to be the
                # changeset's extra dict, where 'close' marks a closed
                # head -- confirm against changelog.read's return tuple.
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
413
413
414
414
    def _readbranchcache(self):
        '''Read the on-disk branch head cache.

        Returns (partial, last, lrev) where partial maps branch name to a
        list of head nodes, and last/lrev identify the changelog entry the
        cache was valid for. Returns empty/null values when the cache is
        missing, unreadable or stale.
        '''
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache file: caller will rebuild from scratch
            return {}, nullid, nullrev

        try:
            # first line: "<hex tip node> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<hex head node> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # any parse error just means the cache is discarded
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
442
442
    def _writebranchcache(self, branches, tip, tiprev):
        '''Persist the branch head cache; format mirrors _readbranchcache.

        Best-effort: failing to write the cache is never fatal.
        '''
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            # atomictemp file only becomes visible on rename
            f.rename()
        except (IOError, OSError):
            pass
453
453
    def _updatebranchcache(self, partial, ctxgen):
        '''Fold the changesets from ctxgen into the branch->heads map
        'partial' (mutated in place), pruning entries that stop being
        heads once the new changesets are taken into account.'''
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # bound the reachability walk by the oldest candidate head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                # any head reachable from 'latest' is not a head anymore
                bheads = [b for b in bheads if b not in reachable]
                partial[branch] = bheads
477
477
478 def lookup(self, key):
478 def lookup(self, key):
479 if isinstance(key, int):
479 if isinstance(key, int):
480 return self.changelog.node(key)
480 return self.changelog.node(key)
481 elif key == '.':
481 elif key == '.':
482 return self.dirstate.parents()[0]
482 return self.dirstate.parents()[0]
483 elif key == 'null':
483 elif key == 'null':
484 return nullid
484 return nullid
485 elif key == 'tip':
485 elif key == 'tip':
486 return self.changelog.tip()
486 return self.changelog.tip()
487 n = self.changelog._match(key)
487 n = self.changelog._match(key)
488 if n:
488 if n:
489 return n
489 return n
490 if key in self.tags():
490 if key in self.tags():
491 return self.tags()[key]
491 return self.tags()[key]
492 if key in self.branchtags():
492 if key in self.branchtags():
493 return self.branchtags()[key]
493 return self.branchtags()[key]
494 n = self.changelog._partialmatch(key)
494 n = self.changelog._partialmatch(key)
495 if n:
495 if n:
496 return n
496 return n
497
497
498 # can't find key, check if it might have come from damaged dirstate
498 # can't find key, check if it might have come from damaged dirstate
499 if key in self.dirstate.parents():
499 if key in self.dirstate.parents():
500 raise error.Abort(_("working directory has unknown parent '%s'!")
500 raise error.Abort(_("working directory has unknown parent '%s'!")
501 % short(key))
501 % short(key))
502 try:
502 try:
503 if len(key) == 20:
503 if len(key) == 20:
504 key = hex(key)
504 key = hex(key)
505 except:
505 except:
506 pass
506 pass
507 raise error.RepoLookupError(_("unknown revision '%s'") % key)
507 raise error.RepoLookupError(_("unknown revision '%s'") % key)
508
508
509 def lookupbranch(self, key, remote=None):
509 def lookupbranch(self, key, remote=None):
510 repo = remote or self
510 repo = remote or self
511 if key in repo.branchmap():
511 if key in repo.branchmap():
512 return key
512 return key
513
513
514 repo = (remote and remote.local()) and remote or self
514 repo = (remote and remote.local()) and remote or self
515 return repo[key].branch()
515 return repo[key].branch()
516
516
517 def local(self):
517 def local(self):
518 return True
518 return True
519
519
520 def join(self, f):
520 def join(self, f):
521 return os.path.join(self.path, f)
521 return os.path.join(self.path, f)
522
522
523 def wjoin(self, f):
523 def wjoin(self, f):
524 return os.path.join(self.root, f)
524 return os.path.join(self.root, f)
525
525
526 def file(self, f):
526 def file(self, f):
527 if f[0] == '/':
527 if f[0] == '/':
528 f = f[1:]
528 f = f[1:]
529 return filelog.filelog(self.sopener, f)
529 return filelog.filelog(self.sopener, f)
530
530
531 def changectx(self, changeid):
531 def changectx(self, changeid):
532 return self[changeid]
532 return self[changeid]
533
533
534 def parents(self, changeid=None):
534 def parents(self, changeid=None):
535 '''get list of changectxs for parents of changeid'''
535 '''get list of changectxs for parents of changeid'''
536 return self[changeid].parents()
536 return self[changeid].parents()
537
537
538 def filectx(self, path, changeid=None, fileid=None):
538 def filectx(self, path, changeid=None, fileid=None):
539 """changeid can be a changeset revision, node, or tag.
539 """changeid can be a changeset revision, node, or tag.
540 fileid can be a file revision or node."""
540 fileid can be a file revision or node."""
541 return context.filectx(self, path, changeid, fileid)
541 return context.filectx(self, path, changeid, fileid)
542
542
543 def getcwd(self):
543 def getcwd(self):
544 return self.dirstate.getcwd()
544 return self.dirstate.getcwd()
545
545
546 def pathto(self, f, cwd=None):
546 def pathto(self, f, cwd=None):
547 return self.dirstate.pathto(f, cwd)
547 return self.dirstate.pathto(f, cwd)
548
548
549 def wfile(self, f, mode='r'):
549 def wfile(self, f, mode='r'):
550 return self.wopener(f, mode)
550 return self.wopener(f, mode)
551
551
552 def _link(self, f):
552 def _link(self, f):
553 return os.path.islink(self.wjoin(f))
553 return os.path.islink(self.wjoin(f))
554
554
    def _loadfilter(self, filter):
        '''Compile and cache the (matcher, fn, params) triples for the
        given filter section ('encode' or 'decode' config items).'''
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables a filter inherited from another config
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name
                # dispatches to that Python filter instead of a shell pipe
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping through the external command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
577
577
578 def _filter(self, filter, filename, data):
578 def _filter(self, filter, filename, data):
579 self._loadfilter(filter)
579 self._loadfilter(filter)
580
580
581 for mf, fn, cmd in self.filterpats[filter]:
581 for mf, fn, cmd in self.filterpats[filter]:
582 if mf(filename):
582 if mf(filename):
583 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
583 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
584 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
584 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
585 break
585 break
586
586
587 return data
587 return data
588
588
589 def adddatafilter(self, name, filter):
589 def adddatafilter(self, name, filter):
590 self._datafilters[name] = filter
590 self._datafilters[name] = filter
591
591
592 def wread(self, filename):
592 def wread(self, filename):
593 if self._link(filename):
593 if self._link(filename):
594 data = os.readlink(self.wjoin(filename))
594 data = os.readlink(self.wjoin(filename))
595 else:
595 else:
596 data = self.wopener(filename, 'r').read()
596 data = self.wopener(filename, 'r').read()
597 return self._filter("encode", filename, data)
597 return self._filter("encode", filename, data)
598
598
    def wwrite(self, filename, data, flags):
        '''Write data to working-directory file filename after applying
        decode filters; flags may contain 'l' (symlink) and 'x' (exec).'''
        data = self._filter("decode", filename, data)
        # remove any existing file first so symlink/regular transitions work
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            # data is the symlink target
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
611
611
612 def wwritedata(self, filename, data):
612 def wwritedata(self, filename, data):
613 return self._filter("decode", filename, data)
613 return self._filter("decode", filename, data)
614
614
    def transaction(self, desc):
        '''Open (or nest into) a store transaction described by desc.

        Returns a transaction object; a weak reference is kept so nested
        callers reuse the running transaction.
        '''
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # reuse the already-running transaction
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on successful close, journal.* files are renamed to undo.* so a
        # later rollback can restore the pre-transaction state
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
644
644
    def recover(self):
        '''Replay an interrupted transaction's journal, if any.

        Returns True when a journal was rolled back, False otherwise.
        '''
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop caches derived from the pre-rollback changelog
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
659
659
    def rollback(self, dryrun=False):
        '''Undo the last transaction using the saved undo.* files.

        With dryrun=True, only report what would be rolled back.
        Returns 1 when there is no rollback information.
        '''
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                # undo.desc holds "<len(repo)>\n<transaction desc>..." as
                # written by transaction(); use it for a friendly message
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                             int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                # restore the pre-transaction dirstate and branch
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                # notify that previously-visible changesets are gone
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
698
698
699 def invalidatecaches(self):
699 def invalidatecaches(self):
700 self._tags = None
700 self._tags = None
701 self._tagtypes = None
701 self._tagtypes = None
702 self.nodetagscache = None
702 self.nodetagscache = None
703 self._branchcache = None # in UTF-8
703 self._branchcache = None # in UTF-8
704 self._branchcachetip = None
704 self._branchcachetip = None
705
705
706 def invalidate(self):
706 def invalidate(self):
707 for a in "changelog manifest".split():
707 for a in "changelog manifest".split():
708 if a in self.__dict__:
708 if a in self.__dict__:
709 delattr(self, a)
709 delattr(self, a)
710 self.invalidatecaches()
710 self.invalidatecaches()
711
711
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        '''Acquire the lock file at lockname.

        When the lock is held elsewhere: raise immediately if wait is
        false, otherwise warn and retry with a timeout. acquirefn, if
        given, runs after the lock is obtained.
        '''
        try:
            # first try non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
726
726
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse the existing lock if this process already holds it
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        # weakref so the lock is released when the last strong ref dies
        self._lockref = weakref.ref(l)
        return l
740
740
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse the existing lock if this process already holds it
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # dirstate is written on release and invalidated on acquisition
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
755
755
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit; manifest1/manifest2: manifests
        of the two commit parents; linkrev: changelog revision the new
        filelog entry will link to; tr: the running transaction;
        changelist: (output) list of files that actually changed.

        Returns the resulting filelog node (the first parent's node when
        the file turns out to be unchanged).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

                # find source in nearest ancestor if we've lost track
                if not crev:
                    self.ui.debug(" %s: searching for copy revision for %s\n" %
                                  (fname, cfname))
                    for ancestor in self['.'].ancestors():
                        if cfname in ancestor:
                            crev = ancestor[cfname].filenode()
                            break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
830
830
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there was
        nothing to commit.
        """

        def fail(f, msg):
            # used as match.bad callback: abort on any bad/unmatched file
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated against actually-matched files below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos: collect dirty subrepos to commit (subs) and
            # subrepos removed relative to the parents (removedsubs)
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                # .hgsubstate is rewritten below, so force it into the
                # modified list if status did not already report it
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit (unless forced, closing a branch head,
            # finishing a merge, or changing the branch name)
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs: commit each dirty subrepo first and record the
            # resulting revisions in .hgsubstate
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # tell the user where the edited message was saved before
                # re-raising, so the text is not lost on hook failure
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root) + 1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
958
958
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.

        When error is true, IOErrors while committing individual files
        are treated as fatal instead of silently dropping the file.
        Returns the node of the new changeset.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # weakref so filelog/manifest adds don't keep the transaction
            # alive past release
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished from the working dir: commit it
                        # as removed instead of aborting
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let pretxncommit hooks see the pending changelog writes
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1026
1026
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()
1045
1045
1046 def walk(self, match, node=None):
1046 def walk(self, match, node=None):
1047 '''
1047 '''
1048 walk recursively through the directory tree or a given
1048 walk recursively through the directory tree or a given
1049 changeset, finding all files matched by the match
1049 changeset, finding all files matched by the match
1050 function
1050 function
1051 '''
1051 '''
1052 return self[node].walk(match)
1052 return self[node].walk(match)
1053
1053
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def mfmatches(ctx):
            # manifest restricted to the files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn about files that exist in neither context
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            # whatever is left in mf1 only existed in ctx1
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # recurse into each subrepo and fold its status into r,
            # prefixing file names with the subrepo path
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r
1189
1182
1190 def heads(self, start=None):
1183 def heads(self, start=None):
1191 heads = self.changelog.heads(start)
1184 heads = self.changelog.heads(start)
1192 # sort the output in rev descending order
1185 # sort the output in rev descending order
1193 heads = [(-self.changelog.rev(h), h) for h in heads]
1186 heads = [(-self.changelog.rev(h), h) for h in heads]
1194 return [n for (r, n) in sorted(heads)]
1187 return [n for (r, n) in sorted(heads)]
1195
1188
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # drop heads whose changeset extra marks the branch closed
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1219
1212
    def branches(self, nodes):
        # For each given node, walk first parents back to the nearest
        # branch point (a merge or a root) and report the 4-tuple
        # (tip-of-segment, branch-point, parent1, parent2).
        # With no nodes given, start from the changelog tip.
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n  # remember where this linear segment started
            while 1:
                p = self.changelog.parents(n)
                # stop at a merge (second parent set) or at a root
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1233
1226
    def between(self, pairs):
        # For each (top, bottom) pair, walk first parents from top
        # towards bottom and sample nodes at exponentially growing
        # distances (1, 2, 4, ...), returning one sample list per pair.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # next sampling distance (doubles each hit)

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
1252
1245
    def pull(self, remote, heads=None, force=False):
        """Pull changes from the remote repository into this one.

        heads optionally limits the pull to the given remote heads.
        Returns 0 when there was nothing to pull, otherwise the return
        value of addchangegroup().
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                # a partial pull needs changegroupsubset on the remote side
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()
1280
1273
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            # legacy path: we must hold the remote lock ourselves
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()
1325
1318
1326 def changegroupinfo(self, nodes, source):
1319 def changegroupinfo(self, nodes, source):
1327 if self.ui.verbose or source == 'bundle':
1320 if self.ui.verbose or source == 'bundle':
1328 self.ui.status(_("%d changesets found\n") % len(nodes))
1321 self.ui.status(_("%d changesets found\n") % len(nodes))
1329 if self.ui.debugflag:
1322 if self.ui.debugflag:
1330 self.ui.debug("list of changesets:\n")
1323 self.ui.debug("list of changesets:\n")
1331 for node in nodes:
1324 for node in nodes:
1332 self.ui.debug("%s\n" % hex(node))
1325 self.ui.debug("%s\n" % hex(node))
1333
1326
1334 def changegroupsubset(self, bases, heads, source, extranodes=None):
1327 def changegroupsubset(self, bases, heads, source, extranodes=None):
1335 """Compute a changegroup consisting of all the nodes that are
1328 """Compute a changegroup consisting of all the nodes that are
1336 descendents of any of the bases and ancestors of any of the heads.
1329 descendents of any of the bases and ancestors of any of the heads.
1337 Return a chunkbuffer object whose read() method will return
1330 Return a chunkbuffer object whose read() method will return
1338 successive changegroup chunks.
1331 successive changegroup chunks.
1339
1332
1340 It is fairly complex as determining which filenodes and which
1333 It is fairly complex as determining which filenodes and which
1341 manifest nodes need to be included for the changeset to be complete
1334 manifest nodes need to be included for the changeset to be complete
1342 is non-trivial.
1335 is non-trivial.
1343
1336
1344 Another wrinkle is doing the reverse, figuring out which changeset in
1337 Another wrinkle is doing the reverse, figuring out which changeset in
1345 the changegroup a particular filenode or manifestnode belongs to.
1338 the changegroup a particular filenode or manifestnode belongs to.
1346
1339
1347 The caller can specify some nodes that must be included in the
1340 The caller can specify some nodes that must be included in the
1348 changegroup using the extranodes argument. It should be a dict
1341 changegroup using the extranodes argument. It should be a dict
1349 where the keys are the filenames (or 1 for the manifest), and the
1342 where the keys are the filenames (or 1 for the manifest), and the
1350 values are lists of (node, linknode) tuples, where node is a wanted
1343 values are lists of (node, linknode) tuples, where node is a wanted
1351 node and linknode is the changelog node that should be transmitted as
1344 node and linknode is the changelog node that should be transmitted as
1352 the linkrev.
1345 the linkrev.
1353 """
1346 """
1354
1347
1355 # Set up some initial variables
1348 # Set up some initial variables
1356 # Make it easy to refer to self.changelog
1349 # Make it easy to refer to self.changelog
1357 cl = self.changelog
1350 cl = self.changelog
1358 # Compute the list of changesets in this changegroup.
1351 # Compute the list of changesets in this changegroup.
1359 # Some bases may turn out to be superfluous, and some heads may be
1352 # Some bases may turn out to be superfluous, and some heads may be
1360 # too. nodesbetween will return the minimal set of bases and heads
1353 # too. nodesbetween will return the minimal set of bases and heads
1361 # necessary to re-create the changegroup.
1354 # necessary to re-create the changegroup.
1362 if not bases:
1355 if not bases:
1363 bases = [nullid]
1356 bases = [nullid]
1364 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1357 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1365
1358
1366 if extranodes is None:
1359 if extranodes is None:
1367 # can we go through the fast path ?
1360 # can we go through the fast path ?
1368 heads.sort()
1361 heads.sort()
1369 allheads = self.heads()
1362 allheads = self.heads()
1370 allheads.sort()
1363 allheads.sort()
1371 if heads == allheads:
1364 if heads == allheads:
1372 return self._changegroup(msng_cl_lst, source)
1365 return self._changegroup(msng_cl_lst, source)
1373
1366
1374 # slow path
1367 # slow path
1375 self.hook('preoutgoing', throw=True, source=source)
1368 self.hook('preoutgoing', throw=True, source=source)
1376
1369
1377 self.changegroupinfo(msng_cl_lst, source)
1370 self.changegroupinfo(msng_cl_lst, source)
1378
1371
1379 # We assume that all ancestors of bases are known
1372 # We assume that all ancestors of bases are known
1380 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1373 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1381
1374
1382 # Make it easy to refer to self.manifest
1375 # Make it easy to refer to self.manifest
1383 mnfst = self.manifest
1376 mnfst = self.manifest
1384 # We don't know which manifests are missing yet
1377 # We don't know which manifests are missing yet
1385 msng_mnfst_set = {}
1378 msng_mnfst_set = {}
1386 # Nor do we know which filenodes are missing.
1379 # Nor do we know which filenodes are missing.
1387 msng_filenode_set = {}
1380 msng_filenode_set = {}
1388
1381
1389 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1382 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1390 junk = None
1383 junk = None
1391
1384
1392 # A changeset always belongs to itself, so the changenode lookup
1385 # A changeset always belongs to itself, so the changenode lookup
1393 # function for a changenode is identity.
1386 # function for a changenode is identity.
1394 def identity(x):
1387 def identity(x):
1395 return x
1388 return x
1396
1389
1397 # A function generating function that sets up the initial environment
1390 # A function generating function that sets up the initial environment
1398 # the inner function.
1391 # the inner function.
1399 def filenode_collector(changedfiles):
1392 def filenode_collector(changedfiles):
1400 # This gathers information from each manifestnode included in the
1393 # This gathers information from each manifestnode included in the
1401 # changegroup about which filenodes the manifest node references
1394 # changegroup about which filenodes the manifest node references
1402 # so we can include those in the changegroup too.
1395 # so we can include those in the changegroup too.
1403 #
1396 #
1404 # It also remembers which changenode each filenode belongs to. It
1397 # It also remembers which changenode each filenode belongs to. It
1405 # does this by assuming the a filenode belongs to the changenode
1398 # does this by assuming the a filenode belongs to the changenode
1406 # the first manifest that references it belongs to.
1399 # the first manifest that references it belongs to.
1407 def collect_msng_filenodes(mnfstnode):
1400 def collect_msng_filenodes(mnfstnode):
1408 r = mnfst.rev(mnfstnode)
1401 r = mnfst.rev(mnfstnode)
1409 if r - 1 in mnfst.parentrevs(r):
1402 if r - 1 in mnfst.parentrevs(r):
1410 # If the previous rev is one of the parents,
1403 # If the previous rev is one of the parents,
1411 # we only need to see a diff.
1404 # we only need to see a diff.
1412 deltamf = mnfst.readdelta(mnfstnode)
1405 deltamf = mnfst.readdelta(mnfstnode)
1413 # For each line in the delta
1406 # For each line in the delta
1414 for f, fnode in deltamf.iteritems():
1407 for f, fnode in deltamf.iteritems():
1415 # And if the file is in the list of files we care
1408 # And if the file is in the list of files we care
1416 # about.
1409 # about.
1417 if f in changedfiles:
1410 if f in changedfiles:
1418 # Get the changenode this manifest belongs to
1411 # Get the changenode this manifest belongs to
1419 clnode = msng_mnfst_set[mnfstnode]
1412 clnode = msng_mnfst_set[mnfstnode]
1420 # Create the set of filenodes for the file if
1413 # Create the set of filenodes for the file if
1421 # there isn't one already.
1414 # there isn't one already.
1422 ndset = msng_filenode_set.setdefault(f, {})
1415 ndset = msng_filenode_set.setdefault(f, {})
1423 # And set the filenode's changelog node to the
1416 # And set the filenode's changelog node to the
1424 # manifest's if it hasn't been set already.
1417 # manifest's if it hasn't been set already.
1425 ndset.setdefault(fnode, clnode)
1418 ndset.setdefault(fnode, clnode)
1426 else:
1419 else:
1427 # Otherwise we need a full manifest.
1420 # Otherwise we need a full manifest.
1428 m = mnfst.read(mnfstnode)
1421 m = mnfst.read(mnfstnode)
1429 # For every file in we care about.
1422 # For every file in we care about.
1430 for f in changedfiles:
1423 for f in changedfiles:
1431 fnode = m.get(f, None)
1424 fnode = m.get(f, None)
1432 # If it's in the manifest
1425 # If it's in the manifest
1433 if fnode is not None:
1426 if fnode is not None:
1434 # See comments above.
1427 # See comments above.
1435 clnode = msng_mnfst_set[mnfstnode]
1428 clnode = msng_mnfst_set[mnfstnode]
1436 ndset = msng_filenode_set.setdefault(f, {})
1429 ndset = msng_filenode_set.setdefault(f, {})
1437 ndset.setdefault(fnode, clnode)
1430 ndset.setdefault(fnode, clnode)
1438 return collect_msng_filenodes
1431 return collect_msng_filenodes
1439
1432
1440 # If we determine that a particular file or manifest node must be a
1433 # If we determine that a particular file or manifest node must be a
1441 # node that the recipient of the changegroup will already have, we can
1434 # node that the recipient of the changegroup will already have, we can
1442 # also assume the recipient will have all the parents. This function
1435 # also assume the recipient will have all the parents. This function
1443 # prunes them from the set of missing nodes.
1436 # prunes them from the set of missing nodes.
1444 def prune(revlog, missingnodes):
1437 def prune(revlog, missingnodes):
1445 hasset = set()
1438 hasset = set()
1446 # If a 'missing' filenode thinks it belongs to a changenode we
1439 # If a 'missing' filenode thinks it belongs to a changenode we
1447 # assume the recipient must have, then the recipient must have
1440 # assume the recipient must have, then the recipient must have
1448 # that filenode.
1441 # that filenode.
1449 for n in missingnodes:
1442 for n in missingnodes:
1450 clrev = revlog.linkrev(revlog.rev(n))
1443 clrev = revlog.linkrev(revlog.rev(n))
1451 if clrev in commonrevs:
1444 if clrev in commonrevs:
1452 hasset.add(n)
1445 hasset.add(n)
1453 for n in hasset:
1446 for n in hasset:
1454 missingnodes.pop(n, None)
1447 missingnodes.pop(n, None)
1455 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1448 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1456 missingnodes.pop(revlog.node(r), None)
1449 missingnodes.pop(revlog.node(r), None)
1457
1450
1458 # Add the nodes that were explicitly requested.
1451 # Add the nodes that were explicitly requested.
1459 def add_extra_nodes(name, nodes):
1452 def add_extra_nodes(name, nodes):
1460 if not extranodes or name not in extranodes:
1453 if not extranodes or name not in extranodes:
1461 return
1454 return
1462
1455
1463 for node, linknode in extranodes[name]:
1456 for node, linknode in extranodes[name]:
1464 if node not in nodes:
1457 if node not in nodes:
1465 nodes[node] = linknode
1458 nodes[node] = linknode
1466
1459
1467 # Now that we have all theses utility functions to help out and
1460 # Now that we have all theses utility functions to help out and
1468 # logically divide up the task, generate the group.
1461 # logically divide up the task, generate the group.
1469 def gengroup():
1462 def gengroup():
1470 # The set of changed files starts empty.
1463 # The set of changed files starts empty.
1471 changedfiles = set()
1464 changedfiles = set()
1472 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1465 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1473
1466
1474 # Create a changenode group generator that will call our functions
1467 # Create a changenode group generator that will call our functions
1475 # back to lookup the owning changenode and collect information.
1468 # back to lookup the owning changenode and collect information.
1476 group = cl.group(msng_cl_lst, identity, collect)
1469 group = cl.group(msng_cl_lst, identity, collect)
1477 for cnt, chnk in enumerate(group):
1470 for cnt, chnk in enumerate(group):
1478 yield chnk
1471 yield chnk
1479 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1472 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1480 self.ui.progress(_('bundling changes'), None)
1473 self.ui.progress(_('bundling changes'), None)
1481
1474
1482 prune(mnfst, msng_mnfst_set)
1475 prune(mnfst, msng_mnfst_set)
1483 add_extra_nodes(1, msng_mnfst_set)
1476 add_extra_nodes(1, msng_mnfst_set)
1484 msng_mnfst_lst = msng_mnfst_set.keys()
1477 msng_mnfst_lst = msng_mnfst_set.keys()
1485 # Sort the manifestnodes by revision number.
1478 # Sort the manifestnodes by revision number.
1486 msng_mnfst_lst.sort(key=mnfst.rev)
1479 msng_mnfst_lst.sort(key=mnfst.rev)
1487 # Create a generator for the manifestnodes that calls our lookup
1480 # Create a generator for the manifestnodes that calls our lookup
1488 # and data collection functions back.
1481 # and data collection functions back.
1489 group = mnfst.group(msng_mnfst_lst,
1482 group = mnfst.group(msng_mnfst_lst,
1490 lambda mnode: msng_mnfst_set[mnode],
1483 lambda mnode: msng_mnfst_set[mnode],
1491 filenode_collector(changedfiles))
1484 filenode_collector(changedfiles))
1492 for cnt, chnk in enumerate(group):
1485 for cnt, chnk in enumerate(group):
1493 yield chnk
1486 yield chnk
1494 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1487 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1495 self.ui.progress(_('bundling manifests'), None)
1488 self.ui.progress(_('bundling manifests'), None)
1496
1489
1497 # These are no longer needed, dereference and toss the memory for
1490 # These are no longer needed, dereference and toss the memory for
1498 # them.
1491 # them.
1499 msng_mnfst_lst = None
1492 msng_mnfst_lst = None
1500 msng_mnfst_set.clear()
1493 msng_mnfst_set.clear()
1501
1494
1502 if extranodes:
1495 if extranodes:
1503 for fname in extranodes:
1496 for fname in extranodes:
1504 if isinstance(fname, int):
1497 if isinstance(fname, int):
1505 continue
1498 continue
1506 msng_filenode_set.setdefault(fname, {})
1499 msng_filenode_set.setdefault(fname, {})
1507 changedfiles.add(fname)
1500 changedfiles.add(fname)
1508 # Go through all our files in order sorted by name.
1501 # Go through all our files in order sorted by name.
1509 cnt = 0
1502 cnt = 0
1510 for fname in sorted(changedfiles):
1503 for fname in sorted(changedfiles):
1511 filerevlog = self.file(fname)
1504 filerevlog = self.file(fname)
1512 if not len(filerevlog):
1505 if not len(filerevlog):
1513 raise util.Abort(_("empty or missing revlog for %s") % fname)
1506 raise util.Abort(_("empty or missing revlog for %s") % fname)
1514 # Toss out the filenodes that the recipient isn't really
1507 # Toss out the filenodes that the recipient isn't really
1515 # missing.
1508 # missing.
1516 missingfnodes = msng_filenode_set.pop(fname, {})
1509 missingfnodes = msng_filenode_set.pop(fname, {})
1517 prune(filerevlog, missingfnodes)
1510 prune(filerevlog, missingfnodes)
1518 add_extra_nodes(fname, missingfnodes)
1511 add_extra_nodes(fname, missingfnodes)
1519 # If any filenodes are left, generate the group for them,
1512 # If any filenodes are left, generate the group for them,
1520 # otherwise don't bother.
1513 # otherwise don't bother.
1521 if missingfnodes:
1514 if missingfnodes:
1522 yield changegroup.chunkheader(len(fname))
1515 yield changegroup.chunkheader(len(fname))
1523 yield fname
1516 yield fname
1524 # Sort the filenodes by their revision # (topological order)
1517 # Sort the filenodes by their revision # (topological order)
1525 nodeiter = list(missingfnodes)
1518 nodeiter = list(missingfnodes)
1526 nodeiter.sort(key=filerevlog.rev)
1519 nodeiter.sort(key=filerevlog.rev)
1527 # Create a group generator and only pass in a changenode
1520 # Create a group generator and only pass in a changenode
1528 # lookup function as we need to collect no information
1521 # lookup function as we need to collect no information
1529 # from filenodes.
1522 # from filenodes.
1530 group = filerevlog.group(nodeiter,
1523 group = filerevlog.group(nodeiter,
1531 lambda fnode: missingfnodes[fnode])
1524 lambda fnode: missingfnodes[fnode])
1532 for chnk in group:
1525 for chnk in group:
1533 self.ui.progress(
1526 self.ui.progress(
1534 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1527 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1535 cnt += 1
1528 cnt += 1
1536 yield chnk
1529 yield chnk
1537 # Signal that no more groups are left.
1530 # Signal that no more groups are left.
1538 yield changegroup.closechunk()
1531 yield changegroup.closechunk()
1539 self.ui.progress(_('bundling files'), None)
1532 self.ui.progress(_('bundling files'), None)
1540
1533
1541 if msng_cl_lst:
1534 if msng_cl_lst:
1542 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1535 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1543
1536
1544 return util.chunkbuffer(gengroup())
1537 return util.chunkbuffer(gengroup())
1545
1538
1546 def changegroup(self, basenodes, source):
1539 def changegroup(self, basenodes, source):
1547 # to avoid a race we use changegroupsubset() (issue1320)
1540 # to avoid a race we use changegroupsubset() (issue1320)
1548 return self.changegroupsubset(basenodes, self.heads(), source)
1541 return self.changegroupsubset(basenodes, self.heads(), source)
1549
1542
1550 def _changegroup(self, nodes, source):
1543 def _changegroup(self, nodes, source):
1551 """Compute the changegroup of all nodes that we have that a recipient
1544 """Compute the changegroup of all nodes that we have that a recipient
1552 doesn't. Return a chunkbuffer object whose read() method will return
1545 doesn't. Return a chunkbuffer object whose read() method will return
1553 successive changegroup chunks.
1546 successive changegroup chunks.
1554
1547
1555 This is much easier than the previous function as we can assume that
1548 This is much easier than the previous function as we can assume that
1556 the recipient has any changenode we aren't sending them.
1549 the recipient has any changenode we aren't sending them.
1557
1550
1558 nodes is the set of nodes to send"""
1551 nodes is the set of nodes to send"""
1559
1552
1560 self.hook('preoutgoing', throw=True, source=source)
1553 self.hook('preoutgoing', throw=True, source=source)
1561
1554
1562 cl = self.changelog
1555 cl = self.changelog
1563 revset = set([cl.rev(n) for n in nodes])
1556 revset = set([cl.rev(n) for n in nodes])
1564 self.changegroupinfo(nodes, source)
1557 self.changegroupinfo(nodes, source)
1565
1558
1566 def identity(x):
1559 def identity(x):
1567 return x
1560 return x
1568
1561
1569 def gennodelst(log):
1562 def gennodelst(log):
1570 for r in log:
1563 for r in log:
1571 if log.linkrev(r) in revset:
1564 if log.linkrev(r) in revset:
1572 yield log.node(r)
1565 yield log.node(r)
1573
1566
1574 def lookuplinkrev_func(revlog):
1567 def lookuplinkrev_func(revlog):
1575 def lookuplinkrev(n):
1568 def lookuplinkrev(n):
1576 return cl.node(revlog.linkrev(revlog.rev(n)))
1569 return cl.node(revlog.linkrev(revlog.rev(n)))
1577 return lookuplinkrev
1570 return lookuplinkrev
1578
1571
1579 def gengroup():
1572 def gengroup():
1580 '''yield a sequence of changegroup chunks (strings)'''
1573 '''yield a sequence of changegroup chunks (strings)'''
1581 # construct a list of all changed files
1574 # construct a list of all changed files
1582 changedfiles = set()
1575 changedfiles = set()
1583 mmfs = {}
1576 mmfs = {}
1584 collect = changegroup.collector(cl, mmfs, changedfiles)
1577 collect = changegroup.collector(cl, mmfs, changedfiles)
1585
1578
1586 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1579 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1587 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1580 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1588 yield chnk
1581 yield chnk
1589 self.ui.progress(_('bundling changes'), None)
1582 self.ui.progress(_('bundling changes'), None)
1590
1583
1591 mnfst = self.manifest
1584 mnfst = self.manifest
1592 nodeiter = gennodelst(mnfst)
1585 nodeiter = gennodelst(mnfst)
1593 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1586 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1594 lookuplinkrev_func(mnfst))):
1587 lookuplinkrev_func(mnfst))):
1595 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1588 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1596 yield chnk
1589 yield chnk
1597 self.ui.progress(_('bundling manifests'), None)
1590 self.ui.progress(_('bundling manifests'), None)
1598
1591
1599 cnt = 0
1592 cnt = 0
1600 for fname in sorted(changedfiles):
1593 for fname in sorted(changedfiles):
1601 filerevlog = self.file(fname)
1594 filerevlog = self.file(fname)
1602 if not len(filerevlog):
1595 if not len(filerevlog):
1603 raise util.Abort(_("empty or missing revlog for %s") % fname)
1596 raise util.Abort(_("empty or missing revlog for %s") % fname)
1604 nodeiter = gennodelst(filerevlog)
1597 nodeiter = gennodelst(filerevlog)
1605 nodeiter = list(nodeiter)
1598 nodeiter = list(nodeiter)
1606 if nodeiter:
1599 if nodeiter:
1607 yield changegroup.chunkheader(len(fname))
1600 yield changegroup.chunkheader(len(fname))
1608 yield fname
1601 yield fname
1609 lookup = lookuplinkrev_func(filerevlog)
1602 lookup = lookuplinkrev_func(filerevlog)
1610 for chnk in filerevlog.group(nodeiter, lookup):
1603 for chnk in filerevlog.group(nodeiter, lookup):
1611 self.ui.progress(
1604 self.ui.progress(
1612 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1605 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1613 cnt += 1
1606 cnt += 1
1614 yield chnk
1607 yield chnk
1615 self.ui.progress(_('bundling files'), None)
1608 self.ui.progress(_('bundling files'), None)
1616
1609
1617 yield changegroup.closechunk()
1610 yield changegroup.closechunk()
1618
1611
1619 if nodes:
1612 if nodes:
1620 self.hook('outgoing', node=hex(nodes[0]), source=source)
1613 self.hook('outgoing', node=hex(nodes[0]), source=source)
1621
1614
1622 return util.chunkbuffer(gengroup())
1615 return util.chunkbuffer(gengroup())
1623
1616
1624 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1617 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1625 """Add the changegroup returned by source.read() to this repo.
1618 """Add the changegroup returned by source.read() to this repo.
1626 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1619 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1627 the URL of the repo where this changegroup is coming from.
1620 the URL of the repo where this changegroup is coming from.
1628
1621
1629 Return an integer summarizing the change to this repo:
1622 Return an integer summarizing the change to this repo:
1630 - nothing changed or no source: 0
1623 - nothing changed or no source: 0
1631 - more heads than before: 1+added heads (2..n)
1624 - more heads than before: 1+added heads (2..n)
1632 - fewer heads than before: -1-removed heads (-2..-n)
1625 - fewer heads than before: -1-removed heads (-2..-n)
1633 - number of heads stays the same: 1
1626 - number of heads stays the same: 1
1634 """
1627 """
1635 def csmap(x):
1628 def csmap(x):
1636 self.ui.debug("add changeset %s\n" % short(x))
1629 self.ui.debug("add changeset %s\n" % short(x))
1637 return len(cl)
1630 return len(cl)
1638
1631
1639 def revmap(x):
1632 def revmap(x):
1640 return cl.rev(x)
1633 return cl.rev(x)
1641
1634
1642 if not source:
1635 if not source:
1643 return 0
1636 return 0
1644
1637
1645 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1638 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1646
1639
1647 changesets = files = revisions = 0
1640 changesets = files = revisions = 0
1648 efiles = set()
1641 efiles = set()
1649
1642
1650 # write changelog data to temp files so concurrent readers will not see
1643 # write changelog data to temp files so concurrent readers will not see
1651 # inconsistent view
1644 # inconsistent view
1652 cl = self.changelog
1645 cl = self.changelog
1653 cl.delayupdate()
1646 cl.delayupdate()
1654 oldheads = len(cl.heads())
1647 oldheads = len(cl.heads())
1655
1648
1656 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1649 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1657 try:
1650 try:
1658 trp = weakref.proxy(tr)
1651 trp = weakref.proxy(tr)
1659 # pull off the changeset group
1652 # pull off the changeset group
1660 self.ui.status(_("adding changesets\n"))
1653 self.ui.status(_("adding changesets\n"))
1661 clstart = len(cl)
1654 clstart = len(cl)
1662 class prog(object):
1655 class prog(object):
1663 step = _('changesets')
1656 step = _('changesets')
1664 count = 1
1657 count = 1
1665 ui = self.ui
1658 ui = self.ui
1666 total = None
1659 total = None
1667 def __call__(self):
1660 def __call__(self):
1668 self.ui.progress(self.step, self.count, unit=_('chunks'),
1661 self.ui.progress(self.step, self.count, unit=_('chunks'),
1669 total=self.total)
1662 total=self.total)
1670 self.count += 1
1663 self.count += 1
1671 pr = prog()
1664 pr = prog()
1672 chunkiter = changegroup.chunkiter(source, progress=pr)
1665 chunkiter = changegroup.chunkiter(source, progress=pr)
1673 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1666 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1674 raise util.Abort(_("received changelog group is empty"))
1667 raise util.Abort(_("received changelog group is empty"))
1675 clend = len(cl)
1668 clend = len(cl)
1676 changesets = clend - clstart
1669 changesets = clend - clstart
1677 for c in xrange(clstart, clend):
1670 for c in xrange(clstart, clend):
1678 efiles.update(self[c].files())
1671 efiles.update(self[c].files())
1679 efiles = len(efiles)
1672 efiles = len(efiles)
1680 self.ui.progress(_('changesets'), None)
1673 self.ui.progress(_('changesets'), None)
1681
1674
1682 # pull off the manifest group
1675 # pull off the manifest group
1683 self.ui.status(_("adding manifests\n"))
1676 self.ui.status(_("adding manifests\n"))
1684 pr.step = _('manifests')
1677 pr.step = _('manifests')
1685 pr.count = 1
1678 pr.count = 1
1686 pr.total = changesets # manifests <= changesets
1679 pr.total = changesets # manifests <= changesets
1687 chunkiter = changegroup.chunkiter(source, progress=pr)
1680 chunkiter = changegroup.chunkiter(source, progress=pr)
1688 # no need to check for empty manifest group here:
1681 # no need to check for empty manifest group here:
1689 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1682 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1690 # no new manifest will be created and the manifest group will
1683 # no new manifest will be created and the manifest group will
1691 # be empty during the pull
1684 # be empty during the pull
1692 self.manifest.addgroup(chunkiter, revmap, trp)
1685 self.manifest.addgroup(chunkiter, revmap, trp)
1693 self.ui.progress(_('manifests'), None)
1686 self.ui.progress(_('manifests'), None)
1694
1687
1695 needfiles = {}
1688 needfiles = {}
1696 if self.ui.configbool('server', 'validate', default=False):
1689 if self.ui.configbool('server', 'validate', default=False):
1697 # validate incoming csets have their manifests
1690 # validate incoming csets have their manifests
1698 for cset in xrange(clstart, clend):
1691 for cset in xrange(clstart, clend):
1699 mfest = self.changelog.read(self.changelog.node(cset))[0]
1692 mfest = self.changelog.read(self.changelog.node(cset))[0]
1700 mfest = self.manifest.readdelta(mfest)
1693 mfest = self.manifest.readdelta(mfest)
1701 # store file nodes we must see
1694 # store file nodes we must see
1702 for f, n in mfest.iteritems():
1695 for f, n in mfest.iteritems():
1703 needfiles.setdefault(f, set()).add(n)
1696 needfiles.setdefault(f, set()).add(n)
1704
1697
1705 # process the files
1698 # process the files
1706 self.ui.status(_("adding file changes\n"))
1699 self.ui.status(_("adding file changes\n"))
1707 pr.step = 'files'
1700 pr.step = 'files'
1708 pr.count = 1
1701 pr.count = 1
1709 pr.total = efiles
1702 pr.total = efiles
1710 while 1:
1703 while 1:
1711 f = changegroup.getchunk(source)
1704 f = changegroup.getchunk(source)
1712 if not f:
1705 if not f:
1713 break
1706 break
1714 self.ui.debug("adding %s revisions\n" % f)
1707 self.ui.debug("adding %s revisions\n" % f)
1715 pr()
1708 pr()
1716 fl = self.file(f)
1709 fl = self.file(f)
1717 o = len(fl)
1710 o = len(fl)
1718 chunkiter = changegroup.chunkiter(source)
1711 chunkiter = changegroup.chunkiter(source)
1719 if fl.addgroup(chunkiter, revmap, trp) is None:
1712 if fl.addgroup(chunkiter, revmap, trp) is None:
1720 raise util.Abort(_("received file revlog group is empty"))
1713 raise util.Abort(_("received file revlog group is empty"))
1721 revisions += len(fl) - o
1714 revisions += len(fl) - o
1722 files += 1
1715 files += 1
1723 if f in needfiles:
1716 if f in needfiles:
1724 needs = needfiles[f]
1717 needs = needfiles[f]
1725 for new in xrange(o, len(fl)):
1718 for new in xrange(o, len(fl)):
1726 n = fl.node(new)
1719 n = fl.node(new)
1727 if n in needs:
1720 if n in needs:
1728 needs.remove(n)
1721 needs.remove(n)
1729 if not needs:
1722 if not needs:
1730 del needfiles[f]
1723 del needfiles[f]
1731 self.ui.progress(_('files'), None)
1724 self.ui.progress(_('files'), None)
1732
1725
1733 for f, needs in needfiles.iteritems():
1726 for f, needs in needfiles.iteritems():
1734 fl = self.file(f)
1727 fl = self.file(f)
1735 for n in needs:
1728 for n in needs:
1736 try:
1729 try:
1737 fl.rev(n)
1730 fl.rev(n)
1738 except error.LookupError:
1731 except error.LookupError:
1739 raise util.Abort(
1732 raise util.Abort(
1740 _('missing file data for %s:%s - run hg verify') %
1733 _('missing file data for %s:%s - run hg verify') %
1741 (f, hex(n)))
1734 (f, hex(n)))
1742
1735
1743 newheads = len(cl.heads())
1736 newheads = len(cl.heads())
1744 heads = ""
1737 heads = ""
1745 if oldheads and newheads != oldheads:
1738 if oldheads and newheads != oldheads:
1746 heads = _(" (%+d heads)") % (newheads - oldheads)
1739 heads = _(" (%+d heads)") % (newheads - oldheads)
1747
1740
1748 self.ui.status(_("added %d changesets"
1741 self.ui.status(_("added %d changesets"
1749 " with %d changes to %d files%s\n")
1742 " with %d changes to %d files%s\n")
1750 % (changesets, revisions, files, heads))
1743 % (changesets, revisions, files, heads))
1751
1744
1752 if changesets > 0:
1745 if changesets > 0:
1753 p = lambda: cl.writepending() and self.root or ""
1746 p = lambda: cl.writepending() and self.root or ""
1754 self.hook('pretxnchangegroup', throw=True,
1747 self.hook('pretxnchangegroup', throw=True,
1755 node=hex(cl.node(clstart)), source=srctype,
1748 node=hex(cl.node(clstart)), source=srctype,
1756 url=url, pending=p)
1749 url=url, pending=p)
1757
1750
1758 # make changelog see real files again
1751 # make changelog see real files again
1759 cl.finalize(trp)
1752 cl.finalize(trp)
1760
1753
1761 tr.close()
1754 tr.close()
1762 finally:
1755 finally:
1763 tr.release()
1756 tr.release()
1764 if lock:
1757 if lock:
1765 lock.release()
1758 lock.release()
1766
1759
1767 if changesets > 0:
1760 if changesets > 0:
1768 # forcefully update the on-disk branch cache
1761 # forcefully update the on-disk branch cache
1769 self.ui.debug("updating the branch cache\n")
1762 self.ui.debug("updating the branch cache\n")
1770 self.updatebranchcache()
1763 self.updatebranchcache()
1771 self.hook("changegroup", node=hex(cl.node(clstart)),
1764 self.hook("changegroup", node=hex(cl.node(clstart)),
1772 source=srctype, url=url)
1765 source=srctype, url=url)
1773
1766
1774 for i in xrange(clstart, clend):
1767 for i in xrange(clstart, clend):
1775 self.hook("incoming", node=hex(cl.node(i)),
1768 self.hook("incoming", node=hex(cl.node(i)),
1776 source=srctype, url=url)
1769 source=srctype, url=url)
1777
1770
1778 # never return 0 here:
1771 # never return 0 here:
1779 if newheads < oldheads:
1772 if newheads < oldheads:
1780 return newheads - oldheads - 1
1773 return newheads - oldheads - 1
1781 else:
1774 else:
1782 return newheads - oldheads + 1
1775 return newheads - oldheads + 1
1783
1776
1784
1777
def stream_in(self, remote):
    """Clone by copying raw store files streamed from *remote*.

    Wire format read here: first a line with an integer status code
    (0 ok, 1 forbidden, 2 remote lock failed), then a line
    "<total_files> <total_bytes>", then per file a header line
    "<name>\0<size>" followed by exactly <size> bytes of store data.

    Returns len(self.heads()) + 1 so the result is never 0 and can be
    treated like a pull result by callers.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        # for backwards compat, name was partially encoded
        ofp = self.sopener(store.decodedir(name), 'w')
        # filechunkiter with limit=size consumes exactly this file's
        # bytes from the shared stream before the next header is read
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    # guard against division by zero for sub-millisecond transfers
    if elapsed <= 0:
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    # drop cached state so the freshly written store is re-read
    self.invalidate()
    return len(self.heads()) + 1
1832
1825
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''
    # Fix: the previous signature used the mutable default ``heads=[]``,
    # which is shared across all calls; use the None sentinel instead.
    # Behavior is unchanged: an omitted/empty heads still allows a
    # streaming clone and still passes a list to pull().
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
1851
1844
def pushkey(self, namespace, key, old, new):
    """Set *key* in *namespace* from *old* to *new*; delegates to pushkey.push."""
    return pushkey.push(self, namespace, key, old, new)
1854
1847
def listkeys(self, namespace):
    """Return the key/value map for *namespace*; delegates to pushkey.list."""
    return pushkey.list(self, namespace)
1857
1850
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the renames queued in *files*.

    *files* is an iterable of (src, dest) pairs; they are snapshotted
    as tuples immediately so later mutation of the caller's list does
    not affect the callback.
    """
    pending = [tuple(entry) for entry in files]

    def runrenames():
        for source, destination in pending:
            util.rename(source, destination)

    return runrenames
1865
1858
def instance(ui, path, create):
    """Repository-factory hook: open/create a localrepository for a file path."""
    return localrepository(ui, util.drop_scheme('file', path), create)
1868
1861
def islocal(path):
    """Repository-factory hook: repositories of this type are always local."""
    return True
@@ -1,482 +1,492 b''
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
8 import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath
9 from i18n import _
9 from i18n import _
10 import config, util, node, error, cmdutil
10 import config, util, node, error, cmdutil
11 hg = None
11 hg = None
12
12
# (source, revision, kind) tuple used for a subrepo that does not exist
nullstate = ('', '', 'empty')
14
14
def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    # include callback for the config parser: nested %include files are
    # read from the same changectx, recursively via 'read' itself
    def read(f, sections=None, remap=None):
        if f in ctx:
            p.parse(f, ctx[f].data(), sections, remap, read)
        else:
            raise util.Abort(_("subrepo spec file %s not found") % f)

    if '.hgsub' in ctx:
        read('.hgsub')

    # user-level [subpaths] settings participate in the source
    # rewriting applied below
    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    # subrepo path -> revision recorded in .hgsubstate
    rev = {}
    if '.hgsubstate' in ctx:
        try:
            # each line is "<revision> <path>"
            for l in ctx['.hgsubstate'].data().splitlines():
                revision, path = l.split(" ", 1)
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    state = {}
    for path, src in p[''].items():
        # default subrepo kind; an explicit "[kind]source" prefix
        # overrides it
        kind = 'hg'
        if src.startswith('['):
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]

        # apply each [subpaths] rewrite rule (first match only per rule)
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error, e:
                raise util.Abort(_("bad subrepository pattern in %s: %s")
                                 % (p.source('subpaths', pattern), e))

        state[path] = (src.strip(), rev.get(path, ''), kind)

    return state
69
69
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    # one "<revision> <path>" line per subrepo, in path order
    lines = []
    for path in sorted(state):
        lines.append('%s %s\n' % (state[path][1], path))
    repo.wwrite('.hgsubstate', ''.join(lines), '')
75
75
def submerge(repo, wctx, mctx, actx):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context

    Compares the substates of the three contexts and, per subrepo,
    gets/merges/removes the working copy (prompting where both sides
    changed incompatibly), then records the merged .hgsubstate.
    """
    if mctx == actx: # backwards?
        actx = wctx.p1()
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}  # merged substate accumulated here

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))

    # first pass: subrepos known to the local (working) context
    for s, l in s1.items():
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            # "+" suffix marks uncommitted local changes
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?')
                      % (s, l[0], r[0]),
                    (_('&Local'), _('&Remote')), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r)
                sm[s] = r
            else:
                debug(s, "both sides changed, merge with", r)
                wctx.sub(s).merge(r)
                sm[s] = l
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        else:
            # local changed a subrepo the remote removed: ask
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?') % s,
                (_('&Changed'), _('&Delete')), 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    # second pass: subrepos only present on the merge (remote) side
    for s, r in s2.items():
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            # remote changed a subrepo the local side removed: ask
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?') % s,
                (_('&Changed'), _('&Delete')), 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
156
156
def relpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    if not hasattr(sub, '_repo'):
        # non-hg subrepos carry their path directly
        return sub._path

    def _outermost(repo):
        # climb the _subparent chain until the top-level repository
        while hasattr(repo, '_subparent'):
            repo = repo._subparent
        return repo

    top = _outermost(sub._repo)
    # strip "<top.root>/" off the subrepo's own root
    return sub._repo.root[len(top.root) + 1:]
165
165
def _abssource(repo, push=False):
    """return pull/push path of repo - either based on parent repo
    .hgsub info or on the subrepos own config"""
    if hasattr(repo, '_subparent'):
        # we are a subrepo: resolve a relative source against the
        # parent repository's own pull/push path (recursively)
        source = repo._subsource
        if source.startswith('/') or '://' in source:
            # already absolute
            return source
        parent = _abssource(repo._subparent, push)
        if '://' in parent:
            if parent[-1] == '/':
                parent = parent[:-1]
            r = urlparse.urlparse(parent + '/' + source)
            # normalize only the path component; scheme, host, query
            # and fragment pass through untouched
            r = urlparse.urlunparse((r[0], r[1],
                                     posixpath.normpath(r[2]),
                                     r[3], r[4], r[5]))
            return r
        return posixpath.normpath(os.path.join(parent, repo._subsource))
    # top-level repository: use its configured paths, preferring
    # default-push when pushing
    if push and repo.ui.config('paths', 'default-push'):
        return repo.ui.config('paths', 'default-push', repo.root)
    return repo.ui.config('paths', 'default', repo.root)
186
186
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Yield (subpath, subrepo) in sorted path order over the union of
    # both substates.  Paths present in both resolve against ctx1; the
    # ctx2-only paths matter when .hgsub was modified (in ctx2) but
    # not yet committed (in ctx1).
    for subpath in sorted(set(ctx1.substate) | set(ctx2.substate)):
        owner = ctx1 if subpath in ctx1.substate else ctx2
        yield subpath, owner.sub(subpath)
196
def subrepo(ctx, path):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

    # refuse subrepo paths that would escape the repository
    util.path_auditor(ctx._repo.root)(path)
    state = ctx.substate.get(path, nullstate)
    # state[2] is the kind ('hg', 'svn', ...) keying the types dict
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    return types[state[2]](ctx, path, state[:2])
202
212
203 # subrepo classes need to implement the following abstract class:
213 # subrepo classes need to implement the following abstract class:
204
214
class abstractsubrepo(object):
    """Base class defining the interface subrepo types must implement.

    Mutating operations raise NotImplementedError; the query methods
    (checknested, status, diff) default to empty/"nothing" answers.
    """

    def dirty(self):
        """returns true if the dirstate of the subrepo does not match
        current stored state
        """
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository

        Fix: the previous signature was ``checknested(path)`` without
        ``self``, so calling ``sub.checknested(p)`` on a subclass that
        did not override it raised TypeError instead of returning False.
        """
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, force):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def status(self, rev2, **opts):
        """default: report no status information (seven empty lists)"""
        return [], [], [], [], [], [], []

    def diff(self, diffopts, node2, match, prefix, **opts):
        """default: contribute nothing to the diff"""
        pass
254
264
class hgsubrepo(abstractsubrepo):
    """A Mercurial repository embedded as a subrepo of another repo."""

    def __init__(self, ctx, path, state):
        # path: subrepo path relative to the parent's root
        # state: (source, revision) pair from .hgsub/.hgsubstate
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        create = False
        if not os.path.exists(os.path.join(root, '.hg')):
            # not checked out yet: create an empty repo to pull into
            create = True
            util.makedirs(root)
        self._repo = hg.repository(r.ui, root, create=create)
        # remember parent linkage so relpath/_abssource can walk up
        self._repo._subparent = r
        self._repo._subsource = state[0]

        if create:
            # seed the new repo's hgrc with default/default-push paths
            # derived from the parent configuration
            fp = self._repo.opener("hgrc", "w", text=True)
            fp.write('[paths]\n')

            def addpathconfig(key, value):
                fp.write('%s = %s\n' % (key, value))
                self._repo.ui.setconfig('paths', key, value)

            defpath = _abssource(self._repo)
            defpushpath = _abssource(self._repo, True)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)
            fp.close()

    def status(self, rev2, **opts):
        """status between the recorded revision and rev2"""
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError, inst:
            # recorded revision may not exist locally; degrade to empty
            self._repo.ui.warn(_("warning: %s in %s\n")
                               % (inst, relpath(self)))
            return [], [], [], [], [], [], []

    def diff(self, diffopts, node2, match, prefix, **opts):
        """diff between the recorded revision and node2, prefixed so
        paths appear relative to the outer repo"""
        try:
            node1 = node.bin(self._state[1])
            cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
                                   node1, node2, match,
                                   prefix=os.path.join(prefix, self._path),
                                   listsubrepos=True, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_("warning: %s in %s\n")
                               % (inst, relpath(self)))

    def dirty(self):
        r = self._state[1]
        if r == '':
            # no revision recorded yet: always dirty
            return True
        w = self._repo[None]
        if w.p1() != self._repo[r]: # version checked out change
            return True
        return w.dirty() # working directory changed

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    def commit(self, text, user, date):
        self._repo.ui.debug("committing subrepo %s\n" % relpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self._repo.ui.note(_('removing subrepo %s\n') % relpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        # ensure the recorded revision exists locally, pulling from the
        # subrepo's source if it does not
        source, revision, kind = state
        try:
            self._repo.lookup(revision)
        except error.RepoError:
            self._repo._subsource = source
            srcurl = _abssource(self._repo)
            self._repo.ui.status(_('pulling subrepo %s from %s\n')
                                 % (relpath(self), srcurl))
            other = hg.repository(self._repo.ui, srcurl)
            self._repo.pull(other)

    def get(self, state):
        self._get(state)
        source, revision, kind = state
        self._repo.ui.debug("getting subrepo %s\n" % self._path)
        hg.clean(self._repo, revision, False)

    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)
        if anc == cur:
            # fast-forward: target descends from current revision
            self._repo.ui.debug("updating subrepo %s\n" % relpath(self))
            hg.update(self._repo, state[1])
        elif anc == dst:
            # already ahead of the target: nothing to do
            self._repo.ui.debug("skipping subrepo %s\n" % relpath(self))
        else:
            self._repo.ui.debug("merging subrepo %s\n" % relpath(self))
            hg.merge(self._repo, state[1], remind=False)

    def push(self, force):
        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if not c.sub(s).push(force):
                return False

        dsturl = _abssource(self._repo, True)
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                             (relpath(self), dsturl))
        other = hg.repository(self._repo.ui, dsturl)
        return self._repo.push(other, force)
376
386
377 class svnsubrepo(abstractsubrepo):
387 class svnsubrepo(abstractsubrepo):
378 def __init__(self, ctx, path, state):
388 def __init__(self, ctx, path, state):
379 self._path = path
389 self._path = path
380 self._state = state
390 self._state = state
381 self._ctx = ctx
391 self._ctx = ctx
382 self._ui = ctx._repo.ui
392 self._ui = ctx._repo.ui
383
393
384 def _svncommand(self, commands, filename=''):
394 def _svncommand(self, commands, filename=''):
385 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
395 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
386 cmd = ['svn'] + commands + [path]
396 cmd = ['svn'] + commands + [path]
387 cmd = [util.shellquote(arg) for arg in cmd]
397 cmd = [util.shellquote(arg) for arg in cmd]
388 cmd = util.quotecommand(' '.join(cmd))
398 cmd = util.quotecommand(' '.join(cmd))
389 env = dict(os.environ)
399 env = dict(os.environ)
390 # Avoid localized output, preserve current locale for everything else.
400 # Avoid localized output, preserve current locale for everything else.
391 env['LC_MESSAGES'] = 'C'
401 env['LC_MESSAGES'] = 'C'
392 write, read, err = util.popen3(cmd, env=env, newlines=True)
402 write, read, err = util.popen3(cmd, env=env, newlines=True)
393 retdata = read.read()
403 retdata = read.read()
394 err = err.read().strip()
404 err = err.read().strip()
395 if err:
405 if err:
396 raise util.Abort(err)
406 raise util.Abort(err)
397 return retdata
407 return retdata
398
408
399 def _wcrev(self):
409 def _wcrev(self):
400 output = self._svncommand(['info', '--xml'])
410 output = self._svncommand(['info', '--xml'])
401 doc = xml.dom.minidom.parseString(output)
411 doc = xml.dom.minidom.parseString(output)
402 entries = doc.getElementsByTagName('entry')
412 entries = doc.getElementsByTagName('entry')
403 if not entries:
413 if not entries:
404 return 0
414 return 0
405 return int(entries[0].getAttribute('revision') or 0)
415 return int(entries[0].getAttribute('revision') or 0)
406
416
407 def _wcchanged(self):
417 def _wcchanged(self):
408 """Return (changes, extchanges) where changes is True
418 """Return (changes, extchanges) where changes is True
409 if the working directory was changed, and extchanges is
419 if the working directory was changed, and extchanges is
410 True if any of these changes concern an external entry.
420 True if any of these changes concern an external entry.
411 """
421 """
412 output = self._svncommand(['status', '--xml'])
422 output = self._svncommand(['status', '--xml'])
413 externals, changes = [], []
423 externals, changes = [], []
414 doc = xml.dom.minidom.parseString(output)
424 doc = xml.dom.minidom.parseString(output)
415 for e in doc.getElementsByTagName('entry'):
425 for e in doc.getElementsByTagName('entry'):
416 s = e.getElementsByTagName('wc-status')
426 s = e.getElementsByTagName('wc-status')
417 if not s:
427 if not s:
418 continue
428 continue
419 item = s[0].getAttribute('item')
429 item = s[0].getAttribute('item')
420 props = s[0].getAttribute('props')
430 props = s[0].getAttribute('props')
421 path = e.getAttribute('path')
431 path = e.getAttribute('path')
422 if item == 'external':
432 if item == 'external':
423 externals.append(path)
433 externals.append(path)
424 if (item not in ('', 'normal', 'unversioned', 'external')
434 if (item not in ('', 'normal', 'unversioned', 'external')
425 or props not in ('', 'none')):
435 or props not in ('', 'none')):
426 changes.append(path)
436 changes.append(path)
427 for path in changes:
437 for path in changes:
428 for ext in externals:
438 for ext in externals:
429 if path == ext or path.startswith(ext + os.sep):
439 if path == ext or path.startswith(ext + os.sep):
430 return True, True
440 return True, True
431 return bool(changes), False
441 return bool(changes), False
432
442
433 def dirty(self):
443 def dirty(self):
434 if self._wcrev() == self._state[1] and not self._wcchanged()[0]:
444 if self._wcrev() == self._state[1] and not self._wcchanged()[0]:
435 return False
445 return False
436 return True
446 return True
437
447
438 def commit(self, text, user, date):
448 def commit(self, text, user, date):
439 # user and date are out of our hands since svn is centralized
449 # user and date are out of our hands since svn is centralized
440 changed, extchanged = self._wcchanged()
450 changed, extchanged = self._wcchanged()
441 if not changed:
451 if not changed:
442 return self._wcrev()
452 return self._wcrev()
443 if extchanged:
453 if extchanged:
444 # Do not try to commit externals
454 # Do not try to commit externals
445 raise util.Abort(_('cannot commit svn externals'))
455 raise util.Abort(_('cannot commit svn externals'))
446 commitinfo = self._svncommand(['commit', '-m', text])
456 commitinfo = self._svncommand(['commit', '-m', text])
447 self._ui.status(commitinfo)
457 self._ui.status(commitinfo)
448 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
458 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
449 if not newrev:
459 if not newrev:
450 raise util.Abort(commitinfo.splitlines()[-1])
460 raise util.Abort(commitinfo.splitlines()[-1])
451 newrev = newrev.groups()[0]
461 newrev = newrev.groups()[0]
452 self._ui.status(self._svncommand(['update', '-r', newrev]))
462 self._ui.status(self._svncommand(['update', '-r', newrev]))
453 return newrev
463 return newrev
454
464
455 def remove(self):
465 def remove(self):
456 if self.dirty():
466 if self.dirty():
457 self._ui.warn(_('not removing repo %s because '
467 self._ui.warn(_('not removing repo %s because '
458 'it has changes.\n' % self._path))
468 'it has changes.\n' % self._path))
459 return
469 return
460 self._ui.note(_('removing subrepo %s\n') % self._path)
470 self._ui.note(_('removing subrepo %s\n') % self._path)
461 shutil.rmtree(self._ctx.repo.join(self._path))
471 shutil.rmtree(self._ctx.repo.join(self._path))
462
472
463 def get(self, state):
473 def get(self, state):
464 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
474 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
465 if not re.search('Checked out revision [0-9]+.', status):
475 if not re.search('Checked out revision [0-9]+.', status):
466 raise util.Abort(status.splitlines()[-1])
476 raise util.Abort(status.splitlines()[-1])
467 self._ui.status(status)
477 self._ui.status(status)
468
478
469 def merge(self, state):
479 def merge(self, state):
470 old = int(self._state[1])
480 old = int(self._state[1])
471 new = int(state[1])
481 new = int(state[1])
472 if new > old:
482 if new > old:
473 self.get(state)
483 self.get(state)
474
484
475 def push(self, force):
485 def push(self, force):
476 # push is a no-op for SVN
486 # push is a no-op for SVN
477 return True
487 return True
478
488
479 types = {
489 types = {
480 'hg': hgsubrepo,
490 'hg': hgsubrepo,
481 'svn': svnsubrepo,
491 'svn': svnsubrepo,
482 }
492 }
General Comments 0
You need to be logged in to leave comments. Login now