##// END OF EJS Templates
move path_auditor from util to scmutil
Adrian Buehlmann -
r13972:d1f4e7fd default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,1391 +1,1391 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, glob, tempfile
10 import os, sys, errno, re, glob, tempfile
11 import util, scmutil, templater, patch, error, encoding, templatekw
11 import util, scmutil, templater, patch, error, encoding, templatekw
12 import match as matchmod
12 import match as matchmod
13 import similar, revset, subrepo
13 import similar, revset, subrepo
14
14
15 revrangesep = ':'
15 revrangesep = ':'
16
16
def parsealiases(cmd):
    """Return the list of alias names encoded in a command table key.

    A key looks like "^status|st"; the leading '^' (shown-in-help marker)
    is stripped and the remainder split on '|'.
    """
    stripped = cmd.lstrip("^")
    return stripped.split("|")
19
19
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    normal = {}
    debug = {}
    for key in table.keys():
        # alias list encoded in the table key, e.g. "^add|a"
        aliases = key.lstrip("^").split("|")
        matched = None
        if cmd in aliases:
            matched = cmd
        elif not strict:
            # allow unambiguous prefixes when not strict
            for alias in aliases:
                if alias.startswith(cmd):
                    matched = alias
                    break
        if matched is None:
            continue
        entry = (aliases, table[key])
        if aliases[0].startswith("debug") or matched.startswith("debug"):
            debug[matched] = entry
        else:
            normal[matched] = entry

    # debug commands are only offered when nothing normal matched
    if not normal and debug:
        return debug
    return normal
48
48
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice = findpossible(cmd, table, strict)

    # an exact match always wins over prefix matches
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # several distinct commands share this prefix
        raise error.AmbiguousCommand(cmd, sorted(choice.keys()))

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd)
65
65
def findrepo(p):
    """Walk up from p looking for a directory containing '.hg'.

    Returns the repository root, or None if the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        prev = p
        p = os.path.dirname(p)
        if p == prev:
            # dirname() stopped making progress: we hit the root
            return None
    return p
73
73
def bail_if_changed(repo):
    """Abort if the working directory has an uncommitted merge or any
    modified/added/removed/deleted files."""
    if repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    status = repo.status()[:4]
    if status[0] or status[1] or status[2] or status[3]:
        raise util.Abort(_("outstanding uncommitted changes"))
80
80
81 def logmessage(opts):
81 def logmessage(opts):
82 """ get the log message according to -m and -l option """
82 """ get the log message according to -m and -l option """
83 message = opts.get('message')
83 message = opts.get('message')
84 logfile = opts.get('logfile')
84 logfile = opts.get('logfile')
85
85
86 if message and logfile:
86 if message and logfile:
87 raise util.Abort(_('options --message and --logfile are mutually '
87 raise util.Abort(_('options --message and --logfile are mutually '
88 'exclusive'))
88 'exclusive'))
89 if not message and logfile:
89 if not message and logfile:
90 try:
90 try:
91 if logfile == '-':
91 if logfile == '-':
92 message = sys.stdin.read()
92 message = sys.stdin.read()
93 else:
93 else:
94 message = open(logfile).read()
94 message = open(logfile).read()
95 except IOError, inst:
95 except IOError, inst:
96 raise util.Abort(_("can't read commit message '%s': %s") %
96 raise util.Abort(_("can't read commit message '%s': %s") %
97 (logfile, inst.strerror))
97 (logfile, inst.strerror))
98 return message
98 return message
99
99
def loglimit(opts):
    """get the log limit according to option -l/--limit

    Returns the limit as a positive int, or None when no (or an empty)
    limit was given.  Raises util.Abort for non-integer or non-positive
    values.
    """
    raw = opts.get('limit')
    if not raw:
        return None
    try:
        value = int(raw)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if value <= 0:
        raise util.Abort(_('limit must be positive'))
    return value
113
113
def revsingle(repo, revspec, default='.'):
    """Resolve revspec to a single changectx (the last of its range).

    An empty revspec yields repo[default]; an empty resolved set aborts.
    """
    if not revspec:
        return repo[default]

    revs = revrange(repo, [revspec])
    if not revs:
        raise util.Abort(_('empty revision set'))
    return repo[revs[-1]]
122
122
def revpair(repo, revs):
    """Resolve revs to a pair of nodes.

    The second element is None when the specs resolve to a single
    revision; with no specs (or an empty result) the first dirstate
    parent is used.
    """
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    if not resolved:
        return repo.dirstate.p1(), None

    if len(resolved) == 1:
        return repo.lookup(resolved[0]), None

    return repo.lookup(resolved[0]), repo.lookup(resolved[-1])
136
136
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty component of an old-style range falls back to defval;
        # the 'val != 0' guard keeps integer 0 from being treated as empty
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen, l = set(), []
    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                # already a revision number; take it as-is
                seen.add(spec)
                l.append(spec)
                continue

            if revrangesep in spec:
                # old-style 'start:end' range, inclusive on both ends;
                # may run backwards when start > end
                start, end = spec.split(revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                step = start > end and -1 or 1
                for rev in xrange(start, end + step, step):
                    if rev in seen:
                        continue
                    seen.add(rev)
                    l.append(rev)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            # not resolvable as an old-style spec; try revset syntax below
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(spec)
        for r in m(repo, range(len(repo))):
            if r not in seen:
                l.append(r)
        seen.update(l)

    return l
184
184
def make_filename(repo, pat, node,
                  total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in the output filename pattern pat.

    Escapes (availability depends on the arguments supplied):
    %H full hex node, %R revision number, %h short hex node,
    %% literal percent, %b basename of the repo root, %r zero-padded
    revision number, %N total patch count, %n (zero-padded) sequence
    number, %s basename / %d dirname / %p full path of pathname.

    Raises util.Abort on an escape that is not available.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        # when both are known, pad %n to the width of the total count
        if total is not None and seqno is not None:
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                # consume the character following '%' and expand it
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
229
229
def make_file(repo, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Open the output (or input) target described by pat.

    pat may be empty/'-' (a dup of stdout/stdin is returned), an already
    open file-like object (returned as-is when its direction matches
    mode), or a filename pattern expanded via make_filename.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        # no pattern: hand back a duplicate of the matching std stream
        fp = writable and sys.stdout or sys.stdin
        return os.fdopen(os.dup(fp.fileno()), mode)
    if writable and hasattr(pat, 'write'):
        return pat
    if 'r' in mode and hasattr(pat, 'read'):
        return pat
    name = make_filename(repo, pat, node, total, seqno, revwidth, pathname)
    return open(name, mode)
245
245
def expandpats(pats):
    """Expand kind-less patterns with shell-style globbing.

    Only done when util.expandglobs says the shell has not already
    expanded them (e.g. on Windows).  Patterns with an explicit kind
    prefix, and globs that match nothing, pass through unchanged.
    """
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is None:
            try:
                matches = glob.glob(name)
            except re.error:
                matches = [name]
            if matches:
                expanded.extend(matches)
                continue
        expanded.append(pat)
    return expanded
262
262
def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
    """Build a matcher from command-line patterns and include/exclude opts.

    The matcher's bad-file hook is wired to warn through repo.ui using
    paths relative to the cwd.
    """
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        # expand shell globs ourselves unless the caller already did
        pats = expandpats(pats or [])
    matcher = matchmod.match(repo.root, repo.getcwd(), pats,
                             opts.get('include'), opts.get('exclude'),
                             default, auditor=repo.auditor)

    def badfn(f, msg):
        repo.ui.warn("%s: %s\n" % (matcher.rel(f), msg))

    matcher.bad = badfn
    return matcher
275
275
def matchall(repo):
    """Return a matcher that matches every file in the repository."""
    return matchmod.always(repo.root, repo.getcwd())
278
278
def matchfiles(repo, files):
    """Return a matcher that matches exactly the given list of files."""
    return matchmod.exact(repo.root, repo.getcwd(), files)
281
281
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    """Add new files and forget missing files matching pats.

    With similarity > 0, additionally record add/remove pairs above the
    similarity threshold as renames.  With dry_run, only report what
    would be done.
    """
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = scmutil.path_auditor(repo.root)
    m = match(repo, pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        except Exception:
            # the auditor rejects unsafe paths by raising; treat those as
            # not addable but keep walking.  Catch Exception rather than
            # using a bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate.
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
                  or (os.path.isdir(target) and not os.path.islink(target))):
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.remove(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()
334
334
def updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    if not patches:
        return
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        # express patched files relative to cwd for the addremove call below
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        # collect copy/rename pairs and explicit deletions from the
        # git-style patch metadata
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    if (not similarity) and removes:
        # with similarity enabled, removals are instead left for
        # addremove's rename detection
        wctx.remove(sorted(removes), True)

    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
            util.set_flags(dst, islink, isexec)
    addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)
377
377
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src

    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return

    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # the source was only ever added, so there is no committed copy
        # data to record; just make sure dst is tracked
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
396
396
397 def copy(ui, repo, pats, opts, rename=False):
397 def copy(ui, repo, pats, opts, rename=False):
398 # called with the repo lock held
398 # called with the repo lock held
399 #
399 #
400 # hgsep => pathname that uses "/" to separate directories
400 # hgsep => pathname that uses "/" to separate directories
401 # ossep => pathname that uses os.sep to separate directories
401 # ossep => pathname that uses os.sep to separate directories
402 cwd = repo.getcwd()
402 cwd = repo.getcwd()
403 targets = {}
403 targets = {}
404 after = opts.get("after")
404 after = opts.get("after")
405 dryrun = opts.get("dry_run")
405 dryrun = opts.get("dry_run")
406 wctx = repo[None]
406 wctx = repo[None]
407
407
408 def walkpat(pat):
408 def walkpat(pat):
409 srcs = []
409 srcs = []
410 badstates = after and '?' or '?r'
410 badstates = after and '?' or '?r'
411 m = match(repo, [pat], opts, globbed=True)
411 m = match(repo, [pat], opts, globbed=True)
412 for abs in repo.walk(m):
412 for abs in repo.walk(m):
413 state = repo.dirstate[abs]
413 state = repo.dirstate[abs]
414 rel = m.rel(abs)
414 rel = m.rel(abs)
415 exact = m.exact(abs)
415 exact = m.exact(abs)
416 if state in badstates:
416 if state in badstates:
417 if exact and state == '?':
417 if exact and state == '?':
418 ui.warn(_('%s: not copying - file is not managed\n') % rel)
418 ui.warn(_('%s: not copying - file is not managed\n') % rel)
419 if exact and state == 'r':
419 if exact and state == 'r':
420 ui.warn(_('%s: not copying - file has been marked for'
420 ui.warn(_('%s: not copying - file has been marked for'
421 ' remove\n') % rel)
421 ' remove\n') % rel)
422 continue
422 continue
423 # abs: hgsep
423 # abs: hgsep
424 # rel: ossep
424 # rel: ossep
425 srcs.append((abs, rel, exact))
425 srcs.append((abs, rel, exact))
426 return srcs
426 return srcs
427
427
428 # abssrc: hgsep
428 # abssrc: hgsep
429 # relsrc: ossep
429 # relsrc: ossep
430 # otarget: ossep
430 # otarget: ossep
431 def copyfile(abssrc, relsrc, otarget, exact):
431 def copyfile(abssrc, relsrc, otarget, exact):
432 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
432 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
433 reltarget = repo.pathto(abstarget, cwd)
433 reltarget = repo.pathto(abstarget, cwd)
434 target = repo.wjoin(abstarget)
434 target = repo.wjoin(abstarget)
435 src = repo.wjoin(abssrc)
435 src = repo.wjoin(abssrc)
436 state = repo.dirstate[abstarget]
436 state = repo.dirstate[abstarget]
437
437
438 scmutil.checkportable(ui, abstarget)
438 scmutil.checkportable(ui, abstarget)
439
439
440 # check for collisions
440 # check for collisions
441 prevsrc = targets.get(abstarget)
441 prevsrc = targets.get(abstarget)
442 if prevsrc is not None:
442 if prevsrc is not None:
443 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
443 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
444 (reltarget, repo.pathto(abssrc, cwd),
444 (reltarget, repo.pathto(abssrc, cwd),
445 repo.pathto(prevsrc, cwd)))
445 repo.pathto(prevsrc, cwd)))
446 return
446 return
447
447
448 # check for overwrites
448 # check for overwrites
449 exists = os.path.lexists(target)
449 exists = os.path.lexists(target)
450 if not after and exists or after and state in 'mn':
450 if not after and exists or after and state in 'mn':
451 if not opts['force']:
451 if not opts['force']:
452 ui.warn(_('%s: not overwriting - file exists\n') %
452 ui.warn(_('%s: not overwriting - file exists\n') %
453 reltarget)
453 reltarget)
454 return
454 return
455
455
456 if after:
456 if after:
457 if not exists:
457 if not exists:
458 if rename:
458 if rename:
459 ui.warn(_('%s: not recording move - %s does not exist\n') %
459 ui.warn(_('%s: not recording move - %s does not exist\n') %
460 (relsrc, reltarget))
460 (relsrc, reltarget))
461 else:
461 else:
462 ui.warn(_('%s: not recording copy - %s does not exist\n') %
462 ui.warn(_('%s: not recording copy - %s does not exist\n') %
463 (relsrc, reltarget))
463 (relsrc, reltarget))
464 return
464 return
465 elif not dryrun:
465 elif not dryrun:
466 try:
466 try:
467 if exists:
467 if exists:
468 os.unlink(target)
468 os.unlink(target)
469 targetdir = os.path.dirname(target) or '.'
469 targetdir = os.path.dirname(target) or '.'
470 if not os.path.isdir(targetdir):
470 if not os.path.isdir(targetdir):
471 os.makedirs(targetdir)
471 os.makedirs(targetdir)
472 util.copyfile(src, target)
472 util.copyfile(src, target)
473 except IOError, inst:
473 except IOError, inst:
474 if inst.errno == errno.ENOENT:
474 if inst.errno == errno.ENOENT:
475 ui.warn(_('%s: deleted in working copy\n') % relsrc)
475 ui.warn(_('%s: deleted in working copy\n') % relsrc)
476 else:
476 else:
477 ui.warn(_('%s: cannot copy - %s\n') %
477 ui.warn(_('%s: cannot copy - %s\n') %
478 (relsrc, inst.strerror))
478 (relsrc, inst.strerror))
479 return True # report a failure
479 return True # report a failure
480
480
481 if ui.verbose or not exact:
481 if ui.verbose or not exact:
482 if rename:
482 if rename:
483 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
483 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
484 else:
484 else:
485 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
485 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
486
486
487 targets[abstarget] = abssrc
487 targets[abstarget] = abssrc
488
488
489 # fix up dirstate
489 # fix up dirstate
490 dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd)
490 dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd)
491 if rename and not dryrun:
491 if rename and not dryrun:
492 wctx.remove([abssrc], not after)
492 wctx.remove([abssrc], not after)
493
493
494 # pat: ossep
494 # pat: ossep
495 # dest ossep
495 # dest ossep
496 # srcs: list of (hgsep, hgsep, ossep, bool)
496 # srcs: list of (hgsep, hgsep, ossep, bool)
497 # return: function that takes hgsep and returns ossep
497 # return: function that takes hgsep and returns ossep
498 def targetpathfn(pat, dest, srcs):
498 def targetpathfn(pat, dest, srcs):
499 if os.path.isdir(pat):
499 if os.path.isdir(pat):
500 abspfx = scmutil.canonpath(repo.root, cwd, pat)
500 abspfx = scmutil.canonpath(repo.root, cwd, pat)
501 abspfx = util.localpath(abspfx)
501 abspfx = util.localpath(abspfx)
502 if destdirexists:
502 if destdirexists:
503 striplen = len(os.path.split(abspfx)[0])
503 striplen = len(os.path.split(abspfx)[0])
504 else:
504 else:
505 striplen = len(abspfx)
505 striplen = len(abspfx)
506 if striplen:
506 if striplen:
507 striplen += len(os.sep)
507 striplen += len(os.sep)
508 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
508 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
509 elif destdirexists:
509 elif destdirexists:
510 res = lambda p: os.path.join(dest,
510 res = lambda p: os.path.join(dest,
511 os.path.basename(util.localpath(p)))
511 os.path.basename(util.localpath(p)))
512 else:
512 else:
513 res = lambda p: dest
513 res = lambda p: dest
514 return res
514 return res
515
515
516 # pat: ossep
516 # pat: ossep
517 # dest ossep
517 # dest ossep
518 # srcs: list of (hgsep, hgsep, ossep, bool)
518 # srcs: list of (hgsep, hgsep, ossep, bool)
519 # return: function that takes hgsep and returns ossep
519 # return: function that takes hgsep and returns ossep
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        """Build the target-path function used with --after.

        Unlike targetpathfn, the sources may no longer exist on disk, so
        the destination layout is inferred from what already exists under
        dest.  Relies on the enclosing copy()'s closure variables
        repo, cwd and destdirexists.
        """
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already have a counterpart
                    # under dest when striplen chars are cut from the
                    # source path; higher score = better guess
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                # try both interpretations and keep the strip length that
                # matches more existing files under dest
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res
559
559
560
560
561 pats = expandpats(pats)
561 pats = expandpats(pats)
562 if not pats:
562 if not pats:
563 raise util.Abort(_('no source or destination specified'))
563 raise util.Abort(_('no source or destination specified'))
564 if len(pats) == 1:
564 if len(pats) == 1:
565 raise util.Abort(_('no destination specified'))
565 raise util.Abort(_('no destination specified'))
566 dest = pats.pop()
566 dest = pats.pop()
567 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
567 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
568 if not destdirexists:
568 if not destdirexists:
569 if len(pats) > 1 or matchmod.patkind(pats[0]):
569 if len(pats) > 1 or matchmod.patkind(pats[0]):
570 raise util.Abort(_('with multiple sources, destination must be an '
570 raise util.Abort(_('with multiple sources, destination must be an '
571 'existing directory'))
571 'existing directory'))
572 if util.endswithsep(dest):
572 if util.endswithsep(dest):
573 raise util.Abort(_('destination %s is not a directory') % dest)
573 raise util.Abort(_('destination %s is not a directory') % dest)
574
574
575 tfn = targetpathfn
575 tfn = targetpathfn
576 if after:
576 if after:
577 tfn = targetpathafterfn
577 tfn = targetpathafterfn
578 copylist = []
578 copylist = []
579 for pat in pats:
579 for pat in pats:
580 srcs = walkpat(pat)
580 srcs = walkpat(pat)
581 if not srcs:
581 if not srcs:
582 continue
582 continue
583 copylist.append((tfn(pat, dest, srcs), srcs))
583 copylist.append((tfn(pat, dest, srcs), srcs))
584 if not copylist:
584 if not copylist:
585 raise util.Abort(_('no files to copy'))
585 raise util.Abort(_('no files to copy'))
586
586
587 errors = 0
587 errors = 0
588 for targetpath, srcs in copylist:
588 for targetpath, srcs in copylist:
589 for abssrc, relsrc, exact in srcs:
589 for abssrc, relsrc, exact in srcs:
590 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
590 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
591 errors += 1
591 errors += 1
592
592
593 if errors:
593 if errors:
594 ui.warn(_('(consider using --after)\n'))
594 ui.warn(_('(consider using --after)\n'))
595
595
596 return errors != 0
596 return errors != 0
597
597
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    opts -- command options; 'daemon', 'daemon_pipefds' and 'pid_file'
            are consulted here
    parentfn -- called in the parent with the child's pid after a
                successful daemon fork-and-exec
    initfn -- called before writing the pid file / daemonizing
    runfn -- the actual service body; its return value is returned
    logfile -- if set, child's stdout/stderr are appended to this file
    runargs -- argv to re-exec for the daemon child (defaults to the
               current hg invocation)
    appendpid -- append to the pid file instead of overwriting it
    '''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # separate-argument form: drop the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child removes lockpath once it is up; poll for that
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            # best-effort cleanup; the child normally unlinks it first
            try:
                os.unlink(lockpath)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    # --- from here on we are the service process itself ---
    if initfn:
        initfn()

    if opts['pid_file']:
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # we are the daemon child: detach from the session, tell the
        # parent we are up (by removing the lock file), and redirect
        # the standard descriptors
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # not available on this platform (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(util.nulldev, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        # stdin <- /dev/null, stdout/stderr <- logfile (or /dev/null)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
669
669
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    repo -- repository to export from
    revs -- sequence of revision identifiers to export; if empty,
            nothing is written
    template -- filename template handed to make_file() when fp is not
                supplied
    fp -- optional file object that receives all patches
    switch_parent -- diff merges against their second parent
    opts -- diff options forwarded to patch.diff()
    '''

    if not revs:
        # guard: max() below raises ValueError on an empty sequence
        return
    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])

    def single(rev, seqno, fp):
        # write one changeset as a patch; opens an output file from
        # 'template' unless a file object was supplied by the caller
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp:
            fp = make_file(repo, template, node, total=total, seqno=seqno,
                           revwidth=revwidth, mode='ab')
            if fp != template:
                # we opened it here, so we are responsible for closing it
                shouldclose = True
        if fp != sys.stdout and hasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        # patch header: metadata lines understood by 'hg import'
        fp.write("# HG changeset patch\n")
        fp.write("# User %s\n" % ctx.user())
        fp.write("# Date %d %d\n" % ctx.date())
        if branch and branch != 'default':
            fp.write("# Branch %s\n" % branch)
        fp.write("# Node ID %s\n" % hex(node))
        fp.write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            fp.write("# Parent %s\n" % hex(parents[1]))
        fp.write(ctx.description().rstrip())
        fp.write("\n\n")

        for chunk in patch.diff(repo, prev, node, opts=opts):
            fp.write(chunk)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
715
715
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.

    Writes either a full diff (stat=False) or a diffstat summary
    (stat=True) between node1 and node2 to the ui, or to fp if given
    (fp output loses labels).  With listsubrepos, also descends into
    subrepositories changed between the two contexts.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # drop label/keyword args: plain file objects don't take them
            fp.write(s)

    if stat:
        # diffstat doesn't need context lines; trim them for speed
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if node2 is not None:
                # NOTE(review): node2 is rebound to the subrepo's node
                # here, so later iterations test the previous subrepo's
                # node in the 'is not None' check above -- confirm this
                # is intended (it stays non-None, so behavior holds).
                node2 = ctx2.substate[subpath][1]
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(diffopts, node2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
751
751
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered    # if True, show() collects into self.hunk
        self.patch = patch          # matcher for files to diff, or False
        self.diffopts = diffopts    # dict with 'patch'/'stat' options
        self.header = {}            # rev -> rendered header (buffered mode)
        self.hunk = {}              # rev -> rendered changeset (buffered mode)
        self.lastheader = None      # last header written, to avoid repeats
        self.footer = None

    def flush(self, rev):
        """Write buffered output for rev; return 1 if anything was shown."""
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the footer once, at the very end of output
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Render ctx; buffer the output per-rev when self.buffered."""
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        # quiet mode: just "rev:shortnode"
        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        # full 40-char hashes in debug mode, short ones otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset')

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')
        for bookmark in self.repo.nodebookmarks(changenode):
            self.ui.write(_("bookmark: %s\n") % bookmark,
                          label='log.bookmark')
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag,
                          label='log.tag')
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent,
                          label='log.parent')

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # debug: modified/added/removed file lists from status
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                # verbose: the full description
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # normal: only the first line as a summary
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        """Show the diffstat and/or diff for node, per self.diffopts."""
        if not matchfn:
            matchfn = self.patch
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
907
907
908
908
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        # short (12-char) node hashes unless debugging
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        # assemble the keyword namespace the templater evaluates against
        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        # later entries win: debug > quiet > verbose > base
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                if not self.footer:
                    # footer is rendered once and emitted by close()
                    self.footer = templater.stringify(self.t(types['footer'],
                                                      **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1010
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    patch = False
    if opts.get('patch') or opts.get('stat'):
        # match everything so diffs cover all files of each changeset
        patch = matchall(repo)

    tmpl = opts.get('template')
    style = None
    if tmpl:
        tmpl = templater.parsestring(tmpl, quoted=False)
    else:
        style = opts.get('style')

    # ui settings
    if not (tmpl or style):
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            tmpl = templater.parsestring(tmpl)
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    # no template/style configured anywhere: plain printer
    if not (tmpl or style):
        return changeset_printer(ui, repo, patch, opts, buffered)

    mapfile = None
    if style and not tmpl:
        mapfile = style
        # a bare style name resolves against the bundled template dirs
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname

    try:
        t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    if tmpl:
        t.use_template(tmpl)
    return t
1061
1061
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    # predicate over timestamps built from the user-supplied date spec
    datematch = util.matchdate(date)
    matched = {}

    def collect(ctx, fns):
        # record the date of every changeset that satisfies the spec;
        # invoked by walkchangerevs before each context is yielded
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    # walk all revisions; the first yielded context that was recorded
    # by collect() is the tipmost match
    for ctx in walkchangerevs(repo, matchall(repo), {'rev': None}, collect):
        when = matched.get(ctx.rev())
        if when is not None:
            ui.status(_("Found revision %s from %s\n") %
                      (ctx.rev(), util.datestr(when)))
            return str(ctx.rev())

    raise util.Abort(_("revision matching date not found"))
1082
1082
1083 def walkchangerevs(repo, match, opts, prepare):
1083 def walkchangerevs(repo, match, opts, prepare):
1084 '''Iterate over files and the revs in which they changed.
1084 '''Iterate over files and the revs in which they changed.
1085
1085
1086 Callers most commonly need to iterate backwards over the history
1086 Callers most commonly need to iterate backwards over the history
1087 in which they are interested. Doing so has awful (quadratic-looking)
1087 in which they are interested. Doing so has awful (quadratic-looking)
1088 performance, so we use iterators in a "windowed" way.
1088 performance, so we use iterators in a "windowed" way.
1089
1089
1090 We walk a window of revisions in the desired order. Within the
1090 We walk a window of revisions in the desired order. Within the
1091 window, we first walk forwards to gather data, then in the desired
1091 window, we first walk forwards to gather data, then in the desired
1092 order (usually backwards) to display it.
1092 order (usually backwards) to display it.
1093
1093
1094 This function returns an iterator yielding contexts. Before
1094 This function returns an iterator yielding contexts. Before
1095 yielding each context, the iterator will first call the prepare
1095 yielding each context, the iterator will first call the prepare
1096 function on each context in the window in forward order.'''
1096 function on each context in the window in forward order.'''
1097
1097
1098 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1098 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1099 if start < end:
1099 if start < end:
1100 while start < end:
1100 while start < end:
1101 yield start, min(windowsize, end - start)
1101 yield start, min(windowsize, end - start)
1102 start += windowsize
1102 start += windowsize
1103 if windowsize < sizelimit:
1103 if windowsize < sizelimit:
1104 windowsize *= 2
1104 windowsize *= 2
1105 else:
1105 else:
1106 while start > end:
1106 while start > end:
1107 yield start, min(windowsize, start - end - 1)
1107 yield start, min(windowsize, start - end - 1)
1108 start -= windowsize
1108 start -= windowsize
1109 if windowsize < sizelimit:
1109 if windowsize < sizelimit:
1110 windowsize *= 2
1110 windowsize *= 2
1111
1111
1112 follow = opts.get('follow') or opts.get('follow_first')
1112 follow = opts.get('follow') or opts.get('follow_first')
1113
1113
1114 if not len(repo):
1114 if not len(repo):
1115 return []
1115 return []
1116
1116
1117 if follow:
1117 if follow:
1118 defrange = '%s:0' % repo['.'].rev()
1118 defrange = '%s:0' % repo['.'].rev()
1119 else:
1119 else:
1120 defrange = '-1:0'
1120 defrange = '-1:0'
1121 revs = revrange(repo, opts['rev'] or [defrange])
1121 revs = revrange(repo, opts['rev'] or [defrange])
1122 if not revs:
1122 if not revs:
1123 return []
1123 return []
1124 wanted = set()
1124 wanted = set()
1125 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1125 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1126 fncache = {}
1126 fncache = {}
1127 change = util.cachefunc(repo.changectx)
1127 change = util.cachefunc(repo.changectx)
1128
1128
1129 # First step is to fill wanted, the set of revisions that we want to yield.
1129 # First step is to fill wanted, the set of revisions that we want to yield.
1130 # When it does not induce extra cost, we also fill fncache for revisions in
1130 # When it does not induce extra cost, we also fill fncache for revisions in
1131 # wanted: a cache of filenames that were changed (ctx.files()) and that
1131 # wanted: a cache of filenames that were changed (ctx.files()) and that
1132 # match the file filtering conditions.
1132 # match the file filtering conditions.
1133
1133
1134 if not slowpath and not match.files():
1134 if not slowpath and not match.files():
1135 # No files, no patterns. Display all revs.
1135 # No files, no patterns. Display all revs.
1136 wanted = set(revs)
1136 wanted = set(revs)
1137 copies = []
1137 copies = []
1138
1138
1139 if not slowpath:
1139 if not slowpath:
1140 # We only have to read through the filelog to find wanted revisions
1140 # We only have to read through the filelog to find wanted revisions
1141
1141
1142 minrev, maxrev = min(revs), max(revs)
1142 minrev, maxrev = min(revs), max(revs)
1143 def filerevgen(filelog, last):
1143 def filerevgen(filelog, last):
1144 """
1144 """
1145 Only files, no patterns. Check the history of each file.
1145 Only files, no patterns. Check the history of each file.
1146
1146
1147 Examines filelog entries within minrev, maxrev linkrev range
1147 Examines filelog entries within minrev, maxrev linkrev range
1148 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1148 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1149 tuples in backwards order
1149 tuples in backwards order
1150 """
1150 """
1151 cl_count = len(repo)
1151 cl_count = len(repo)
1152 revs = []
1152 revs = []
1153 for j in xrange(0, last + 1):
1153 for j in xrange(0, last + 1):
1154 linkrev = filelog.linkrev(j)
1154 linkrev = filelog.linkrev(j)
1155 if linkrev < minrev:
1155 if linkrev < minrev:
1156 continue
1156 continue
1157 # only yield rev for which we have the changelog, it can
1157 # only yield rev for which we have the changelog, it can
1158 # happen while doing "hg log" during a pull or commit
1158 # happen while doing "hg log" during a pull or commit
1159 if linkrev >= cl_count:
1159 if linkrev >= cl_count:
1160 break
1160 break
1161
1161
1162 parentlinkrevs = []
1162 parentlinkrevs = []
1163 for p in filelog.parentrevs(j):
1163 for p in filelog.parentrevs(j):
1164 if p != nullrev:
1164 if p != nullrev:
1165 parentlinkrevs.append(filelog.linkrev(p))
1165 parentlinkrevs.append(filelog.linkrev(p))
1166 n = filelog.node(j)
1166 n = filelog.node(j)
1167 revs.append((linkrev, parentlinkrevs,
1167 revs.append((linkrev, parentlinkrevs,
1168 follow and filelog.renamed(n)))
1168 follow and filelog.renamed(n)))
1169
1169
1170 return reversed(revs)
1170 return reversed(revs)
1171 def iterfiles():
1171 def iterfiles():
1172 for filename in match.files():
1172 for filename in match.files():
1173 yield filename, None
1173 yield filename, None
1174 for filename_node in copies:
1174 for filename_node in copies:
1175 yield filename_node
1175 yield filename_node
1176 for file_, node in iterfiles():
1176 for file_, node in iterfiles():
1177 filelog = repo.file(file_)
1177 filelog = repo.file(file_)
1178 if not len(filelog):
1178 if not len(filelog):
1179 if node is None:
1179 if node is None:
1180 # A zero count may be a directory or deleted file, so
1180 # A zero count may be a directory or deleted file, so
1181 # try to find matching entries on the slow path.
1181 # try to find matching entries on the slow path.
1182 if follow:
1182 if follow:
1183 raise util.Abort(
1183 raise util.Abort(
1184 _('cannot follow nonexistent file: "%s"') % file_)
1184 _('cannot follow nonexistent file: "%s"') % file_)
1185 slowpath = True
1185 slowpath = True
1186 break
1186 break
1187 else:
1187 else:
1188 continue
1188 continue
1189
1189
1190 if node is None:
1190 if node is None:
1191 last = len(filelog) - 1
1191 last = len(filelog) - 1
1192 else:
1192 else:
1193 last = filelog.rev(node)
1193 last = filelog.rev(node)
1194
1194
1195
1195
1196 # keep track of all ancestors of the file
1196 # keep track of all ancestors of the file
1197 ancestors = set([filelog.linkrev(last)])
1197 ancestors = set([filelog.linkrev(last)])
1198
1198
1199 # iterate from latest to oldest revision
1199 # iterate from latest to oldest revision
1200 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1200 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1201 if not follow:
1201 if not follow:
1202 if rev > maxrev:
1202 if rev > maxrev:
1203 continue
1203 continue
1204 else:
1204 else:
1205 # Note that last might not be the first interesting
1205 # Note that last might not be the first interesting
1206 # rev to us:
1206 # rev to us:
1207 # if the file has been changed after maxrev, we'll
1207 # if the file has been changed after maxrev, we'll
1208 # have linkrev(last) > maxrev, and we still need
1208 # have linkrev(last) > maxrev, and we still need
1209 # to explore the file graph
1209 # to explore the file graph
1210 if rev not in ancestors:
1210 if rev not in ancestors:
1211 continue
1211 continue
1212 # XXX insert 1327 fix here
1212 # XXX insert 1327 fix here
1213 if flparentlinkrevs:
1213 if flparentlinkrevs:
1214 ancestors.update(flparentlinkrevs)
1214 ancestors.update(flparentlinkrevs)
1215
1215
1216 fncache.setdefault(rev, []).append(file_)
1216 fncache.setdefault(rev, []).append(file_)
1217 wanted.add(rev)
1217 wanted.add(rev)
1218 if copied:
1218 if copied:
1219 copies.append(copied)
1219 copies.append(copied)
1220 if slowpath:
1220 if slowpath:
1221 # We have to read the changelog to match filenames against
1221 # We have to read the changelog to match filenames against
1222 # changed files
1222 # changed files
1223
1223
1224 if follow:
1224 if follow:
1225 raise util.Abort(_('can only follow copies/renames for explicit '
1225 raise util.Abort(_('can only follow copies/renames for explicit '
1226 'filenames'))
1226 'filenames'))
1227
1227
1228 # The slow path checks files modified in every changeset.
1228 # The slow path checks files modified in every changeset.
1229 for i in sorted(revs):
1229 for i in sorted(revs):
1230 ctx = change(i)
1230 ctx = change(i)
1231 matches = filter(match, ctx.files())
1231 matches = filter(match, ctx.files())
1232 if matches:
1232 if matches:
1233 fncache[i] = matches
1233 fncache[i] = matches
1234 wanted.add(i)
1234 wanted.add(i)
1235
1235
1236 class followfilter(object):
1236 class followfilter(object):
1237 def __init__(self, onlyfirst=False):
1237 def __init__(self, onlyfirst=False):
1238 self.startrev = nullrev
1238 self.startrev = nullrev
1239 self.roots = set()
1239 self.roots = set()
1240 self.onlyfirst = onlyfirst
1240 self.onlyfirst = onlyfirst
1241
1241
1242 def match(self, rev):
1242 def match(self, rev):
1243 def realparents(rev):
1243 def realparents(rev):
1244 if self.onlyfirst:
1244 if self.onlyfirst:
1245 return repo.changelog.parentrevs(rev)[0:1]
1245 return repo.changelog.parentrevs(rev)[0:1]
1246 else:
1246 else:
1247 return filter(lambda x: x != nullrev,
1247 return filter(lambda x: x != nullrev,
1248 repo.changelog.parentrevs(rev))
1248 repo.changelog.parentrevs(rev))
1249
1249
1250 if self.startrev == nullrev:
1250 if self.startrev == nullrev:
1251 self.startrev = rev
1251 self.startrev = rev
1252 return True
1252 return True
1253
1253
1254 if rev > self.startrev:
1254 if rev > self.startrev:
1255 # forward: all descendants
1255 # forward: all descendants
1256 if not self.roots:
1256 if not self.roots:
1257 self.roots.add(self.startrev)
1257 self.roots.add(self.startrev)
1258 for parent in realparents(rev):
1258 for parent in realparents(rev):
1259 if parent in self.roots:
1259 if parent in self.roots:
1260 self.roots.add(rev)
1260 self.roots.add(rev)
1261 return True
1261 return True
1262 else:
1262 else:
1263 # backwards: all parents
1263 # backwards: all parents
1264 if not self.roots:
1264 if not self.roots:
1265 self.roots.update(realparents(self.startrev))
1265 self.roots.update(realparents(self.startrev))
1266 if rev in self.roots:
1266 if rev in self.roots:
1267 self.roots.remove(rev)
1267 self.roots.remove(rev)
1268 self.roots.update(realparents(rev))
1268 self.roots.update(realparents(rev))
1269 return True
1269 return True
1270
1270
1271 return False
1271 return False
1272
1272
1273 # it might be worthwhile to do this in the iterator if the rev range
1273 # it might be worthwhile to do this in the iterator if the rev range
1274 # is descending and the prune args are all within that range
1274 # is descending and the prune args are all within that range
1275 for rev in opts.get('prune', ()):
1275 for rev in opts.get('prune', ()):
1276 rev = repo.changelog.rev(repo.lookup(rev))
1276 rev = repo.changelog.rev(repo.lookup(rev))
1277 ff = followfilter()
1277 ff = followfilter()
1278 stop = min(revs[0], revs[-1])
1278 stop = min(revs[0], revs[-1])
1279 for x in xrange(rev, stop - 1, -1):
1279 for x in xrange(rev, stop - 1, -1):
1280 if ff.match(x):
1280 if ff.match(x):
1281 wanted.discard(x)
1281 wanted.discard(x)
1282
1282
1283 # Now that wanted is correctly initialized, we can iterate over the
1283 # Now that wanted is correctly initialized, we can iterate over the
1284 # revision range, yielding only revisions in wanted.
1284 # revision range, yielding only revisions in wanted.
1285 def iterate():
1285 def iterate():
1286 if follow and not match.files():
1286 if follow and not match.files():
1287 ff = followfilter(onlyfirst=opts.get('follow_first'))
1287 ff = followfilter(onlyfirst=opts.get('follow_first'))
1288 def want(rev):
1288 def want(rev):
1289 return ff.match(rev) and rev in wanted
1289 return ff.match(rev) and rev in wanted
1290 else:
1290 else:
1291 def want(rev):
1291 def want(rev):
1292 return rev in wanted
1292 return rev in wanted
1293
1293
1294 for i, window in increasing_windows(0, len(revs)):
1294 for i, window in increasing_windows(0, len(revs)):
1295 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1295 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1296 for rev in sorted(nrevs):
1296 for rev in sorted(nrevs):
1297 fns = fncache.get(rev)
1297 fns = fncache.get(rev)
1298 ctx = change(rev)
1298 ctx = change(rev)
1299 if not fns:
1299 if not fns:
1300 def fns_generator():
1300 def fns_generator():
1301 for f in ctx.files():
1301 for f in ctx.files():
1302 if match(f):
1302 if match(f):
1303 yield f
1303 yield f
1304 fns = fns_generator()
1304 fns = fns_generator()
1305 prepare(ctx, fns)
1305 prepare(ctx, fns)
1306 for rev in nrevs:
1306 for rev in nrevs:
1307 yield change(rev)
1307 yield change(rev)
1308 return iterate()
1308 return iterate()
1309
1309
1310 def add(ui, repo, match, dryrun, listsubrepos, prefix):
1310 def add(ui, repo, match, dryrun, listsubrepos, prefix):
1311 join = lambda f: os.path.join(prefix, f)
1311 join = lambda f: os.path.join(prefix, f)
1312 bad = []
1312 bad = []
1313 oldbad = match.bad
1313 oldbad = match.bad
1314 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1314 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1315 names = []
1315 names = []
1316 wctx = repo[None]
1316 wctx = repo[None]
1317 for f in repo.walk(match):
1317 for f in repo.walk(match):
1318 exact = match.exact(f)
1318 exact = match.exact(f)
1319 if exact or f not in repo.dirstate:
1319 if exact or f not in repo.dirstate:
1320 names.append(f)
1320 names.append(f)
1321 if ui.verbose or not exact:
1321 if ui.verbose or not exact:
1322 ui.status(_('adding %s\n') % match.rel(join(f)))
1322 ui.status(_('adding %s\n') % match.rel(join(f)))
1323
1323
1324 if listsubrepos:
1324 if listsubrepos:
1325 for subpath in wctx.substate:
1325 for subpath in wctx.substate:
1326 sub = wctx.sub(subpath)
1326 sub = wctx.sub(subpath)
1327 try:
1327 try:
1328 submatch = matchmod.narrowmatcher(subpath, match)
1328 submatch = matchmod.narrowmatcher(subpath, match)
1329 bad.extend(sub.add(ui, submatch, dryrun, prefix))
1329 bad.extend(sub.add(ui, submatch, dryrun, prefix))
1330 except error.LookupError:
1330 except error.LookupError:
1331 ui.status(_("skipping missing subrepository: %s\n")
1331 ui.status(_("skipping missing subrepository: %s\n")
1332 % join(subpath))
1332 % join(subpath))
1333
1333
1334 if not dryrun:
1334 if not dryrun:
1335 rejected = wctx.add(names, prefix)
1335 rejected = wctx.add(names, prefix)
1336 bad.extend(f for f in rejected if f in match.files())
1336 bad.extend(f for f in rejected if f in match.files())
1337 return bad
1337 return bad
1338
1338
1339 def commit(ui, repo, commitfunc, pats, opts):
1339 def commit(ui, repo, commitfunc, pats, opts):
1340 '''commit the specified files or all outstanding changes'''
1340 '''commit the specified files or all outstanding changes'''
1341 date = opts.get('date')
1341 date = opts.get('date')
1342 if date:
1342 if date:
1343 opts['date'] = util.parsedate(date)
1343 opts['date'] = util.parsedate(date)
1344 message = logmessage(opts)
1344 message = logmessage(opts)
1345
1345
1346 # extract addremove carefully -- this function can be called from a command
1346 # extract addremove carefully -- this function can be called from a command
1347 # that doesn't support addremove
1347 # that doesn't support addremove
1348 if opts.get('addremove'):
1348 if opts.get('addremove'):
1349 addremove(repo, pats, opts)
1349 addremove(repo, pats, opts)
1350
1350
1351 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1351 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1352
1352
1353 def commiteditor(repo, ctx, subs):
1353 def commiteditor(repo, ctx, subs):
1354 if ctx.description():
1354 if ctx.description():
1355 return ctx.description()
1355 return ctx.description()
1356 return commitforceeditor(repo, ctx, subs)
1356 return commitforceeditor(repo, ctx, subs)
1357
1357
1358 def commitforceeditor(repo, ctx, subs):
1358 def commitforceeditor(repo, ctx, subs):
1359 edittext = []
1359 edittext = []
1360 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1360 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1361 if ctx.description():
1361 if ctx.description():
1362 edittext.append(ctx.description())
1362 edittext.append(ctx.description())
1363 edittext.append("")
1363 edittext.append("")
1364 edittext.append("") # Empty line between message and comments.
1364 edittext.append("") # Empty line between message and comments.
1365 edittext.append(_("HG: Enter commit message."
1365 edittext.append(_("HG: Enter commit message."
1366 " Lines beginning with 'HG:' are removed."))
1366 " Lines beginning with 'HG:' are removed."))
1367 edittext.append(_("HG: Leave message empty to abort commit."))
1367 edittext.append(_("HG: Leave message empty to abort commit."))
1368 edittext.append("HG: --")
1368 edittext.append("HG: --")
1369 edittext.append(_("HG: user: %s") % ctx.user())
1369 edittext.append(_("HG: user: %s") % ctx.user())
1370 if ctx.p2():
1370 if ctx.p2():
1371 edittext.append(_("HG: branch merge"))
1371 edittext.append(_("HG: branch merge"))
1372 if ctx.branch():
1372 if ctx.branch():
1373 edittext.append(_("HG: branch '%s'") % ctx.branch())
1373 edittext.append(_("HG: branch '%s'") % ctx.branch())
1374 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1374 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1375 edittext.extend([_("HG: added %s") % f for f in added])
1375 edittext.extend([_("HG: added %s") % f for f in added])
1376 edittext.extend([_("HG: changed %s") % f for f in modified])
1376 edittext.extend([_("HG: changed %s") % f for f in modified])
1377 edittext.extend([_("HG: removed %s") % f for f in removed])
1377 edittext.extend([_("HG: removed %s") % f for f in removed])
1378 if not added and not modified and not removed:
1378 if not added and not modified and not removed:
1379 edittext.append(_("HG: no files changed"))
1379 edittext.append(_("HG: no files changed"))
1380 edittext.append("")
1380 edittext.append("")
1381 # run editor in the repository root
1381 # run editor in the repository root
1382 olddir = os.getcwd()
1382 olddir = os.getcwd()
1383 os.chdir(repo.root)
1383 os.chdir(repo.root)
1384 text = repo.ui.edit("\n".join(edittext), ctx.user())
1384 text = repo.ui.edit("\n".join(edittext), ctx.user())
1385 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1385 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1386 os.chdir(olddir)
1386 os.chdir(olddir)
1387
1387
1388 if not text.strip():
1388 if not text.strip():
1389 raise util.Abort(_("empty commit message"))
1389 raise util.Abort(_("empty commit message"))
1390
1390
1391 return text
1391 return text
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
@@ -1,1935 +1,1935 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error
13 import scmutil, util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 'known', 'getbundle'))
24 'known', 'getbundle'))
25 supportedformats = set(('revlogv1', 'parentdelta'))
25 supportedformats = set(('revlogv1', 'parentdelta'))
26 supported = supportedformats | set(('store', 'fncache', 'shared',
26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 'dotencode'))
27 'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=0):
29 def __init__(self, baseui, path=None, create=0):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.auditor = scmutil.path_auditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39
39
40 try:
40 try:
41 self.ui.readconfig(self.join("hgrc"), self.root)
41 self.ui.readconfig(self.join("hgrc"), self.root)
42 extensions.loadall(self.ui)
42 extensions.loadall(self.ui)
43 except IOError:
43 except IOError:
44 pass
44 pass
45
45
46 if not os.path.isdir(self.path):
46 if not os.path.isdir(self.path):
47 if create:
47 if create:
48 if not os.path.exists(path):
48 if not os.path.exists(path):
49 util.makedirs(path)
49 util.makedirs(path)
50 util.makedir(self.path, notindexed=True)
50 util.makedir(self.path, notindexed=True)
51 requirements = ["revlogv1"]
51 requirements = ["revlogv1"]
52 if self.ui.configbool('format', 'usestore', True):
52 if self.ui.configbool('format', 'usestore', True):
53 os.mkdir(os.path.join(self.path, "store"))
53 os.mkdir(os.path.join(self.path, "store"))
54 requirements.append("store")
54 requirements.append("store")
55 if self.ui.configbool('format', 'usefncache', True):
55 if self.ui.configbool('format', 'usefncache', True):
56 requirements.append("fncache")
56 requirements.append("fncache")
57 if self.ui.configbool('format', 'dotencode', True):
57 if self.ui.configbool('format', 'dotencode', True):
58 requirements.append('dotencode')
58 requirements.append('dotencode')
59 # create an invalid changelog
59 # create an invalid changelog
60 self.opener("00changelog.i", "a").write(
60 self.opener("00changelog.i", "a").write(
61 '\0\0\0\2' # represents revlogv2
61 '\0\0\0\2' # represents revlogv2
62 ' dummy changelog to prevent using the old repo layout'
62 ' dummy changelog to prevent using the old repo layout'
63 )
63 )
64 if self.ui.configbool('format', 'parentdelta', False):
64 if self.ui.configbool('format', 'parentdelta', False):
65 requirements.append("parentdelta")
65 requirements.append("parentdelta")
66 else:
66 else:
67 raise error.RepoError(_("repository %s not found") % path)
67 raise error.RepoError(_("repository %s not found") % path)
68 elif create:
68 elif create:
69 raise error.RepoError(_("repository %s already exists") % path)
69 raise error.RepoError(_("repository %s already exists") % path)
70 else:
70 else:
71 # find requirements
71 # find requirements
72 requirements = set()
72 requirements = set()
73 try:
73 try:
74 requirements = set(self.opener("requires").read().splitlines())
74 requirements = set(self.opener("requires").read().splitlines())
75 except IOError, inst:
75 except IOError, inst:
76 if inst.errno != errno.ENOENT:
76 if inst.errno != errno.ENOENT:
77 raise
77 raise
78 for r in requirements - self.supported:
78 for r in requirements - self.supported:
79 raise error.RequirementError(
79 raise error.RequirementError(
80 _("requirement '%s' not supported") % r)
80 _("requirement '%s' not supported") % r)
81
81
82 self.sharedpath = self.path
82 self.sharedpath = self.path
83 try:
83 try:
84 s = os.path.realpath(self.opener("sharedpath").read())
84 s = os.path.realpath(self.opener("sharedpath").read())
85 if not os.path.exists(s):
85 if not os.path.exists(s):
86 raise error.RepoError(
86 raise error.RepoError(
87 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 _('.hg/sharedpath points to nonexistent directory %s') % s)
88 self.sharedpath = s
88 self.sharedpath = s
89 except IOError, inst:
89 except IOError, inst:
90 if inst.errno != errno.ENOENT:
90 if inst.errno != errno.ENOENT:
91 raise
91 raise
92
92
93 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
94 self.spath = self.store.path
94 self.spath = self.store.path
95 self.sopener = self.store.opener
95 self.sopener = self.store.opener
96 self.sjoin = self.store.join
96 self.sjoin = self.store.join
97 self.opener.createmode = self.store.createmode
97 self.opener.createmode = self.store.createmode
98 self._applyrequirements(requirements)
98 self._applyrequirements(requirements)
99 if create:
99 if create:
100 self._writerequirements()
100 self._writerequirements()
101
101
102 # These two define the set of tags for this repository. _tags
102 # These two define the set of tags for this repository. _tags
103 # maps tag name to node; _tagtypes maps tag name to 'global' or
103 # maps tag name to node; _tagtypes maps tag name to 'global' or
104 # 'local'. (Global tags are defined by .hgtags across all
104 # 'local'. (Global tags are defined by .hgtags across all
105 # heads, and local tags are defined in .hg/localtags.) They
105 # heads, and local tags are defined in .hg/localtags.) They
106 # constitute the in-memory cache of tags.
106 # constitute the in-memory cache of tags.
107 self._tags = None
107 self._tags = None
108 self._tagtypes = None
108 self._tagtypes = None
109
109
110 self._branchcache = None
110 self._branchcache = None
111 self._branchcachetip = None
111 self._branchcachetip = None
112 self.nodetagscache = None
112 self.nodetagscache = None
113 self.filterpats = {}
113 self.filterpats = {}
114 self._datafilters = {}
114 self._datafilters = {}
115 self._transref = self._lockref = self._wlockref = None
115 self._transref = self._lockref = self._wlockref = None
116
116
117 def _applyrequirements(self, requirements):
117 def _applyrequirements(self, requirements):
118 self.requirements = requirements
118 self.requirements = requirements
119 self.sopener.options = {}
119 self.sopener.options = {}
120 if 'parentdelta' in requirements:
120 if 'parentdelta' in requirements:
121 self.sopener.options['parentdelta'] = 1
121 self.sopener.options['parentdelta'] = 1
122
122
123 def _writerequirements(self):
123 def _writerequirements(self):
124 reqfile = self.opener("requires", "w")
124 reqfile = self.opener("requires", "w")
125 for r in self.requirements:
125 for r in self.requirements:
126 reqfile.write("%s\n" % r)
126 reqfile.write("%s\n" % r)
127 reqfile.close()
127 reqfile.close()
128
128
129 def _checknested(self, path):
129 def _checknested(self, path):
130 """Determine if path is a legal nested repository."""
130 """Determine if path is a legal nested repository."""
131 if not path.startswith(self.root):
131 if not path.startswith(self.root):
132 return False
132 return False
133 subpath = path[len(self.root) + 1:]
133 subpath = path[len(self.root) + 1:]
134
134
135 # XXX: Checking against the current working copy is wrong in
135 # XXX: Checking against the current working copy is wrong in
136 # the sense that it can reject things like
136 # the sense that it can reject things like
137 #
137 #
138 # $ hg cat -r 10 sub/x.txt
138 # $ hg cat -r 10 sub/x.txt
139 #
139 #
140 # if sub/ is no longer a subrepository in the working copy
140 # if sub/ is no longer a subrepository in the working copy
141 # parent revision.
141 # parent revision.
142 #
142 #
143 # However, it can of course also allow things that would have
143 # However, it can of course also allow things that would have
144 # been rejected before, such as the above cat command if sub/
144 # been rejected before, such as the above cat command if sub/
145 # is a subrepository now, but was a normal directory before.
145 # is a subrepository now, but was a normal directory before.
146 # The old path auditor would have rejected by mistake since it
146 # The old path auditor would have rejected by mistake since it
147 # panics when it sees sub/.hg/.
147 # panics when it sees sub/.hg/.
148 #
148 #
149 # All in all, checking against the working copy seems sensible
149 # All in all, checking against the working copy seems sensible
150 # since we want to prevent access to nested repositories on
150 # since we want to prevent access to nested repositories on
151 # the filesystem *now*.
151 # the filesystem *now*.
152 ctx = self[None]
152 ctx = self[None]
153 parts = util.splitpath(subpath)
153 parts = util.splitpath(subpath)
154 while parts:
154 while parts:
155 prefix = os.sep.join(parts)
155 prefix = os.sep.join(parts)
156 if prefix in ctx.substate:
156 if prefix in ctx.substate:
157 if prefix == subpath:
157 if prefix == subpath:
158 return True
158 return True
159 else:
159 else:
160 sub = ctx.sub(prefix)
160 sub = ctx.sub(prefix)
161 return sub.checknested(subpath[len(prefix) + 1:])
161 return sub.checknested(subpath[len(prefix) + 1:])
162 else:
162 else:
163 parts.pop()
163 parts.pop()
164 return False
164 return False
165
165
166 @util.propertycache
166 @util.propertycache
167 def _bookmarks(self):
167 def _bookmarks(self):
168 return bookmarks.read(self)
168 return bookmarks.read(self)
169
169
170 @util.propertycache
170 @util.propertycache
171 def _bookmarkcurrent(self):
171 def _bookmarkcurrent(self):
172 return bookmarks.readcurrent(self)
172 return bookmarks.readcurrent(self)
173
173
174 @propertycache
174 @propertycache
175 def changelog(self):
175 def changelog(self):
176 c = changelog.changelog(self.sopener)
176 c = changelog.changelog(self.sopener)
177 if 'HG_PENDING' in os.environ:
177 if 'HG_PENDING' in os.environ:
178 p = os.environ['HG_PENDING']
178 p = os.environ['HG_PENDING']
179 if p.startswith(self.root):
179 if p.startswith(self.root):
180 c.readpending('00changelog.i.a')
180 c.readpending('00changelog.i.a')
181 self.sopener.options['defversion'] = c.version
181 self.sopener.options['defversion'] = c.version
182 return c
182 return c
183
183
    @propertycache
    def manifest(self):
        '''The manifest revlog, created lazily and cached.'''
        return manifest.manifest(self.sopener)
187
187
    @propertycache
    def dirstate(self):
        '''The working directory dirstate, created lazily and cached.'''
        # one-element list so the nested function can mutate the flag
        # (no 'nonlocal' in this Python version)
        warned = [0]
        def validate(node):
            # Return node if it is a known changeset; otherwise warn once
            # and substitute nullid (handles a damaged/stale dirstate
            # referencing a stripped revision).
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203
203
204 def __getitem__(self, changeid):
204 def __getitem__(self, changeid):
205 if changeid is None:
205 if changeid is None:
206 return context.workingctx(self)
206 return context.workingctx(self)
207 return context.changectx(self, changeid)
207 return context.changectx(self, changeid)
208
208
209 def __contains__(self, changeid):
209 def __contains__(self, changeid):
210 try:
210 try:
211 return bool(self.lookup(changeid))
211 return bool(self.lookup(changeid))
212 except error.RepoLookupError:
212 except error.RepoLookupError:
213 return False
213 return False
214
214
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no
        # revisions (otherwise len(self) == 0 would make it falsy)
        return True
217
217
    def __len__(self):
        # number of revisions == length of the changelog
        return len(self.changelog)
220
220
221 def __iter__(self):
221 def __iter__(self):
222 for i in xrange(len(self)):
222 for i in xrange(len(self)):
223 yield i
223 yield i
224
224
225 def url(self):
225 def url(self):
226 return 'file:' + self.root
226 return 'file:' + self.root
227
227
    def hook(self, name, throw=False, **args):
        '''Run the hooks configured under *name*; throw is forwarded to
        hook.hook (NOTE(review): presumably raises on hook failure when
        True -- confirm in hook.py).'''
        return hook.hook(self.ui, self, name, throw, **args)
230
230
    # characters forbidden in tag names: ':' conflicts with revision
    # syntax, and CR/LF would corrupt the line-based tags file written
    # by _tag's writetags helper
    tag_disallowed = ':\r\n'
232
232
    def _tag(self, names, node, message, local, user, date, extra={}):
        '''Internal helper for tag(): record tag *names* for *node*.

        With local=True the tags are appended to .hg/localtags (not
        revision controlled) and None is returned; otherwise .hgtags is
        updated and a new changeset is committed, whose node is returned.
        Runs the 'pretag' (throwing) and 'tag' hooks for each name.
        '''
        # accept a single name or a sequence of names
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            # shadowing a branch name is legal but confusing -- warn
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append "<hexnode> <name>" lines to the open tags file fp,
            # munging names with *munge* when given.  For a retagged name
            # the old node is written first so the file keeps the tag's
            # history.  Closes fp.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            # open for read/append; fall back to append-only if the file
            # does not exist yet
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        # make sure .hgtags is tracked before committing it
        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
300
300
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to auto-commit over pending .hgtags edits
            # (NOTE(review): status()[:5] presumably covers modified/
            # added/removed/deleted/unknown -- confirm against status())
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
330
330
331 def tags(self):
331 def tags(self):
332 '''return a mapping of tag to node'''
332 '''return a mapping of tag to node'''
333 if self._tags is None:
333 if self._tags is None:
334 (self._tags, self._tagtypes) = self._findtags()
334 (self._tags, self._tagtypes) = self._findtags()
335
335
336 return self._tags
336 return self._tags
337
337
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is implicit and always present
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
375
375
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # populate the tag caches (self._tagtypes) if needed
        self.tags()

        return self._tagtypes.get(tagname)
388
388
389 def tagslist(self):
389 def tagslist(self):
390 '''return a list of tags ordered by revision'''
390 '''return a list of tags ordered by revision'''
391 l = []
391 l = []
392 for t, n in self.tags().iteritems():
392 for t, n in self.tags().iteritems():
393 r = self.changelog.rev(n)
393 r = self.changelog.rev(n)
394 l.append((r, t, n))
394 l.append((r, t, n))
395 return [(t, n) for r, t, n in sorted(l)]
395 return [(t, n) for r, t, n in sorted(l)]
396
396
397 def nodetags(self, node):
397 def nodetags(self, node):
398 '''return the tags associated with a node'''
398 '''return the tags associated with a node'''
399 if not self.nodetagscache:
399 if not self.nodetagscache:
400 self.nodetagscache = {}
400 self.nodetagscache = {}
401 for t, n in self.tags().iteritems():
401 for t, n in self.tags().iteritems():
402 self.nodetagscache.setdefault(n, []).append(t)
402 self.nodetagscache.setdefault(n, []).append(t)
403 for tags in self.nodetagscache.itervalues():
403 for tags in self.nodetagscache.itervalues():
404 tags.sort()
404 tags.sort()
405 return self.nodetagscache.get(node, [])
405 return self.nodetagscache.get(node, [])
406
406
407 def nodebookmarks(self, node):
407 def nodebookmarks(self, node):
408 marks = []
408 marks = []
409 for bookmark, n in self._bookmarks.iteritems():
409 for bookmark, n in self._bookmarks.iteritems():
410 if n == node:
410 if n == node:
411 marks.append(bookmark)
411 marks.append(bookmark)
412 return sorted(marks)
412 return sorted(marks)
413
413
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        '''Bring the branch-head map *partial* (valid up to revision
        *lrev*) up to date with the current tip, writing the refreshed
        cache to disk when anything changed.  Returns *partial*.'''
        tiprev = len(self) - 1
        if lrev != tiprev:
            # only scan the revisions added since the cache was valid
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
423
423
    def updatebranchcache(self):
        '''Refresh the in-memory branch-head cache (self._branchcache).

        NOTE(review): returns the cache on the fast path but implicitly
        None otherwise; callers such as branchmap() ignore the return
        value.'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already matches the current tip
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # never cached, or the cached tip was stripped: reload from disk
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
440
440
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # refresh the cache before handing it out
        self.updatebranchcache()
        return self._branchcache
445
445
446 def branchtags(self):
446 def branchtags(self):
447 '''return a dict where branch names map to the tipmost head of
447 '''return a dict where branch names map to the tipmost head of
448 the branch, open heads come before closed'''
448 the branch, open heads come before closed'''
449 bt = {}
449 bt = {}
450 for bn, heads in self.branchmap().iteritems():
450 for bn, heads in self.branchmap().iteritems():
451 tip = heads[-1]
451 tip = heads[-1]
452 for h in reversed(heads):
452 for h in reversed(heads):
453 if 'close' not in self.changelog.read(h)[5]:
453 if 'close' not in self.changelog.read(h)[5]:
454 tip = h
454 tip = h
455 break
455 break
456 bt[bn] = tip
456 bt[bn] = tip
457 return bt
457 return bt
458
458
    def _readbranchcache(self):
        '''Read the on-disk branch cache (.hg/cache/branchheads).

        Returns (partial, last, lrev): partial maps branch name to a list
        of head nodes; last/lrev are the tip node and revision the cache
        was valid for.  Any problem yields the empty, invalid cache
        ({}, nullid, nullrev).'''
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tiphex> <tiprev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<headhex> <branchname>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal -- fall back to an empty one
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
487
487
    def _writebranchcache(self, branches, tip, tiprev):
        '''Write the branch-head map to .hg/cache/branchheads.

        Best effort: I/O errors are silently ignored since the cache can
        always be rebuilt from the changelog.'''
        try:
            # atomictemp so readers never see a half-written cache
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass
498
498
    def _updatebranchcache(self, partial, ctxgen):
        '''Fold the changesets produced by *ctxgen* into the branch-head
        map *partial* (mutated in place), pruning old heads that became
        reachable from newer ones.'''
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # only walk back as far as the oldest current head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
522
522
523 def lookup(self, key):
523 def lookup(self, key):
524 if isinstance(key, int):
524 if isinstance(key, int):
525 return self.changelog.node(key)
525 return self.changelog.node(key)
526 elif key == '.':
526 elif key == '.':
527 return self.dirstate.p1()
527 return self.dirstate.p1()
528 elif key == 'null':
528 elif key == 'null':
529 return nullid
529 return nullid
530 elif key == 'tip':
530 elif key == 'tip':
531 return self.changelog.tip()
531 return self.changelog.tip()
532 n = self.changelog._match(key)
532 n = self.changelog._match(key)
533 if n:
533 if n:
534 return n
534 return n
535 if key in self._bookmarks:
535 if key in self._bookmarks:
536 return self._bookmarks[key]
536 return self._bookmarks[key]
537 if key in self.tags():
537 if key in self.tags():
538 return self.tags()[key]
538 return self.tags()[key]
539 if key in self.branchtags():
539 if key in self.branchtags():
540 return self.branchtags()[key]
540 return self.branchtags()[key]
541 n = self.changelog._partialmatch(key)
541 n = self.changelog._partialmatch(key)
542 if n:
542 if n:
543 return n
543 return n
544
544
545 # can't find key, check if it might have come from damaged dirstate
545 # can't find key, check if it might have come from damaged dirstate
546 if key in self.dirstate.parents():
546 if key in self.dirstate.parents():
547 raise error.Abort(_("working directory has unknown parent '%s'!")
547 raise error.Abort(_("working directory has unknown parent '%s'!")
548 % short(key))
548 % short(key))
549 try:
549 try:
550 if len(key) == 20:
550 if len(key) == 20:
551 key = hex(key)
551 key = hex(key)
552 except:
552 except:
553 pass
553 pass
554 raise error.RepoLookupError(_("unknown revision '%s'") % key)
554 raise error.RepoLookupError(_("unknown revision '%s'") % key)
555
555
556 def lookupbranch(self, key, remote=None):
556 def lookupbranch(self, key, remote=None):
557 repo = remote or self
557 repo = remote or self
558 if key in repo.branchmap():
558 if key in repo.branchmap():
559 return key
559 return key
560
560
561 repo = (remote and remote.local()) and remote or self
561 repo = (remote and remote.local()) and remote or self
562 return repo[key].branch()
562 return repo[key].branch()
563
563
564 def known(self, nodes):
564 def known(self, nodes):
565 nm = self.changelog.nodemap
565 nm = self.changelog.nodemap
566 return [(n in nm) for n in nodes]
566 return [(n in nm) for n in nodes]
567
567
    def local(self):
        # this is a local (direct filesystem) repository, as opposed to
        # a remote peer
        return True
570
570
    def join(self, f):
        '''Return *f* joined to the repository metadata path (self.path).'''
        return os.path.join(self.path, f)
573
573
    def wjoin(self, f):
        '''Return *f* joined to the working directory root (self.root).'''
        return os.path.join(self.root, f)
576
576
    def file(self, f):
        '''Return the filelog for tracked file *f*.

        A single leading '/' is stripped, so store-style and
        repo-relative paths are treated alike.'''
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
581
581
    def changectx(self, changeid):
        '''Return the changectx for *changeid* (alias for self[changeid]).'''
        return self[changeid]
584
584
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None selects the working directory context
        return self[changeid].parents()
588
588
    def filectx(self, path, changeid=None, fileid=None):
        """Return a filectx for *path*.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
593
593
    def getcwd(self):
        '''Return the current working directory (delegates to dirstate).'''
        return self.dirstate.getcwd()
596
596
    def pathto(self, f, cwd=None):
        '''Return *f* as a path relative to *cwd* (delegates to dirstate).'''
        return self.dirstate.pathto(f, cwd)
599
599
    def wfile(self, f, mode='r'):
        '''Open working-directory file *f* with *mode* via wopener.'''
        return self.wopener(f, mode)
602
602
    def _link(self, f):
        '''True if working-directory file *f* is a symbolic link.'''
        return os.path.islink(self.wjoin(f))
605
605
606 def _loadfilter(self, filter):
606 def _loadfilter(self, filter):
607 if filter not in self.filterpats:
607 if filter not in self.filterpats:
608 l = []
608 l = []
609 for pat, cmd in self.ui.configitems(filter):
609 for pat, cmd in self.ui.configitems(filter):
610 if cmd == '!':
610 if cmd == '!':
611 continue
611 continue
612 mf = matchmod.match(self.root, '', [pat])
612 mf = matchmod.match(self.root, '', [pat])
613 fn = None
613 fn = None
614 params = cmd
614 params = cmd
615 for name, filterfn in self._datafilters.iteritems():
615 for name, filterfn in self._datafilters.iteritems():
616 if cmd.startswith(name):
616 if cmd.startswith(name):
617 fn = filterfn
617 fn = filterfn
618 params = cmd[len(name):].lstrip()
618 params = cmd[len(name):].lstrip()
619 break
619 break
620 if not fn:
620 if not fn:
621 fn = lambda s, c, **kwargs: util.filter(s, c)
621 fn = lambda s, c, **kwargs: util.filter(s, c)
622 # Wrap old filters not supporting keyword arguments
622 # Wrap old filters not supporting keyword arguments
623 if not inspect.getargspec(fn)[2]:
623 if not inspect.getargspec(fn)[2]:
624 oldfn = fn
624 oldfn = fn
625 fn = lambda s, c, **kwargs: oldfn(s, c)
625 fn = lambda s, c, **kwargs: oldfn(s, c)
626 l.append((mf, fn, params))
626 l.append((mf, fn, params))
627 self.filterpats[filter] = l
627 self.filterpats[filter] = l
628 return self.filterpats[filter]
628 return self.filterpats[filter]
629
629
630 def _filter(self, filterpats, filename, data):
630 def _filter(self, filterpats, filename, data):
631 for mf, fn, cmd in filterpats:
631 for mf, fn, cmd in filterpats:
632 if mf(filename):
632 if mf(filename):
633 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
633 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
634 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
634 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
635 break
635 break
636
636
637 return data
637 return data
638
638
    @propertycache
    def _encodefilterpats(self):
        # filters applied when reading file data out of the working
        # directory (see wread), loaded lazily from the [encode] section
        return self._loadfilter('encode')
642
642
    @propertycache
    def _decodefilterpats(self):
        # filters applied when writing file data to the working
        # directory (see wwrite), loaded lazily from the [decode] section
        return self._loadfilter('decode')
646
646
    def adddatafilter(self, name, filter):
        '''Register data filter *filter* under *name* for use by
        _loadfilter (matched against [encode]/[decode] commands).'''
        self._datafilters[name] = filter
649
649
650 def wread(self, filename):
650 def wread(self, filename):
651 if self._link(filename):
651 if self._link(filename):
652 data = os.readlink(self.wjoin(filename))
652 data = os.readlink(self.wjoin(filename))
653 else:
653 else:
654 data = self.wopener(filename, 'r').read()
654 data = self.wopener(filename, 'r').read()
655 return self._filter(self._encodefilterpats, filename, data)
655 return self._filter(self._encodefilterpats, filename, data)
656
656
657 def wwrite(self, filename, data, flags):
657 def wwrite(self, filename, data, flags):
658 data = self._filter(self._decodefilterpats, filename, data)
658 data = self._filter(self._decodefilterpats, filename, data)
659 if 'l' in flags:
659 if 'l' in flags:
660 self.wopener.symlink(data, filename)
660 self.wopener.symlink(data, filename)
661 else:
661 else:
662 self.wopener(filename, 'w').write(data)
662 self.wopener(filename, 'w').write(data)
663 if 'x' in flags:
663 if 'x' in flags:
664 util.set_flags(self.wjoin(filename), False, True)
664 util.set_flags(self.wjoin(filename), False, True)
665
665
    def wwritedata(self, filename, data):
        '''Return *data* run through the decode filters for *filename*
        (no file is written).'''
        return self._filter(self._decodefilterpats, filename, data)
668
668
    def transaction(self, desc):
        '''Open a new transaction described by *desc*, or nest into the
        currently running one.

        Saves dirstate, branch and description journal files so a later
        rollback can restore them.'''
        # _transref is a weakref; dereference it if still alive
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # journal.* files become undo.* (NOTE(review): presumably applied
        # by aftertrans when the transaction completes -- see aftertrans)
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # keep only a weak reference so an abandoned transaction object
        # can be garbage collected
        self._transref = weakref.ref(tr)
        return tr
699
699
    def recover(self):
        '''Roll back an interrupted transaction, if one is present.

        Returns True when a journal was found and rolled back, False
        otherwise.'''
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop cached state that may reference undone revisions
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
714
714
    def rollback(self, dryrun=False):
        """Undo the most recent transaction, restoring the repository to
        its previous state from the saved "undo.*" files.

        If dryrun is true, only report what would be rolled back; no
        files are touched.  Returns 1 when no rollback information is
        available, None otherwise.
        """
        wlock = lock = None
        try:
            # both locks: we rewrite store data and the dirstate
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc holds: new tip rev, command name, args
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                            int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                            int(args[0]) - 1, args[1])
                except IOError:
                    # undo.desc missing or unreadable; still roll back
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                # restore store data first, then the working-copy metadata
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing: keep whatever branch is current
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                # drop caches that describe the now-stripped history
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
764
764
765 def invalidatecaches(self):
765 def invalidatecaches(self):
766 self._tags = None
766 self._tags = None
767 self._tagtypes = None
767 self._tagtypes = None
768 self.nodetagscache = None
768 self.nodetagscache = None
769 self._branchcache = None # in UTF-8
769 self._branchcache = None # in UTF-8
770 self._branchcachetip = None
770 self._branchcachetip = None
771
771
772 def invalidate(self):
772 def invalidate(self):
773 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
773 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
774 if a in self.__dict__:
774 if a in self.__dict__:
775 delattr(self, a)
775 delattr(self, a)
776 self.invalidatecaches()
776 self.invalidatecaches()
777
777
778 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
778 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
779 try:
779 try:
780 l = lock.lock(lockname, 0, releasefn, desc=desc)
780 l = lock.lock(lockname, 0, releasefn, desc=desc)
781 except error.LockHeld, inst:
781 except error.LockHeld, inst:
782 if not wait:
782 if not wait:
783 raise
783 raise
784 self.ui.warn(_("waiting for lock on %s held by %r\n") %
784 self.ui.warn(_("waiting for lock on %s held by %r\n") %
785 (desc, inst.locker))
785 (desc, inst.locker))
786 # default to 600 seconds timeout
786 # default to 600 seconds timeout
787 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
787 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
788 releasefn, desc=desc)
788 releasefn, desc=desc)
789 if acquirefn:
789 if acquirefn:
790 acquirefn()
790 acquirefn()
791 return l
791 return l
792
792
793 def lock(self, wait=True):
793 def lock(self, wait=True):
794 '''Lock the repository store (.hg/store) and return a weak reference
794 '''Lock the repository store (.hg/store) and return a weak reference
795 to the lock. Use this before modifying the store (e.g. committing or
795 to the lock. Use this before modifying the store (e.g. committing or
796 stripping). If you are opening a transaction, get a lock as well.)'''
796 stripping). If you are opening a transaction, get a lock as well.)'''
797 l = self._lockref and self._lockref()
797 l = self._lockref and self._lockref()
798 if l is not None and l.held:
798 if l is not None and l.held:
799 l.lock()
799 l.lock()
800 return l
800 return l
801
801
802 l = self._lock(self.sjoin("lock"), wait, self.store.write,
802 l = self._lock(self.sjoin("lock"), wait, self.store.write,
803 self.invalidate, _('repository %s') % self.origroot)
803 self.invalidate, _('repository %s') % self.origroot)
804 self._lockref = weakref.ref(l)
804 self._lockref = weakref.ref(l)
805 return l
805 return l
806
806
807 def wlock(self, wait=True):
807 def wlock(self, wait=True):
808 '''Lock the non-store parts of the repository (everything under
808 '''Lock the non-store parts of the repository (everything under
809 .hg except .hg/store) and return a weak reference to the lock.
809 .hg except .hg/store) and return a weak reference to the lock.
810 Use this before modifying files in .hg.'''
810 Use this before modifying files in .hg.'''
811 l = self._wlockref and self._wlockref()
811 l = self._wlockref and self._wlockref()
812 if l is not None and l.held:
812 if l is not None and l.held:
813 l.lock()
813 l.lock()
814 return l
814 return l
815
815
816 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
816 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
817 self.dirstate.invalidate, _('working directory of %s') %
817 self.dirstate.invalidate, _('working directory of %s') %
818 self.origroot)
818 self.origroot)
819 self._wlockref = weakref.ref(l)
819 self._wlockref = weakref.ref(l)
820 return l
820 return l
821
821
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        manifests of the two commit parents; linkrev is the revision the
        new filelog entry will link to; tr is the transaction; changed
        file names are appended to changelist.  Returns the new filelog
        node, or the first parent's node when the file is unchanged.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                # copy source vanished from all ancestors: record the file
                # as plain content, not a copy
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
901
901
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.  Raises util.Abort on partial merge commits, missing
        or untracked explicit files, unresolved merges, and dirty
        subrepo problems.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated below; report bad files by aborting
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # changes: (modified, added, removed, deleted, unknown,
            #           ignored, clean) -- same layout as self.status()
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit: no changed files, no branch close, no
            # merge, no branch change
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # intentionally broad: point at the saved message for any
                # failure mode, then re-raise
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # post-commit hook runs outside the wlock
        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1036
1036
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is true, IOErrors while committing individual files
        are fatal; otherwise a file raising ENOENT is treated as removed.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a weak proxy keeps helpers from extending the transaction's
            # lifetime
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished: record it as removed instead
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lets hooks see pending changelog data before it is final
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1104
1104
1105 def destroyed(self):
1105 def destroyed(self):
1106 '''Inform the repository that nodes have been destroyed.
1106 '''Inform the repository that nodes have been destroyed.
1107 Intended for use by strip and rollback, so there's a common
1107 Intended for use by strip and rollback, so there's a common
1108 place for anything that has to be done after destroying history.'''
1108 place for anything that has to be done after destroying history.'''
1109 # XXX it might be nice if we could take the list of destroyed
1109 # XXX it might be nice if we could take the list of destroyed
1110 # nodes, but I don't see an easy way for rollback() to do that
1110 # nodes, but I don't see an easy way for rollback() to do that
1111
1111
1112 # Ensure the persistent tag cache is updated. Doing it now
1112 # Ensure the persistent tag cache is updated. Doing it now
1113 # means that the tag cache only has to worry about destroyed
1113 # means that the tag cache only has to worry about destroyed
1114 # heads immediately after a strip/rollback. That in turn
1114 # heads immediately after a strip/rollback. That in turn
1115 # guarantees that "cachetip == currenttip" (comparing both rev
1115 # guarantees that "cachetip == currenttip" (comparing both rev
1116 # and node) always means no nodes have been added or destroyed.
1116 # and node) always means no nodes have been added or destroyed.
1117
1117
1118 # XXX this is suboptimal when qrefresh'ing: we strip the current
1118 # XXX this is suboptimal when qrefresh'ing: we strip the current
1119 # head, refresh the tag cache, then immediately add a new head.
1119 # head, refresh the tag cache, then immediately add a new head.
1120 # But I think doing it this way is necessary for the "instant
1120 # But I think doing it this way is necessary for the "instant
1121 # tag cache retrieval" case to work.
1121 # tag cache retrieval" case to work.
1122 self.invalidatecaches()
1122 self.invalidatecaches()
1123
1123
1124 def walk(self, match, node=None):
1124 def walk(self, match, node=None):
1125 '''
1125 '''
1126 walk recursively through the directory tree or a given
1126 walk recursively through the directory tree or a given
1127 changeset, finding all files matched by the match
1127 changeset, finding all files matched by the match
1128 function
1128 function
1129 '''
1129 '''
1130 return self[node].walk(match)
1130 return self[node].walk(match)
1131
1131
1132 def status(self, node1='.', node2=None, match=None,
1132 def status(self, node1='.', node2=None, match=None,
1133 ignored=False, clean=False, unknown=False,
1133 ignored=False, clean=False, unknown=False,
1134 listsubrepos=False):
1134 listsubrepos=False):
1135 """return status of files between two nodes or node and working directory
1135 """return status of files between two nodes or node and working directory
1136
1136
1137 If node1 is None, use the first dirstate parent instead.
1137 If node1 is None, use the first dirstate parent instead.
1138 If node2 is None, compare node1 with working directory.
1138 If node2 is None, compare node1 with working directory.
1139 """
1139 """
1140
1140
1141 def mfmatches(ctx):
1141 def mfmatches(ctx):
1142 mf = ctx.manifest().copy()
1142 mf = ctx.manifest().copy()
1143 for fn in mf.keys():
1143 for fn in mf.keys():
1144 if not match(fn):
1144 if not match(fn):
1145 del mf[fn]
1145 del mf[fn]
1146 return mf
1146 return mf
1147
1147
1148 if isinstance(node1, context.changectx):
1148 if isinstance(node1, context.changectx):
1149 ctx1 = node1
1149 ctx1 = node1
1150 else:
1150 else:
1151 ctx1 = self[node1]
1151 ctx1 = self[node1]
1152 if isinstance(node2, context.changectx):
1152 if isinstance(node2, context.changectx):
1153 ctx2 = node2
1153 ctx2 = node2
1154 else:
1154 else:
1155 ctx2 = self[node2]
1155 ctx2 = self[node2]
1156
1156
1157 working = ctx2.rev() is None
1157 working = ctx2.rev() is None
1158 parentworking = working and ctx1 == self['.']
1158 parentworking = working and ctx1 == self['.']
1159 match = match or matchmod.always(self.root, self.getcwd())
1159 match = match or matchmod.always(self.root, self.getcwd())
1160 listignored, listclean, listunknown = ignored, clean, unknown
1160 listignored, listclean, listunknown = ignored, clean, unknown
1161
1161
1162 # load earliest manifest first for caching reasons
1162 # load earliest manifest first for caching reasons
1163 if not working and ctx2.rev() < ctx1.rev():
1163 if not working and ctx2.rev() < ctx1.rev():
1164 ctx2.manifest()
1164 ctx2.manifest()
1165
1165
1166 if not parentworking:
1166 if not parentworking:
1167 def bad(f, msg):
1167 def bad(f, msg):
1168 if f not in ctx1:
1168 if f not in ctx1:
1169 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1169 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1170 match.bad = bad
1170 match.bad = bad
1171
1171
1172 if working: # we need to scan the working dir
1172 if working: # we need to scan the working dir
1173 subrepos = []
1173 subrepos = []
1174 if '.hgsub' in self.dirstate:
1174 if '.hgsub' in self.dirstate:
1175 subrepos = ctx1.substate.keys()
1175 subrepos = ctx1.substate.keys()
1176 s = self.dirstate.status(match, subrepos, listignored,
1176 s = self.dirstate.status(match, subrepos, listignored,
1177 listclean, listunknown)
1177 listclean, listunknown)
1178 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1178 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1179
1179
1180 # check for any possibly clean files
1180 # check for any possibly clean files
1181 if parentworking and cmp:
1181 if parentworking and cmp:
1182 fixup = []
1182 fixup = []
1183 # do a full compare of any files that might have changed
1183 # do a full compare of any files that might have changed
1184 for f in sorted(cmp):
1184 for f in sorted(cmp):
1185 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1185 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1186 or ctx1[f].cmp(ctx2[f])):
1186 or ctx1[f].cmp(ctx2[f])):
1187 modified.append(f)
1187 modified.append(f)
1188 else:
1188 else:
1189 fixup.append(f)
1189 fixup.append(f)
1190
1190
1191 # update dirstate for files that are actually clean
1191 # update dirstate for files that are actually clean
1192 if fixup:
1192 if fixup:
1193 if listclean:
1193 if listclean:
1194 clean += fixup
1194 clean += fixup
1195
1195
1196 try:
1196 try:
1197 # updating the dirstate is optional
1197 # updating the dirstate is optional
1198 # so we don't wait on the lock
1198 # so we don't wait on the lock
1199 wlock = self.wlock(False)
1199 wlock = self.wlock(False)
1200 try:
1200 try:
1201 for f in fixup:
1201 for f in fixup:
1202 self.dirstate.normal(f)
1202 self.dirstate.normal(f)
1203 finally:
1203 finally:
1204 wlock.release()
1204 wlock.release()
1205 except error.LockError:
1205 except error.LockError:
1206 pass
1206 pass
1207
1207
1208 if not parentworking:
1208 if not parentworking:
1209 mf1 = mfmatches(ctx1)
1209 mf1 = mfmatches(ctx1)
1210 if working:
1210 if working:
1211 # we are comparing working dir against non-parent
1211 # we are comparing working dir against non-parent
1212 # generate a pseudo-manifest for the working dir
1212 # generate a pseudo-manifest for the working dir
1213 mf2 = mfmatches(self['.'])
1213 mf2 = mfmatches(self['.'])
1214 for f in cmp + modified + added:
1214 for f in cmp + modified + added:
1215 mf2[f] = None
1215 mf2[f] = None
1216 mf2.set(f, ctx2.flags(f))
1216 mf2.set(f, ctx2.flags(f))
1217 for f in removed:
1217 for f in removed:
1218 if f in mf2:
1218 if f in mf2:
1219 del mf2[f]
1219 del mf2[f]
1220 else:
1220 else:
1221 # we are comparing two revisions
1221 # we are comparing two revisions
1222 deleted, unknown, ignored = [], [], []
1222 deleted, unknown, ignored = [], [], []
1223 mf2 = mfmatches(ctx2)
1223 mf2 = mfmatches(ctx2)
1224
1224
1225 modified, added, clean = [], [], []
1225 modified, added, clean = [], [], []
1226 for fn in mf2:
1226 for fn in mf2:
1227 if fn in mf1:
1227 if fn in mf1:
1228 if (fn not in deleted and
1228 if (fn not in deleted and
1229 (mf1.flags(fn) != mf2.flags(fn) or
1229 (mf1.flags(fn) != mf2.flags(fn) or
1230 (mf1[fn] != mf2[fn] and
1230 (mf1[fn] != mf2[fn] and
1231 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1231 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1232 modified.append(fn)
1232 modified.append(fn)
1233 elif listclean:
1233 elif listclean:
1234 clean.append(fn)
1234 clean.append(fn)
1235 del mf1[fn]
1235 del mf1[fn]
1236 elif fn not in deleted:
1236 elif fn not in deleted:
1237 added.append(fn)
1237 added.append(fn)
1238 removed = mf1.keys()
1238 removed = mf1.keys()
1239
1239
1240 r = modified, added, removed, deleted, unknown, ignored, clean
1240 r = modified, added, removed, deleted, unknown, ignored, clean
1241
1241
1242 if listsubrepos:
1242 if listsubrepos:
1243 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1243 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1244 if working:
1244 if working:
1245 rev2 = None
1245 rev2 = None
1246 else:
1246 else:
1247 rev2 = ctx2.substate[subpath][1]
1247 rev2 = ctx2.substate[subpath][1]
1248 try:
1248 try:
1249 submatch = matchmod.narrowmatcher(subpath, match)
1249 submatch = matchmod.narrowmatcher(subpath, match)
1250 s = sub.status(rev2, match=submatch, ignored=listignored,
1250 s = sub.status(rev2, match=submatch, ignored=listignored,
1251 clean=listclean, unknown=listunknown,
1251 clean=listclean, unknown=listunknown,
1252 listsubrepos=True)
1252 listsubrepos=True)
1253 for rfiles, sfiles in zip(r, s):
1253 for rfiles, sfiles in zip(r, s):
1254 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1254 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1255 except error.LookupError:
1255 except error.LookupError:
1256 self.ui.status(_("skipping missing subrepository: %s\n")
1256 self.ui.status(_("skipping missing subrepository: %s\n")
1257 % subpath)
1257 % subpath)
1258
1258
1259 for l in r:
1259 for l in r:
1260 l.sort()
1260 l.sort()
1261 return r
1261 return r
1262
1262
1263 def heads(self, start=None):
1263 def heads(self, start=None):
1264 heads = self.changelog.heads(start)
1264 heads = self.changelog.heads(start)
1265 # sort the output in rev descending order
1265 # sort the output in rev descending order
1266 return sorted(heads, key=self.changelog.rev, reverse=True)
1266 return sorted(heads, key=self.changelog.rev, reverse=True)
1267
1267
1268 def branchheads(self, branch=None, start=None, closed=False):
1268 def branchheads(self, branch=None, start=None, closed=False):
1269 '''return a (possibly filtered) list of heads for the given branch
1269 '''return a (possibly filtered) list of heads for the given branch
1270
1270
1271 Heads are returned in topological order, from newest to oldest.
1271 Heads are returned in topological order, from newest to oldest.
1272 If branch is None, use the dirstate branch.
1272 If branch is None, use the dirstate branch.
1273 If start is not None, return only heads reachable from start.
1273 If start is not None, return only heads reachable from start.
1274 If closed is True, return heads that are marked as closed as well.
1274 If closed is True, return heads that are marked as closed as well.
1275 '''
1275 '''
1276 if branch is None:
1276 if branch is None:
1277 branch = self[None].branch()
1277 branch = self[None].branch()
1278 branches = self.branchmap()
1278 branches = self.branchmap()
1279 if branch not in branches:
1279 if branch not in branches:
1280 return []
1280 return []
1281 # the cache returns heads ordered lowest to highest
1281 # the cache returns heads ordered lowest to highest
1282 bheads = list(reversed(branches[branch]))
1282 bheads = list(reversed(branches[branch]))
1283 if start is not None:
1283 if start is not None:
1284 # filter out the heads that cannot be reached from startrev
1284 # filter out the heads that cannot be reached from startrev
1285 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1285 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1286 bheads = [h for h in bheads if h in fbheads]
1286 bheads = [h for h in bheads if h in fbheads]
1287 if not closed:
1287 if not closed:
1288 bheads = [h for h in bheads if
1288 bheads = [h for h in bheads if
1289 ('close' not in self.changelog.read(h)[5])]
1289 ('close' not in self.changelog.read(h)[5])]
1290 return bheads
1290 return bheads
1291
1291
1292 def branches(self, nodes):
1292 def branches(self, nodes):
1293 if not nodes:
1293 if not nodes:
1294 nodes = [self.changelog.tip()]
1294 nodes = [self.changelog.tip()]
1295 b = []
1295 b = []
1296 for n in nodes:
1296 for n in nodes:
1297 t = n
1297 t = n
1298 while 1:
1298 while 1:
1299 p = self.changelog.parents(n)
1299 p = self.changelog.parents(n)
1300 if p[1] != nullid or p[0] == nullid:
1300 if p[1] != nullid or p[0] == nullid:
1301 b.append((t, n, p[0], p[1]))
1301 b.append((t, n, p[0], p[1]))
1302 break
1302 break
1303 n = p[0]
1303 n = p[0]
1304 return b
1304 return b
1305
1305
1306 def between(self, pairs):
1306 def between(self, pairs):
1307 r = []
1307 r = []
1308
1308
1309 for top, bottom in pairs:
1309 for top, bottom in pairs:
1310 n, l, i = top, [], 0
1310 n, l, i = top, [], 0
1311 f = 1
1311 f = 1
1312
1312
1313 while n != bottom and n != nullid:
1313 while n != bottom and n != nullid:
1314 p = self.changelog.parents(n)[0]
1314 p = self.changelog.parents(n)[0]
1315 if i == f:
1315 if i == f:
1316 l.append(n)
1316 l.append(n)
1317 f = f * 2
1317 f = f * 2
1318 n = p
1318 n = p
1319 i += 1
1319 i += 1
1320
1320
1321 r.append(l)
1321 r.append(l)
1322
1322
1323 return r
1323 return r
1324
1324
1325 def pull(self, remote, heads=None, force=False):
1325 def pull(self, remote, heads=None, force=False):
1326 lock = self.lock()
1326 lock = self.lock()
1327 try:
1327 try:
1328 usecommon = remote.capable('getbundle')
1328 usecommon = remote.capable('getbundle')
1329 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1329 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1330 force=force, commononly=usecommon)
1330 force=force, commononly=usecommon)
1331 common, fetch, rheads = tmp
1331 common, fetch, rheads = tmp
1332 if not fetch:
1332 if not fetch:
1333 self.ui.status(_("no changes found\n"))
1333 self.ui.status(_("no changes found\n"))
1334 result = 0
1334 result = 0
1335 else:
1335 else:
1336 if heads is None and list(common) == [nullid]:
1336 if heads is None and list(common) == [nullid]:
1337 self.ui.status(_("requesting all changes\n"))
1337 self.ui.status(_("requesting all changes\n"))
1338 elif heads is None and remote.capable('changegroupsubset'):
1338 elif heads is None and remote.capable('changegroupsubset'):
1339 # issue1320, avoid a race if remote changed after discovery
1339 # issue1320, avoid a race if remote changed after discovery
1340 heads = rheads
1340 heads = rheads
1341
1341
1342 if usecommon:
1342 if usecommon:
1343 cg = remote.getbundle('pull', common=common,
1343 cg = remote.getbundle('pull', common=common,
1344 heads=heads or rheads)
1344 heads=heads or rheads)
1345 elif heads is None:
1345 elif heads is None:
1346 cg = remote.changegroup(fetch, 'pull')
1346 cg = remote.changegroup(fetch, 'pull')
1347 elif not remote.capable('changegroupsubset'):
1347 elif not remote.capable('changegroupsubset'):
1348 raise util.Abort(_("partial pull cannot be done because "
1348 raise util.Abort(_("partial pull cannot be done because "
1349 "other repository doesn't support "
1349 "other repository doesn't support "
1350 "changegroupsubset."))
1350 "changegroupsubset."))
1351 else:
1351 else:
1352 cg = remote.changegroupsubset(fetch, heads, 'pull')
1352 cg = remote.changegroupsubset(fetch, heads, 'pull')
1353 result = self.addchangegroup(cg, 'pull', remote.url(),
1353 result = self.addchangegroup(cg, 'pull', remote.url(),
1354 lock=lock)
1354 lock=lock)
1355 finally:
1355 finally:
1356 lock.release()
1356 lock.release()
1357
1357
1358 return result
1358 return result
1359
1359
1360 def checkpush(self, force, revs):
1360 def checkpush(self, force, revs):
1361 """Extensions can override this function if additional checks have
1361 """Extensions can override this function if additional checks have
1362 to be performed before pushing, or call it if they override push
1362 to be performed before pushing, or call it if they override push
1363 command.
1363 command.
1364 """
1364 """
1365 pass
1365 pass
1366
1366
1367 def push(self, remote, force=False, revs=None, newbranch=False):
1367 def push(self, remote, force=False, revs=None, newbranch=False):
1368 '''Push outgoing changesets (limited by revs) from the current
1368 '''Push outgoing changesets (limited by revs) from the current
1369 repository to remote. Return an integer:
1369 repository to remote. Return an integer:
1370 - 0 means HTTP error *or* nothing to push
1370 - 0 means HTTP error *or* nothing to push
1371 - 1 means we pushed and remote head count is unchanged *or*
1371 - 1 means we pushed and remote head count is unchanged *or*
1372 we have outgoing changesets but refused to push
1372 we have outgoing changesets but refused to push
1373 - other values as described by addchangegroup()
1373 - other values as described by addchangegroup()
1374 '''
1374 '''
1375 # there are two ways to push to remote repo:
1375 # there are two ways to push to remote repo:
1376 #
1376 #
1377 # addchangegroup assumes local user can lock remote
1377 # addchangegroup assumes local user can lock remote
1378 # repo (local filesystem, old ssh servers).
1378 # repo (local filesystem, old ssh servers).
1379 #
1379 #
1380 # unbundle assumes local user cannot lock remote repo (new ssh
1380 # unbundle assumes local user cannot lock remote repo (new ssh
1381 # servers, http servers).
1381 # servers, http servers).
1382
1382
1383 self.checkpush(force, revs)
1383 self.checkpush(force, revs)
1384 lock = None
1384 lock = None
1385 unbundle = remote.capable('unbundle')
1385 unbundle = remote.capable('unbundle')
1386 if not unbundle:
1386 if not unbundle:
1387 lock = remote.lock()
1387 lock = remote.lock()
1388 try:
1388 try:
1389 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1389 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1390 newbranch)
1390 newbranch)
1391 ret = remote_heads
1391 ret = remote_heads
1392 if cg is not None:
1392 if cg is not None:
1393 if unbundle:
1393 if unbundle:
1394 # local repo finds heads on server, finds out what
1394 # local repo finds heads on server, finds out what
1395 # revs it must push. once revs transferred, if server
1395 # revs it must push. once revs transferred, if server
1396 # finds it has different heads (someone else won
1396 # finds it has different heads (someone else won
1397 # commit/push race), server aborts.
1397 # commit/push race), server aborts.
1398 if force:
1398 if force:
1399 remote_heads = ['force']
1399 remote_heads = ['force']
1400 # ssh: return remote's addchangegroup()
1400 # ssh: return remote's addchangegroup()
1401 # http: return remote's addchangegroup() or 0 for error
1401 # http: return remote's addchangegroup() or 0 for error
1402 ret = remote.unbundle(cg, remote_heads, 'push')
1402 ret = remote.unbundle(cg, remote_heads, 'push')
1403 else:
1403 else:
1404 # we return an integer indicating remote head count change
1404 # we return an integer indicating remote head count change
1405 ret = remote.addchangegroup(cg, 'push', self.url(),
1405 ret = remote.addchangegroup(cg, 'push', self.url(),
1406 lock=lock)
1406 lock=lock)
1407 finally:
1407 finally:
1408 if lock is not None:
1408 if lock is not None:
1409 lock.release()
1409 lock.release()
1410
1410
1411 self.ui.debug("checking for updated bookmarks\n")
1411 self.ui.debug("checking for updated bookmarks\n")
1412 rb = remote.listkeys('bookmarks')
1412 rb = remote.listkeys('bookmarks')
1413 for k in rb.keys():
1413 for k in rb.keys():
1414 if k in self._bookmarks:
1414 if k in self._bookmarks:
1415 nr, nl = rb[k], hex(self._bookmarks[k])
1415 nr, nl = rb[k], hex(self._bookmarks[k])
1416 if nr in self:
1416 if nr in self:
1417 cr = self[nr]
1417 cr = self[nr]
1418 cl = self[nl]
1418 cl = self[nl]
1419 if cl in cr.descendants():
1419 if cl in cr.descendants():
1420 r = remote.pushkey('bookmarks', k, nr, nl)
1420 r = remote.pushkey('bookmarks', k, nr, nl)
1421 if r:
1421 if r:
1422 self.ui.status(_("updating bookmark %s\n") % k)
1422 self.ui.status(_("updating bookmark %s\n") % k)
1423 else:
1423 else:
1424 self.ui.warn(_('updating bookmark %s'
1424 self.ui.warn(_('updating bookmark %s'
1425 ' failed!\n') % k)
1425 ' failed!\n') % k)
1426
1426
1427 return ret
1427 return ret
1428
1428
1429 def changegroupinfo(self, nodes, source):
1429 def changegroupinfo(self, nodes, source):
1430 if self.ui.verbose or source == 'bundle':
1430 if self.ui.verbose or source == 'bundle':
1431 self.ui.status(_("%d changesets found\n") % len(nodes))
1431 self.ui.status(_("%d changesets found\n") % len(nodes))
1432 if self.ui.debugflag:
1432 if self.ui.debugflag:
1433 self.ui.debug("list of changesets:\n")
1433 self.ui.debug("list of changesets:\n")
1434 for node in nodes:
1434 for node in nodes:
1435 self.ui.debug("%s\n" % hex(node))
1435 self.ui.debug("%s\n" % hex(node))
1436
1436
1437 def changegroupsubset(self, bases, heads, source):
1437 def changegroupsubset(self, bases, heads, source):
1438 """Compute a changegroup consisting of all the nodes that are
1438 """Compute a changegroup consisting of all the nodes that are
1439 descendents of any of the bases and ancestors of any of the heads.
1439 descendents of any of the bases and ancestors of any of the heads.
1440 Return a chunkbuffer object whose read() method will return
1440 Return a chunkbuffer object whose read() method will return
1441 successive changegroup chunks.
1441 successive changegroup chunks.
1442
1442
1443 It is fairly complex as determining which filenodes and which
1443 It is fairly complex as determining which filenodes and which
1444 manifest nodes need to be included for the changeset to be complete
1444 manifest nodes need to be included for the changeset to be complete
1445 is non-trivial.
1445 is non-trivial.
1446
1446
1447 Another wrinkle is doing the reverse, figuring out which changeset in
1447 Another wrinkle is doing the reverse, figuring out which changeset in
1448 the changegroup a particular filenode or manifestnode belongs to.
1448 the changegroup a particular filenode or manifestnode belongs to.
1449 """
1449 """
1450 cl = self.changelog
1450 cl = self.changelog
1451 if not bases:
1451 if not bases:
1452 bases = [nullid]
1452 bases = [nullid]
1453 csets, bases, heads = cl.nodesbetween(bases, heads)
1453 csets, bases, heads = cl.nodesbetween(bases, heads)
1454 # We assume that all ancestors of bases are known
1454 # We assume that all ancestors of bases are known
1455 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1455 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1456 return self._changegroupsubset(common, csets, heads, source)
1456 return self._changegroupsubset(common, csets, heads, source)
1457
1457
1458 def getbundle(self, source, heads=None, common=None):
1458 def getbundle(self, source, heads=None, common=None):
1459 """Like changegroupsubset, but returns the set difference between the
1459 """Like changegroupsubset, but returns the set difference between the
1460 ancestors of heads and the ancestors common.
1460 ancestors of heads and the ancestors common.
1461
1461
1462 If heads is None, use the local heads. If common is None, use [nullid].
1462 If heads is None, use the local heads. If common is None, use [nullid].
1463
1463
1464 The nodes in common might not all be known locally due to the way the
1464 The nodes in common might not all be known locally due to the way the
1465 current discovery protocol works.
1465 current discovery protocol works.
1466 """
1466 """
1467 cl = self.changelog
1467 cl = self.changelog
1468 if common:
1468 if common:
1469 nm = cl.nodemap
1469 nm = cl.nodemap
1470 common = [n for n in common if n in nm]
1470 common = [n for n in common if n in nm]
1471 else:
1471 else:
1472 common = [nullid]
1472 common = [nullid]
1473 if not heads:
1473 if not heads:
1474 heads = cl.heads()
1474 heads = cl.heads()
1475 common, missing = cl.findcommonmissing(common, heads)
1475 common, missing = cl.findcommonmissing(common, heads)
1476 return self._changegroupsubset(common, missing, heads, source)
1476 return self._changegroupsubset(common, missing, heads, source)
1477
1477
1478 def _changegroupsubset(self, commonrevs, csets, heads, source):
1478 def _changegroupsubset(self, commonrevs, csets, heads, source):
1479
1479
1480 cl = self.changelog
1480 cl = self.changelog
1481 mf = self.manifest
1481 mf = self.manifest
1482 mfs = {} # needed manifests
1482 mfs = {} # needed manifests
1483 fnodes = {} # needed file nodes
1483 fnodes = {} # needed file nodes
1484 changedfiles = set()
1484 changedfiles = set()
1485 fstate = ['', {}]
1485 fstate = ['', {}]
1486 count = [0]
1486 count = [0]
1487
1487
1488 # can we go through the fast path ?
1488 # can we go through the fast path ?
1489 heads.sort()
1489 heads.sort()
1490 if heads == sorted(self.heads()):
1490 if heads == sorted(self.heads()):
1491 return self._changegroup(csets, source)
1491 return self._changegroup(csets, source)
1492
1492
1493 # slow path
1493 # slow path
1494 self.hook('preoutgoing', throw=True, source=source)
1494 self.hook('preoutgoing', throw=True, source=source)
1495 self.changegroupinfo(csets, source)
1495 self.changegroupinfo(csets, source)
1496
1496
1497 # filter any nodes that claim to be part of the known set
1497 # filter any nodes that claim to be part of the known set
1498 def prune(revlog, missing):
1498 def prune(revlog, missing):
1499 for n in missing:
1499 for n in missing:
1500 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1500 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1501 yield n
1501 yield n
1502
1502
1503 def lookup(revlog, x):
1503 def lookup(revlog, x):
1504 if revlog == cl:
1504 if revlog == cl:
1505 c = cl.read(x)
1505 c = cl.read(x)
1506 changedfiles.update(c[3])
1506 changedfiles.update(c[3])
1507 mfs.setdefault(c[0], x)
1507 mfs.setdefault(c[0], x)
1508 count[0] += 1
1508 count[0] += 1
1509 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1509 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1510 return x
1510 return x
1511 elif revlog == mf:
1511 elif revlog == mf:
1512 clnode = mfs[x]
1512 clnode = mfs[x]
1513 mdata = mf.readfast(x)
1513 mdata = mf.readfast(x)
1514 for f in changedfiles:
1514 for f in changedfiles:
1515 if f in mdata:
1515 if f in mdata:
1516 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1516 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1517 count[0] += 1
1517 count[0] += 1
1518 self.ui.progress(_('bundling'), count[0],
1518 self.ui.progress(_('bundling'), count[0],
1519 unit=_('manifests'), total=len(mfs))
1519 unit=_('manifests'), total=len(mfs))
1520 return mfs[x]
1520 return mfs[x]
1521 else:
1521 else:
1522 self.ui.progress(
1522 self.ui.progress(
1523 _('bundling'), count[0], item=fstate[0],
1523 _('bundling'), count[0], item=fstate[0],
1524 unit=_('files'), total=len(changedfiles))
1524 unit=_('files'), total=len(changedfiles))
1525 return fstate[1][x]
1525 return fstate[1][x]
1526
1526
1527 bundler = changegroup.bundle10(lookup)
1527 bundler = changegroup.bundle10(lookup)
1528
1528
1529 def gengroup():
1529 def gengroup():
1530 # Create a changenode group generator that will call our functions
1530 # Create a changenode group generator that will call our functions
1531 # back to lookup the owning changenode and collect information.
1531 # back to lookup the owning changenode and collect information.
1532 for chunk in cl.group(csets, bundler):
1532 for chunk in cl.group(csets, bundler):
1533 yield chunk
1533 yield chunk
1534 self.ui.progress(_('bundling'), None)
1534 self.ui.progress(_('bundling'), None)
1535
1535
1536 # Create a generator for the manifestnodes that calls our lookup
1536 # Create a generator for the manifestnodes that calls our lookup
1537 # and data collection functions back.
1537 # and data collection functions back.
1538 count[0] = 0
1538 count[0] = 0
1539 for chunk in mf.group(prune(mf, mfs), bundler):
1539 for chunk in mf.group(prune(mf, mfs), bundler):
1540 yield chunk
1540 yield chunk
1541 self.ui.progress(_('bundling'), None)
1541 self.ui.progress(_('bundling'), None)
1542
1542
1543 mfs.clear()
1543 mfs.clear()
1544
1544
1545 # Go through all our files in order sorted by name.
1545 # Go through all our files in order sorted by name.
1546 count[0] = 0
1546 count[0] = 0
1547 for fname in sorted(changedfiles):
1547 for fname in sorted(changedfiles):
1548 filerevlog = self.file(fname)
1548 filerevlog = self.file(fname)
1549 if not len(filerevlog):
1549 if not len(filerevlog):
1550 raise util.Abort(_("empty or missing revlog for %s") % fname)
1550 raise util.Abort(_("empty or missing revlog for %s") % fname)
1551 fstate[0] = fname
1551 fstate[0] = fname
1552 fstate[1] = fnodes.pop(fname, {})
1552 fstate[1] = fnodes.pop(fname, {})
1553 first = True
1553 first = True
1554
1554
1555 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1555 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1556 bundler):
1556 bundler):
1557 if first:
1557 if first:
1558 if chunk == bundler.close():
1558 if chunk == bundler.close():
1559 break
1559 break
1560 count[0] += 1
1560 count[0] += 1
1561 yield bundler.fileheader(fname)
1561 yield bundler.fileheader(fname)
1562 first = False
1562 first = False
1563 yield chunk
1563 yield chunk
1564 # Signal that no more groups are left.
1564 # Signal that no more groups are left.
1565 yield bundler.close()
1565 yield bundler.close()
1566 self.ui.progress(_('bundling'), None)
1566 self.ui.progress(_('bundling'), None)
1567
1567
1568 if csets:
1568 if csets:
1569 self.hook('outgoing', node=hex(csets[0]), source=source)
1569 self.hook('outgoing', node=hex(csets[0]), source=source)
1570
1570
1571 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1571 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1572
1572
1573 def changegroup(self, basenodes, source):
1573 def changegroup(self, basenodes, source):
1574 # to avoid a race we use changegroupsubset() (issue1320)
1574 # to avoid a race we use changegroupsubset() (issue1320)
1575 return self.changegroupsubset(basenodes, self.heads(), source)
1575 return self.changegroupsubset(basenodes, self.heads(), source)
1576
1576
1577 def _changegroup(self, nodes, source):
1577 def _changegroup(self, nodes, source):
1578 """Compute the changegroup of all nodes that we have that a recipient
1578 """Compute the changegroup of all nodes that we have that a recipient
1579 doesn't. Return a chunkbuffer object whose read() method will return
1579 doesn't. Return a chunkbuffer object whose read() method will return
1580 successive changegroup chunks.
1580 successive changegroup chunks.
1581
1581
1582 This is much easier than the previous function as we can assume that
1582 This is much easier than the previous function as we can assume that
1583 the recipient has any changenode we aren't sending them.
1583 the recipient has any changenode we aren't sending them.
1584
1584
1585 nodes is the set of nodes to send"""
1585 nodes is the set of nodes to send"""
1586
1586
1587 cl = self.changelog
1587 cl = self.changelog
1588 mf = self.manifest
1588 mf = self.manifest
1589 mfs = {}
1589 mfs = {}
1590 changedfiles = set()
1590 changedfiles = set()
1591 fstate = ['']
1591 fstate = ['']
1592 count = [0]
1592 count = [0]
1593
1593
1594 self.hook('preoutgoing', throw=True, source=source)
1594 self.hook('preoutgoing', throw=True, source=source)
1595 self.changegroupinfo(nodes, source)
1595 self.changegroupinfo(nodes, source)
1596
1596
1597 revset = set([cl.rev(n) for n in nodes])
1597 revset = set([cl.rev(n) for n in nodes])
1598
1598
1599 def gennodelst(log):
1599 def gennodelst(log):
1600 for r in log:
1600 for r in log:
1601 if log.linkrev(r) in revset:
1601 if log.linkrev(r) in revset:
1602 yield log.node(r)
1602 yield log.node(r)
1603
1603
1604 def lookup(revlog, x):
1604 def lookup(revlog, x):
1605 if revlog == cl:
1605 if revlog == cl:
1606 c = cl.read(x)
1606 c = cl.read(x)
1607 changedfiles.update(c[3])
1607 changedfiles.update(c[3])
1608 mfs.setdefault(c[0], x)
1608 mfs.setdefault(c[0], x)
1609 count[0] += 1
1609 count[0] += 1
1610 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1610 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1611 return x
1611 return x
1612 elif revlog == mf:
1612 elif revlog == mf:
1613 count[0] += 1
1613 count[0] += 1
1614 self.ui.progress(_('bundling'), count[0],
1614 self.ui.progress(_('bundling'), count[0],
1615 unit=_('manifests'), total=len(mfs))
1615 unit=_('manifests'), total=len(mfs))
1616 return cl.node(revlog.linkrev(revlog.rev(x)))
1616 return cl.node(revlog.linkrev(revlog.rev(x)))
1617 else:
1617 else:
1618 self.ui.progress(
1618 self.ui.progress(
1619 _('bundling'), count[0], item=fstate[0],
1619 _('bundling'), count[0], item=fstate[0],
1620 total=len(changedfiles), unit=_('files'))
1620 total=len(changedfiles), unit=_('files'))
1621 return cl.node(revlog.linkrev(revlog.rev(x)))
1621 return cl.node(revlog.linkrev(revlog.rev(x)))
1622
1622
1623 bundler = changegroup.bundle10(lookup)
1623 bundler = changegroup.bundle10(lookup)
1624
1624
1625 def gengroup():
1625 def gengroup():
1626 '''yield a sequence of changegroup chunks (strings)'''
1626 '''yield a sequence of changegroup chunks (strings)'''
1627 # construct a list of all changed files
1627 # construct a list of all changed files
1628
1628
1629 for chunk in cl.group(nodes, bundler):
1629 for chunk in cl.group(nodes, bundler):
1630 yield chunk
1630 yield chunk
1631 self.ui.progress(_('bundling'), None)
1631 self.ui.progress(_('bundling'), None)
1632
1632
1633 count[0] = 0
1633 count[0] = 0
1634 for chunk in mf.group(gennodelst(mf), bundler):
1634 for chunk in mf.group(gennodelst(mf), bundler):
1635 yield chunk
1635 yield chunk
1636 self.ui.progress(_('bundling'), None)
1636 self.ui.progress(_('bundling'), None)
1637
1637
1638 count[0] = 0
1638 count[0] = 0
1639 for fname in sorted(changedfiles):
1639 for fname in sorted(changedfiles):
1640 filerevlog = self.file(fname)
1640 filerevlog = self.file(fname)
1641 if not len(filerevlog):
1641 if not len(filerevlog):
1642 raise util.Abort(_("empty or missing revlog for %s") % fname)
1642 raise util.Abort(_("empty or missing revlog for %s") % fname)
1643 fstate[0] = fname
1643 fstate[0] = fname
1644 first = True
1644 first = True
1645 for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
1645 for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
1646 if first:
1646 if first:
1647 if chunk == bundler.close():
1647 if chunk == bundler.close():
1648 break
1648 break
1649 count[0] += 1
1649 count[0] += 1
1650 yield bundler.fileheader(fname)
1650 yield bundler.fileheader(fname)
1651 first = False
1651 first = False
1652 yield chunk
1652 yield chunk
1653 yield bundler.close()
1653 yield bundler.close()
1654 self.ui.progress(_('bundling'), None)
1654 self.ui.progress(_('bundling'), None)
1655
1655
1656 if nodes:
1656 if nodes:
1657 self.hook('outgoing', node=hex(nodes[0]), source=source)
1657 self.hook('outgoing', node=hex(nodes[0]), source=source)
1658
1658
1659 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1659 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1660
1660
1661 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1661 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1662 """Add the changegroup returned by source.read() to this repo.
1662 """Add the changegroup returned by source.read() to this repo.
1663 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1663 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1664 the URL of the repo where this changegroup is coming from.
1664 the URL of the repo where this changegroup is coming from.
1665 If lock is not None, the function takes ownership of the lock
1665 If lock is not None, the function takes ownership of the lock
1666 and releases it after the changegroup is added.
1666 and releases it after the changegroup is added.
1667
1667
1668 Return an integer summarizing the change to this repo:
1668 Return an integer summarizing the change to this repo:
1669 - nothing changed or no source: 0
1669 - nothing changed or no source: 0
1670 - more heads than before: 1+added heads (2..n)
1670 - more heads than before: 1+added heads (2..n)
1671 - fewer heads than before: -1-removed heads (-2..-n)
1671 - fewer heads than before: -1-removed heads (-2..-n)
1672 - number of heads stays the same: 1
1672 - number of heads stays the same: 1
1673 """
1673 """
1674 def csmap(x):
1674 def csmap(x):
1675 self.ui.debug("add changeset %s\n" % short(x))
1675 self.ui.debug("add changeset %s\n" % short(x))
1676 return len(cl)
1676 return len(cl)
1677
1677
1678 def revmap(x):
1678 def revmap(x):
1679 return cl.rev(x)
1679 return cl.rev(x)
1680
1680
1681 if not source:
1681 if not source:
1682 return 0
1682 return 0
1683
1683
1684 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1684 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1685
1685
1686 changesets = files = revisions = 0
1686 changesets = files = revisions = 0
1687 efiles = set()
1687 efiles = set()
1688
1688
1689 # write changelog data to temp files so concurrent readers will not see
1689 # write changelog data to temp files so concurrent readers will not see
1690 # inconsistent view
1690 # inconsistent view
1691 cl = self.changelog
1691 cl = self.changelog
1692 cl.delayupdate()
1692 cl.delayupdate()
1693 oldheads = len(cl.heads())
1693 oldheads = len(cl.heads())
1694
1694
1695 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1695 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1696 try:
1696 try:
1697 trp = weakref.proxy(tr)
1697 trp = weakref.proxy(tr)
1698 # pull off the changeset group
1698 # pull off the changeset group
1699 self.ui.status(_("adding changesets\n"))
1699 self.ui.status(_("adding changesets\n"))
1700 clstart = len(cl)
1700 clstart = len(cl)
1701 class prog(object):
1701 class prog(object):
1702 step = _('changesets')
1702 step = _('changesets')
1703 count = 1
1703 count = 1
1704 ui = self.ui
1704 ui = self.ui
1705 total = None
1705 total = None
1706 def __call__(self):
1706 def __call__(self):
1707 self.ui.progress(self.step, self.count, unit=_('chunks'),
1707 self.ui.progress(self.step, self.count, unit=_('chunks'),
1708 total=self.total)
1708 total=self.total)
1709 self.count += 1
1709 self.count += 1
1710 pr = prog()
1710 pr = prog()
1711 source.callback = pr
1711 source.callback = pr
1712
1712
1713 if (cl.addgroup(source, csmap, trp) is None
1713 if (cl.addgroup(source, csmap, trp) is None
1714 and not emptyok):
1714 and not emptyok):
1715 raise util.Abort(_("received changelog group is empty"))
1715 raise util.Abort(_("received changelog group is empty"))
1716 clend = len(cl)
1716 clend = len(cl)
1717 changesets = clend - clstart
1717 changesets = clend - clstart
1718 for c in xrange(clstart, clend):
1718 for c in xrange(clstart, clend):
1719 efiles.update(self[c].files())
1719 efiles.update(self[c].files())
1720 efiles = len(efiles)
1720 efiles = len(efiles)
1721 self.ui.progress(_('changesets'), None)
1721 self.ui.progress(_('changesets'), None)
1722
1722
1723 # pull off the manifest group
1723 # pull off the manifest group
1724 self.ui.status(_("adding manifests\n"))
1724 self.ui.status(_("adding manifests\n"))
1725 pr.step = _('manifests')
1725 pr.step = _('manifests')
1726 pr.count = 1
1726 pr.count = 1
1727 pr.total = changesets # manifests <= changesets
1727 pr.total = changesets # manifests <= changesets
1728 # no need to check for empty manifest group here:
1728 # no need to check for empty manifest group here:
1729 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1729 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1730 # no new manifest will be created and the manifest group will
1730 # no new manifest will be created and the manifest group will
1731 # be empty during the pull
1731 # be empty during the pull
1732 self.manifest.addgroup(source, revmap, trp)
1732 self.manifest.addgroup(source, revmap, trp)
1733 self.ui.progress(_('manifests'), None)
1733 self.ui.progress(_('manifests'), None)
1734
1734
1735 needfiles = {}
1735 needfiles = {}
1736 if self.ui.configbool('server', 'validate', default=False):
1736 if self.ui.configbool('server', 'validate', default=False):
1737 # validate incoming csets have their manifests
1737 # validate incoming csets have their manifests
1738 for cset in xrange(clstart, clend):
1738 for cset in xrange(clstart, clend):
1739 mfest = self.changelog.read(self.changelog.node(cset))[0]
1739 mfest = self.changelog.read(self.changelog.node(cset))[0]
1740 mfest = self.manifest.readdelta(mfest)
1740 mfest = self.manifest.readdelta(mfest)
1741 # store file nodes we must see
1741 # store file nodes we must see
1742 for f, n in mfest.iteritems():
1742 for f, n in mfest.iteritems():
1743 needfiles.setdefault(f, set()).add(n)
1743 needfiles.setdefault(f, set()).add(n)
1744
1744
1745 # process the files
1745 # process the files
1746 self.ui.status(_("adding file changes\n"))
1746 self.ui.status(_("adding file changes\n"))
1747 pr.step = 'files'
1747 pr.step = 'files'
1748 pr.count = 1
1748 pr.count = 1
1749 pr.total = efiles
1749 pr.total = efiles
1750 source.callback = None
1750 source.callback = None
1751
1751
1752 while 1:
1752 while 1:
1753 f = source.chunk()
1753 f = source.chunk()
1754 if not f:
1754 if not f:
1755 break
1755 break
1756 self.ui.debug("adding %s revisions\n" % f)
1756 self.ui.debug("adding %s revisions\n" % f)
1757 pr()
1757 pr()
1758 fl = self.file(f)
1758 fl = self.file(f)
1759 o = len(fl)
1759 o = len(fl)
1760 if fl.addgroup(source, revmap, trp) is None:
1760 if fl.addgroup(source, revmap, trp) is None:
1761 raise util.Abort(_("received file revlog group is empty"))
1761 raise util.Abort(_("received file revlog group is empty"))
1762 revisions += len(fl) - o
1762 revisions += len(fl) - o
1763 files += 1
1763 files += 1
1764 if f in needfiles:
1764 if f in needfiles:
1765 needs = needfiles[f]
1765 needs = needfiles[f]
1766 for new in xrange(o, len(fl)):
1766 for new in xrange(o, len(fl)):
1767 n = fl.node(new)
1767 n = fl.node(new)
1768 if n in needs:
1768 if n in needs:
1769 needs.remove(n)
1769 needs.remove(n)
1770 if not needs:
1770 if not needs:
1771 del needfiles[f]
1771 del needfiles[f]
1772 self.ui.progress(_('files'), None)
1772 self.ui.progress(_('files'), None)
1773
1773
1774 for f, needs in needfiles.iteritems():
1774 for f, needs in needfiles.iteritems():
1775 fl = self.file(f)
1775 fl = self.file(f)
1776 for n in needs:
1776 for n in needs:
1777 try:
1777 try:
1778 fl.rev(n)
1778 fl.rev(n)
1779 except error.LookupError:
1779 except error.LookupError:
1780 raise util.Abort(
1780 raise util.Abort(
1781 _('missing file data for %s:%s - run hg verify') %
1781 _('missing file data for %s:%s - run hg verify') %
1782 (f, hex(n)))
1782 (f, hex(n)))
1783
1783
1784 newheads = len(cl.heads())
1784 newheads = len(cl.heads())
1785 heads = ""
1785 heads = ""
1786 if oldheads and newheads != oldheads:
1786 if oldheads and newheads != oldheads:
1787 heads = _(" (%+d heads)") % (newheads - oldheads)
1787 heads = _(" (%+d heads)") % (newheads - oldheads)
1788
1788
1789 self.ui.status(_("added %d changesets"
1789 self.ui.status(_("added %d changesets"
1790 " with %d changes to %d files%s\n")
1790 " with %d changes to %d files%s\n")
1791 % (changesets, revisions, files, heads))
1791 % (changesets, revisions, files, heads))
1792
1792
1793 if changesets > 0:
1793 if changesets > 0:
1794 p = lambda: cl.writepending() and self.root or ""
1794 p = lambda: cl.writepending() and self.root or ""
1795 self.hook('pretxnchangegroup', throw=True,
1795 self.hook('pretxnchangegroup', throw=True,
1796 node=hex(cl.node(clstart)), source=srctype,
1796 node=hex(cl.node(clstart)), source=srctype,
1797 url=url, pending=p)
1797 url=url, pending=p)
1798
1798
1799 # make changelog see real files again
1799 # make changelog see real files again
1800 cl.finalize(trp)
1800 cl.finalize(trp)
1801
1801
1802 tr.close()
1802 tr.close()
1803 finally:
1803 finally:
1804 tr.release()
1804 tr.release()
1805 if lock:
1805 if lock:
1806 lock.release()
1806 lock.release()
1807
1807
1808 if changesets > 0:
1808 if changesets > 0:
1809 # forcefully update the on-disk branch cache
1809 # forcefully update the on-disk branch cache
1810 self.ui.debug("updating the branch cache\n")
1810 self.ui.debug("updating the branch cache\n")
1811 self.updatebranchcache()
1811 self.updatebranchcache()
1812 self.hook("changegroup", node=hex(cl.node(clstart)),
1812 self.hook("changegroup", node=hex(cl.node(clstart)),
1813 source=srctype, url=url)
1813 source=srctype, url=url)
1814
1814
1815 for i in xrange(clstart, clend):
1815 for i in xrange(clstart, clend):
1816 self.hook("incoming", node=hex(cl.node(i)),
1816 self.hook("incoming", node=hex(cl.node(i)),
1817 source=srctype, url=url)
1817 source=srctype, url=url)
1818
1818
1819 # never return 0 here:
1819 # never return 0 here:
1820 if newheads < oldheads:
1820 if newheads < oldheads:
1821 return newheads - oldheads - 1
1821 return newheads - oldheads - 1
1822 else:
1822 else:
1823 return newheads - oldheads + 1
1823 return newheads - oldheads + 1
1824
1824
1825
1825
1826 def stream_in(self, remote, requirements):
1826 def stream_in(self, remote, requirements):
1827 lock = self.lock()
1827 lock = self.lock()
1828 try:
1828 try:
1829 fp = remote.stream_out()
1829 fp = remote.stream_out()
1830 l = fp.readline()
1830 l = fp.readline()
1831 try:
1831 try:
1832 resp = int(l)
1832 resp = int(l)
1833 except ValueError:
1833 except ValueError:
1834 raise error.ResponseError(
1834 raise error.ResponseError(
1835 _('Unexpected response from remote server:'), l)
1835 _('Unexpected response from remote server:'), l)
1836 if resp == 1:
1836 if resp == 1:
1837 raise util.Abort(_('operation forbidden by server'))
1837 raise util.Abort(_('operation forbidden by server'))
1838 elif resp == 2:
1838 elif resp == 2:
1839 raise util.Abort(_('locking the remote repository failed'))
1839 raise util.Abort(_('locking the remote repository failed'))
1840 elif resp != 0:
1840 elif resp != 0:
1841 raise util.Abort(_('the server sent an unknown error code'))
1841 raise util.Abort(_('the server sent an unknown error code'))
1842 self.ui.status(_('streaming all changes\n'))
1842 self.ui.status(_('streaming all changes\n'))
1843 l = fp.readline()
1843 l = fp.readline()
1844 try:
1844 try:
1845 total_files, total_bytes = map(int, l.split(' ', 1))
1845 total_files, total_bytes = map(int, l.split(' ', 1))
1846 except (ValueError, TypeError):
1846 except (ValueError, TypeError):
1847 raise error.ResponseError(
1847 raise error.ResponseError(
1848 _('Unexpected response from remote server:'), l)
1848 _('Unexpected response from remote server:'), l)
1849 self.ui.status(_('%d files to transfer, %s of data\n') %
1849 self.ui.status(_('%d files to transfer, %s of data\n') %
1850 (total_files, util.bytecount(total_bytes)))
1850 (total_files, util.bytecount(total_bytes)))
1851 start = time.time()
1851 start = time.time()
1852 for i in xrange(total_files):
1852 for i in xrange(total_files):
1853 # XXX doesn't support '\n' or '\r' in filenames
1853 # XXX doesn't support '\n' or '\r' in filenames
1854 l = fp.readline()
1854 l = fp.readline()
1855 try:
1855 try:
1856 name, size = l.split('\0', 1)
1856 name, size = l.split('\0', 1)
1857 size = int(size)
1857 size = int(size)
1858 except (ValueError, TypeError):
1858 except (ValueError, TypeError):
1859 raise error.ResponseError(
1859 raise error.ResponseError(
1860 _('Unexpected response from remote server:'), l)
1860 _('Unexpected response from remote server:'), l)
1861 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1861 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1862 # for backwards compat, name was partially encoded
1862 # for backwards compat, name was partially encoded
1863 ofp = self.sopener(store.decodedir(name), 'w')
1863 ofp = self.sopener(store.decodedir(name), 'w')
1864 for chunk in util.filechunkiter(fp, limit=size):
1864 for chunk in util.filechunkiter(fp, limit=size):
1865 ofp.write(chunk)
1865 ofp.write(chunk)
1866 ofp.close()
1866 ofp.close()
1867 elapsed = time.time() - start
1867 elapsed = time.time() - start
1868 if elapsed <= 0:
1868 if elapsed <= 0:
1869 elapsed = 0.001
1869 elapsed = 0.001
1870 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1870 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1871 (util.bytecount(total_bytes), elapsed,
1871 (util.bytecount(total_bytes), elapsed,
1872 util.bytecount(total_bytes / elapsed)))
1872 util.bytecount(total_bytes / elapsed)))
1873
1873
1874 # new requirements = old non-format requirements + new format-related
1874 # new requirements = old non-format requirements + new format-related
1875 # requirements from the streamed-in repository
1875 # requirements from the streamed-in repository
1876 requirements.update(set(self.requirements) - self.supportedformats)
1876 requirements.update(set(self.requirements) - self.supportedformats)
1877 self._applyrequirements(requirements)
1877 self._applyrequirements(requirements)
1878 self._writerequirements()
1878 self._writerequirements()
1879
1879
1880 self.invalidate()
1880 self.invalidate()
1881 return len(self.heads()) + 1
1881 return len(self.heads()) + 1
1882 finally:
1882 finally:
1883 lock.release()
1883 lock.release()
1884
1884
1885 def clone(self, remote, heads=[], stream=False):
1885 def clone(self, remote, heads=[], stream=False):
1886 '''clone remote repository.
1886 '''clone remote repository.
1887
1887
1888 keyword arguments:
1888 keyword arguments:
1889 heads: list of revs to clone (forces use of pull)
1889 heads: list of revs to clone (forces use of pull)
1890 stream: use streaming clone if possible'''
1890 stream: use streaming clone if possible'''
1891
1891
1892 # now, all clients that can request uncompressed clones can
1892 # now, all clients that can request uncompressed clones can
1893 # read repo formats supported by all servers that can serve
1893 # read repo formats supported by all servers that can serve
1894 # them.
1894 # them.
1895
1895
1896 # if revlog format changes, client will have to check version
1896 # if revlog format changes, client will have to check version
1897 # and format flags on "stream" capability, and use
1897 # and format flags on "stream" capability, and use
1898 # uncompressed only if compatible.
1898 # uncompressed only if compatible.
1899
1899
1900 if stream and not heads:
1900 if stream and not heads:
1901 # 'stream' means remote revlog format is revlogv1 only
1901 # 'stream' means remote revlog format is revlogv1 only
1902 if remote.capable('stream'):
1902 if remote.capable('stream'):
1903 return self.stream_in(remote, set(('revlogv1',)))
1903 return self.stream_in(remote, set(('revlogv1',)))
1904 # otherwise, 'streamreqs' contains the remote revlog format
1904 # otherwise, 'streamreqs' contains the remote revlog format
1905 streamreqs = remote.capable('streamreqs')
1905 streamreqs = remote.capable('streamreqs')
1906 if streamreqs:
1906 if streamreqs:
1907 streamreqs = set(streamreqs.split(','))
1907 streamreqs = set(streamreqs.split(','))
1908 # if we support it, stream in and adjust our requirements
1908 # if we support it, stream in and adjust our requirements
1909 if not streamreqs - self.supportedformats:
1909 if not streamreqs - self.supportedformats:
1910 return self.stream_in(remote, streamreqs)
1910 return self.stream_in(remote, streamreqs)
1911 return self.pull(remote, heads)
1911 return self.pull(remote, heads)
1912
1912
1913 def pushkey(self, namespace, key, old, new):
1913 def pushkey(self, namespace, key, old, new):
1914 return pushkey.push(self, namespace, key, old, new)
1914 return pushkey.push(self, namespace, key, old, new)
1915
1915
1916 def listkeys(self, namespace):
1916 def listkeys(self, namespace):
1917 return pushkey.list(self, namespace)
1917 return pushkey.list(self, namespace)
1918
1918
1919 def debugwireargs(self, one, two, three=None, four=None):
1919 def debugwireargs(self, one, two, three=None, four=None):
1920 '''used to test argument passing over the wire'''
1920 '''used to test argument passing over the wire'''
1921 return "%s %s %s %s" % (one, two, three, four)
1921 return "%s %s %s %s" % (one, two, three, four)
1922
1922
1923 # used to avoid circular references so destructors work
1923 # used to avoid circular references so destructors work
1924 def aftertrans(files):
1924 def aftertrans(files):
1925 renamefiles = [tuple(t) for t in files]
1925 renamefiles = [tuple(t) for t in files]
1926 def a():
1926 def a():
1927 for src, dest in renamefiles:
1927 for src, dest in renamefiles:
1928 util.rename(src, dest)
1928 util.rename(src, dest)
1929 return a
1929 return a
1930
1930
1931 def instance(ui, path, create):
1931 def instance(ui, path, create):
1932 return localrepository(ui, urlmod.localpath(path), create)
1932 return localrepository(ui, urlmod.localpath(path), create)
1933
1933
1934 def islocal(path):
1934 def islocal(path):
1935 return True
1935 return True
@@ -1,562 +1,562 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, hex, bin
8 from node import nullid, nullrev, hex, bin
9 from i18n import _
9 from i18n import _
10 import util, filemerge, copies, subrepo
10 import scmutil, util, filemerge, copies, subrepo
11 import errno, os, shutil
11 import errno, os, shutil
12
12
13 class mergestate(object):
13 class mergestate(object):
14 '''track 3-way merge state of individual files'''
14 '''track 3-way merge state of individual files'''
15 def __init__(self, repo):
15 def __init__(self, repo):
16 self._repo = repo
16 self._repo = repo
17 self._dirty = False
17 self._dirty = False
18 self._read()
18 self._read()
19 def reset(self, node=None):
19 def reset(self, node=None):
20 self._state = {}
20 self._state = {}
21 if node:
21 if node:
22 self._local = node
22 self._local = node
23 shutil.rmtree(self._repo.join("merge"), True)
23 shutil.rmtree(self._repo.join("merge"), True)
24 self._dirty = False
24 self._dirty = False
25 def _read(self):
25 def _read(self):
26 self._state = {}
26 self._state = {}
27 try:
27 try:
28 f = self._repo.opener("merge/state")
28 f = self._repo.opener("merge/state")
29 for i, l in enumerate(f):
29 for i, l in enumerate(f):
30 if i == 0:
30 if i == 0:
31 self._local = bin(l[:-1])
31 self._local = bin(l[:-1])
32 else:
32 else:
33 bits = l[:-1].split("\0")
33 bits = l[:-1].split("\0")
34 self._state[bits[0]] = bits[1:]
34 self._state[bits[0]] = bits[1:]
35 f.close()
35 f.close()
36 except IOError, err:
36 except IOError, err:
37 if err.errno != errno.ENOENT:
37 if err.errno != errno.ENOENT:
38 raise
38 raise
39 self._dirty = False
39 self._dirty = False
40 def commit(self):
40 def commit(self):
41 if self._dirty:
41 if self._dirty:
42 f = self._repo.opener("merge/state", "w")
42 f = self._repo.opener("merge/state", "w")
43 f.write(hex(self._local) + "\n")
43 f.write(hex(self._local) + "\n")
44 for d, v in self._state.iteritems():
44 for d, v in self._state.iteritems():
45 f.write("\0".join([d] + v) + "\n")
45 f.write("\0".join([d] + v) + "\n")
46 f.close()
46 f.close()
47 self._dirty = False
47 self._dirty = False
48 def add(self, fcl, fco, fca, fd, flags):
48 def add(self, fcl, fco, fca, fd, flags):
49 hash = util.sha1(fcl.path()).hexdigest()
49 hash = util.sha1(fcl.path()).hexdigest()
50 self._repo.opener("merge/" + hash, "w").write(fcl.data())
50 self._repo.opener("merge/" + hash, "w").write(fcl.data())
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
52 hex(fca.filenode()), fco.path(), flags]
52 hex(fca.filenode()), fco.path(), flags]
53 self._dirty = True
53 self._dirty = True
54 def __contains__(self, dfile):
54 def __contains__(self, dfile):
55 return dfile in self._state
55 return dfile in self._state
56 def __getitem__(self, dfile):
56 def __getitem__(self, dfile):
57 return self._state[dfile][0]
57 return self._state[dfile][0]
58 def __iter__(self):
58 def __iter__(self):
59 l = self._state.keys()
59 l = self._state.keys()
60 l.sort()
60 l.sort()
61 for f in l:
61 for f in l:
62 yield f
62 yield f
63 def mark(self, dfile, state):
63 def mark(self, dfile, state):
64 self._state[dfile][0] = state
64 self._state[dfile][0] = state
65 self._dirty = True
65 self._dirty = True
66 def resolve(self, dfile, wctx, octx):
66 def resolve(self, dfile, wctx, octx):
67 if self[dfile] == 'r':
67 if self[dfile] == 'r':
68 return 0
68 return 0
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
70 f = self._repo.opener("merge/" + hash)
70 f = self._repo.opener("merge/" + hash)
71 self._repo.wwrite(dfile, f.read(), flags)
71 self._repo.wwrite(dfile, f.read(), flags)
72 f.close()
72 f.close()
73 fcd = wctx[dfile]
73 fcd = wctx[dfile]
74 fco = octx[ofile]
74 fco = octx[ofile]
75 fca = self._repo.filectx(afile, fileid=anode)
75 fca = self._repo.filectx(afile, fileid=anode)
76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
77 if r is None:
77 if r is None:
78 # no real conflict
78 # no real conflict
79 del self._state[dfile]
79 del self._state[dfile]
80 elif not r:
80 elif not r:
81 self.mark(dfile, 'r')
81 self.mark(dfile, 'r')
82 return r
82 return r
83
83
84 def _checkunknown(wctx, mctx):
84 def _checkunknown(wctx, mctx):
85 "check for collisions between unknown files and files in mctx"
85 "check for collisions between unknown files and files in mctx"
86 for f in wctx.unknown():
86 for f in wctx.unknown():
87 if f in mctx and mctx[f].cmp(wctx[f]):
87 if f in mctx and mctx[f].cmp(wctx[f]):
88 raise util.Abort(_("untracked file in working directory differs"
88 raise util.Abort(_("untracked file in working directory differs"
89 " from file in requested revision: '%s'") % f)
89 " from file in requested revision: '%s'") % f)
90
90
91 def _checkcollision(mctx):
91 def _checkcollision(mctx):
92 "check for case folding collisions in the destination context"
92 "check for case folding collisions in the destination context"
93 folded = {}
93 folded = {}
94 for fn in mctx:
94 for fn in mctx:
95 fold = fn.lower()
95 fold = fn.lower()
96 if fold in folded:
96 if fold in folded:
97 raise util.Abort(_("case-folding collision between %s and %s")
97 raise util.Abort(_("case-folding collision between %s and %s")
98 % (fn, folded[fold]))
98 % (fn, folded[fold]))
99 folded[fold] = fn
99 folded[fold] = fn
100
100
101 def _forgetremoved(wctx, mctx, branchmerge):
101 def _forgetremoved(wctx, mctx, branchmerge):
102 """
102 """
103 Forget removed files
103 Forget removed files
104
104
105 If we're jumping between revisions (as opposed to merging), and if
105 If we're jumping between revisions (as opposed to merging), and if
106 neither the working directory nor the target rev has the file,
106 neither the working directory nor the target rev has the file,
107 then we need to remove it from the dirstate, to prevent the
107 then we need to remove it from the dirstate, to prevent the
108 dirstate from listing the file when it is no longer in the
108 dirstate from listing the file when it is no longer in the
109 manifest.
109 manifest.
110
110
111 If we're merging, and the other revision has removed a file
111 If we're merging, and the other revision has removed a file
112 that is not present in the working directory, we need to mark it
112 that is not present in the working directory, we need to mark it
113 as removed.
113 as removed.
114 """
114 """
115
115
116 action = []
116 action = []
117 state = branchmerge and 'r' or 'f'
117 state = branchmerge and 'r' or 'f'
118 for f in wctx.deleted():
118 for f in wctx.deleted():
119 if f not in mctx:
119 if f not in mctx:
120 action.append((f, state))
120 action.append((f, state))
121
121
122 if not branchmerge:
122 if not branchmerge:
123 for f in wctx.removed():
123 for f in wctx.removed():
124 if f not in mctx:
124 if f not in mctx:
125 action.append((f, "f"))
125 action.append((f, "f"))
126
126
127 return action
127 return action
128
128
129 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
129 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
130 """
130 """
131 Merge p1 and p2 with ancestor pa and generate merge action list
131 Merge p1 and p2 with ancestor pa and generate merge action list
132
132
133 overwrite = whether we clobber working files
133 overwrite = whether we clobber working files
134 partial = function to filter file lists
134 partial = function to filter file lists
135 """
135 """
136
136
137 def fmerge(f, f2, fa):
137 def fmerge(f, f2, fa):
138 """merge flags"""
138 """merge flags"""
139 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
139 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
140 if m == n: # flags agree
140 if m == n: # flags agree
141 return m # unchanged
141 return m # unchanged
142 if m and n and not a: # flags set, don't agree, differ from parent
142 if m and n and not a: # flags set, don't agree, differ from parent
143 r = repo.ui.promptchoice(
143 r = repo.ui.promptchoice(
144 _(" conflicting flags for %s\n"
144 _(" conflicting flags for %s\n"
145 "(n)one, e(x)ec or sym(l)ink?") % f,
145 "(n)one, e(x)ec or sym(l)ink?") % f,
146 (_("&None"), _("E&xec"), _("Sym&link")), 0)
146 (_("&None"), _("E&xec"), _("Sym&link")), 0)
147 if r == 1:
147 if r == 1:
148 return "x" # Exec
148 return "x" # Exec
149 if r == 2:
149 if r == 2:
150 return "l" # Symlink
150 return "l" # Symlink
151 return ""
151 return ""
152 if m and m != a: # changed from a to m
152 if m and m != a: # changed from a to m
153 return m
153 return m
154 if n and n != a: # changed from a to n
154 if n and n != a: # changed from a to n
155 return n
155 return n
156 return '' # flag was cleared
156 return '' # flag was cleared
157
157
158 def act(msg, m, f, *args):
158 def act(msg, m, f, *args):
159 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
159 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
160 action.append((f, m) + args)
160 action.append((f, m) + args)
161
161
162 action, copy = [], {}
162 action, copy = [], {}
163
163
164 if overwrite:
164 if overwrite:
165 pa = p1
165 pa = p1
166 elif pa == p2: # backwards
166 elif pa == p2: # backwards
167 pa = p1.p1()
167 pa = p1.p1()
168 elif pa and repo.ui.configbool("merge", "followcopies", True):
168 elif pa and repo.ui.configbool("merge", "followcopies", True):
169 dirs = repo.ui.configbool("merge", "followdirs", True)
169 dirs = repo.ui.configbool("merge", "followdirs", True)
170 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
170 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
171 for of, fl in diverge.iteritems():
171 for of, fl in diverge.iteritems():
172 act("divergent renames", "dr", of, fl)
172 act("divergent renames", "dr", of, fl)
173
173
174 repo.ui.note(_("resolving manifests\n"))
174 repo.ui.note(_("resolving manifests\n"))
175 repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
175 repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
176 repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))
176 repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))
177
177
178 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
178 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
179 copied = set(copy.values())
179 copied = set(copy.values())
180
180
181 if '.hgsubstate' in m1:
181 if '.hgsubstate' in m1:
182 # check whether sub state is modified
182 # check whether sub state is modified
183 for s in p1.substate:
183 for s in p1.substate:
184 if p1.sub(s).dirty():
184 if p1.sub(s).dirty():
185 m1['.hgsubstate'] += "+"
185 m1['.hgsubstate'] += "+"
186 break
186 break
187
187
188 # Compare manifests
188 # Compare manifests
189 for f, n in m1.iteritems():
189 for f, n in m1.iteritems():
190 if partial and not partial(f):
190 if partial and not partial(f):
191 continue
191 continue
192 if f in m2:
192 if f in m2:
193 rflags = fmerge(f, f, f)
193 rflags = fmerge(f, f, f)
194 a = ma.get(f, nullid)
194 a = ma.get(f, nullid)
195 if n == m2[f] or m2[f] == a: # same or local newer
195 if n == m2[f] or m2[f] == a: # same or local newer
196 # is file locally modified or flags need changing?
196 # is file locally modified or flags need changing?
197 # dirstate flags may need to be made current
197 # dirstate flags may need to be made current
198 if m1.flags(f) != rflags or n[20:]:
198 if m1.flags(f) != rflags or n[20:]:
199 act("update permissions", "e", f, rflags)
199 act("update permissions", "e", f, rflags)
200 elif n == a: # remote newer
200 elif n == a: # remote newer
201 act("remote is newer", "g", f, rflags)
201 act("remote is newer", "g", f, rflags)
202 else: # both changed
202 else: # both changed
203 act("versions differ", "m", f, f, f, rflags, False)
203 act("versions differ", "m", f, f, f, rflags, False)
204 elif f in copied: # files we'll deal with on m2 side
204 elif f in copied: # files we'll deal with on m2 side
205 pass
205 pass
206 elif f in copy:
206 elif f in copy:
207 f2 = copy[f]
207 f2 = copy[f]
208 if f2 not in m2: # directory rename
208 if f2 not in m2: # directory rename
209 act("remote renamed directory to " + f2, "d",
209 act("remote renamed directory to " + f2, "d",
210 f, None, f2, m1.flags(f))
210 f, None, f2, m1.flags(f))
211 else: # case 2 A,B/B/B or case 4,21 A/B/B
211 else: # case 2 A,B/B/B or case 4,21 A/B/B
212 act("local copied/moved to " + f2, "m",
212 act("local copied/moved to " + f2, "m",
213 f, f2, f, fmerge(f, f2, f2), False)
213 f, f2, f, fmerge(f, f2, f2), False)
214 elif f in ma: # clean, a different, no remote
214 elif f in ma: # clean, a different, no remote
215 if n != ma[f]:
215 if n != ma[f]:
216 if repo.ui.promptchoice(
216 if repo.ui.promptchoice(
217 _(" local changed %s which remote deleted\n"
217 _(" local changed %s which remote deleted\n"
218 "use (c)hanged version or (d)elete?") % f,
218 "use (c)hanged version or (d)elete?") % f,
219 (_("&Changed"), _("&Delete")), 0):
219 (_("&Changed"), _("&Delete")), 0):
220 act("prompt delete", "r", f)
220 act("prompt delete", "r", f)
221 else:
221 else:
222 act("prompt keep", "a", f)
222 act("prompt keep", "a", f)
223 elif n[20:] == "a": # added, no remote
223 elif n[20:] == "a": # added, no remote
224 act("remote deleted", "f", f)
224 act("remote deleted", "f", f)
225 elif n[20:] != "u":
225 elif n[20:] != "u":
226 act("other deleted", "r", f)
226 act("other deleted", "r", f)
227
227
228 for f, n in m2.iteritems():
228 for f, n in m2.iteritems():
229 if partial and not partial(f):
229 if partial and not partial(f):
230 continue
230 continue
231 if f in m1 or f in copied: # files already visited
231 if f in m1 or f in copied: # files already visited
232 continue
232 continue
233 if f in copy:
233 if f in copy:
234 f2 = copy[f]
234 f2 = copy[f]
235 if f2 not in m1: # directory rename
235 if f2 not in m1: # directory rename
236 act("local renamed directory to " + f2, "d",
236 act("local renamed directory to " + f2, "d",
237 None, f, f2, m2.flags(f))
237 None, f, f2, m2.flags(f))
238 elif f2 in m2: # rename case 1, A/A,B/A
238 elif f2 in m2: # rename case 1, A/A,B/A
239 act("remote copied to " + f, "m",
239 act("remote copied to " + f, "m",
240 f2, f, f, fmerge(f2, f, f2), False)
240 f2, f, f, fmerge(f2, f, f2), False)
241 else: # case 3,20 A/B/A
241 else: # case 3,20 A/B/A
242 act("remote moved to " + f, "m",
242 act("remote moved to " + f, "m",
243 f2, f, f, fmerge(f2, f, f2), True)
243 f2, f, f, fmerge(f2, f, f2), True)
244 elif f not in ma:
244 elif f not in ma:
245 act("remote created", "g", f, m2.flags(f))
245 act("remote created", "g", f, m2.flags(f))
246 elif n != ma[f]:
246 elif n != ma[f]:
247 if repo.ui.promptchoice(
247 if repo.ui.promptchoice(
248 _("remote changed %s which local deleted\n"
248 _("remote changed %s which local deleted\n"
249 "use (c)hanged version or leave (d)eleted?") % f,
249 "use (c)hanged version or leave (d)eleted?") % f,
250 (_("&Changed"), _("&Deleted")), 0) == 0:
250 (_("&Changed"), _("&Deleted")), 0) == 0:
251 act("prompt recreating", "g", f, m2.flags(f))
251 act("prompt recreating", "g", f, m2.flags(f))
252
252
253 return action
253 return action
254
254
255 def actionkey(a):
255 def actionkey(a):
256 return a[1] == 'r' and -1 or 0, a
256 return a[1] == 'r' and -1 or 0, a
257
257
258 def applyupdates(repo, action, wctx, mctx, actx, overwrite):
258 def applyupdates(repo, action, wctx, mctx, actx, overwrite):
259 """apply the merge action list to the working directory
259 """apply the merge action list to the working directory
260
260
261 wctx is the working copy context
261 wctx is the working copy context
262 mctx is the context to be merged into the working copy
262 mctx is the context to be merged into the working copy
263 actx is the context of the common ancestor
263 actx is the context of the common ancestor
264
264
265 Return a tuple of counts (updated, merged, removed, unresolved) that
265 Return a tuple of counts (updated, merged, removed, unresolved) that
266 describes how many files were affected by the update.
266 describes how many files were affected by the update.
267 """
267 """
268
268
269 updated, merged, removed, unresolved = 0, 0, 0, 0
269 updated, merged, removed, unresolved = 0, 0, 0, 0
270 ms = mergestate(repo)
270 ms = mergestate(repo)
271 ms.reset(wctx.p1().node())
271 ms.reset(wctx.p1().node())
272 moves = []
272 moves = []
273 action.sort(key=actionkey)
273 action.sort(key=actionkey)
274 substate = wctx.substate # prime
274 substate = wctx.substate # prime
275
275
276 # prescan for merges
276 # prescan for merges
277 u = repo.ui
277 u = repo.ui
278 for a in action:
278 for a in action:
279 f, m = a[:2]
279 f, m = a[:2]
280 if m == 'm': # merge
280 if m == 'm': # merge
281 f2, fd, flags, move = a[2:]
281 f2, fd, flags, move = a[2:]
282 if f == '.hgsubstate': # merged internally
282 if f == '.hgsubstate': # merged internally
283 continue
283 continue
284 repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
284 repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
285 fcl = wctx[f]
285 fcl = wctx[f]
286 fco = mctx[f2]
286 fco = mctx[f2]
287 if mctx == actx: # backwards, use working dir parent as ancestor
287 if mctx == actx: # backwards, use working dir parent as ancestor
288 if fcl.parents():
288 if fcl.parents():
289 fca = fcl.p1()
289 fca = fcl.p1()
290 else:
290 else:
291 fca = repo.filectx(f, fileid=nullrev)
291 fca = repo.filectx(f, fileid=nullrev)
292 else:
292 else:
293 fca = fcl.ancestor(fco, actx)
293 fca = fcl.ancestor(fco, actx)
294 if not fca:
294 if not fca:
295 fca = repo.filectx(f, fileid=nullrev)
295 fca = repo.filectx(f, fileid=nullrev)
296 ms.add(fcl, fco, fca, fd, flags)
296 ms.add(fcl, fco, fca, fd, flags)
297 if f != fd and move:
297 if f != fd and move:
298 moves.append(f)
298 moves.append(f)
299
299
300 # remove renamed files after safely stored
300 # remove renamed files after safely stored
301 for f in moves:
301 for f in moves:
302 if os.path.lexists(repo.wjoin(f)):
302 if os.path.lexists(repo.wjoin(f)):
303 repo.ui.debug("removing %s\n" % f)
303 repo.ui.debug("removing %s\n" % f)
304 os.unlink(repo.wjoin(f))
304 os.unlink(repo.wjoin(f))
305
305
306 audit_path = util.path_auditor(repo.root)
306 audit_path = scmutil.path_auditor(repo.root)
307
307
308 numupdates = len(action)
308 numupdates = len(action)
309 for i, a in enumerate(action):
309 for i, a in enumerate(action):
310 f, m = a[:2]
310 f, m = a[:2]
311 u.progress(_('updating'), i + 1, item=f, total=numupdates,
311 u.progress(_('updating'), i + 1, item=f, total=numupdates,
312 unit=_('files'))
312 unit=_('files'))
313 if f and f[0] == "/":
313 if f and f[0] == "/":
314 continue
314 continue
315 if m == "r": # remove
315 if m == "r": # remove
316 repo.ui.note(_("removing %s\n") % f)
316 repo.ui.note(_("removing %s\n") % f)
317 audit_path(f)
317 audit_path(f)
318 if f == '.hgsubstate': # subrepo states need updating
318 if f == '.hgsubstate': # subrepo states need updating
319 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
319 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
320 try:
320 try:
321 util.unlinkpath(repo.wjoin(f))
321 util.unlinkpath(repo.wjoin(f))
322 except OSError, inst:
322 except OSError, inst:
323 if inst.errno != errno.ENOENT:
323 if inst.errno != errno.ENOENT:
324 repo.ui.warn(_("update failed to remove %s: %s!\n") %
324 repo.ui.warn(_("update failed to remove %s: %s!\n") %
325 (f, inst.strerror))
325 (f, inst.strerror))
326 removed += 1
326 removed += 1
327 elif m == "m": # merge
327 elif m == "m": # merge
328 if f == '.hgsubstate': # subrepo states need updating
328 if f == '.hgsubstate': # subrepo states need updating
329 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
329 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
330 continue
330 continue
331 f2, fd, flags, move = a[2:]
331 f2, fd, flags, move = a[2:]
332 r = ms.resolve(fd, wctx, mctx)
332 r = ms.resolve(fd, wctx, mctx)
333 if r is not None and r > 0:
333 if r is not None and r > 0:
334 unresolved += 1
334 unresolved += 1
335 else:
335 else:
336 if r is None:
336 if r is None:
337 updated += 1
337 updated += 1
338 else:
338 else:
339 merged += 1
339 merged += 1
340 util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
340 util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
341 if (move and repo.dirstate.normalize(fd) != f
341 if (move and repo.dirstate.normalize(fd) != f
342 and os.path.lexists(repo.wjoin(f))):
342 and os.path.lexists(repo.wjoin(f))):
343 repo.ui.debug("removing %s\n" % f)
343 repo.ui.debug("removing %s\n" % f)
344 os.unlink(repo.wjoin(f))
344 os.unlink(repo.wjoin(f))
345 elif m == "g": # get
345 elif m == "g": # get
346 flags = a[2]
346 flags = a[2]
347 repo.ui.note(_("getting %s\n") % f)
347 repo.ui.note(_("getting %s\n") % f)
348 t = mctx.filectx(f).data()
348 t = mctx.filectx(f).data()
349 repo.wwrite(f, t, flags)
349 repo.wwrite(f, t, flags)
350 t = None
350 t = None
351 updated += 1
351 updated += 1
352 if f == '.hgsubstate': # subrepo states need updating
352 if f == '.hgsubstate': # subrepo states need updating
353 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
353 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
354 elif m == "d": # directory rename
354 elif m == "d": # directory rename
355 f2, fd, flags = a[2:]
355 f2, fd, flags = a[2:]
356 if f:
356 if f:
357 repo.ui.note(_("moving %s to %s\n") % (f, fd))
357 repo.ui.note(_("moving %s to %s\n") % (f, fd))
358 t = wctx.filectx(f).data()
358 t = wctx.filectx(f).data()
359 repo.wwrite(fd, t, flags)
359 repo.wwrite(fd, t, flags)
360 util.unlinkpath(repo.wjoin(f))
360 util.unlinkpath(repo.wjoin(f))
361 if f2:
361 if f2:
362 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
362 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
363 t = mctx.filectx(f2).data()
363 t = mctx.filectx(f2).data()
364 repo.wwrite(fd, t, flags)
364 repo.wwrite(fd, t, flags)
365 updated += 1
365 updated += 1
366 elif m == "dr": # divergent renames
366 elif m == "dr": # divergent renames
367 fl = a[2]
367 fl = a[2]
368 repo.ui.warn(_("note: possible conflict - %s was renamed "
368 repo.ui.warn(_("note: possible conflict - %s was renamed "
369 "multiple times to:\n") % f)
369 "multiple times to:\n") % f)
370 for nf in fl:
370 for nf in fl:
371 repo.ui.warn(" %s\n" % nf)
371 repo.ui.warn(" %s\n" % nf)
372 elif m == "e": # exec
372 elif m == "e": # exec
373 flags = a[2]
373 flags = a[2]
374 util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
374 util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
375 ms.commit()
375 ms.commit()
376 u.progress(_('updating'), None, total=numupdates, unit=_('files'))
376 u.progress(_('updating'), None, total=numupdates, unit=_('files'))
377
377
378 return updated, merged, removed, unresolved
378 return updated, merged, removed, unresolved
379
379
380 def recordupdates(repo, action, branchmerge):
380 def recordupdates(repo, action, branchmerge):
381 "record merge actions to the dirstate"
381 "record merge actions to the dirstate"
382
382
383 for a in action:
383 for a in action:
384 f, m = a[:2]
384 f, m = a[:2]
385 if m == "r": # remove
385 if m == "r": # remove
386 if branchmerge:
386 if branchmerge:
387 repo.dirstate.remove(f)
387 repo.dirstate.remove(f)
388 else:
388 else:
389 repo.dirstate.forget(f)
389 repo.dirstate.forget(f)
390 elif m == "a": # re-add
390 elif m == "a": # re-add
391 if not branchmerge:
391 if not branchmerge:
392 repo.dirstate.add(f)
392 repo.dirstate.add(f)
393 elif m == "f": # forget
393 elif m == "f": # forget
394 repo.dirstate.forget(f)
394 repo.dirstate.forget(f)
395 elif m == "e": # exec change
395 elif m == "e": # exec change
396 repo.dirstate.normallookup(f)
396 repo.dirstate.normallookup(f)
397 elif m == "g": # get
397 elif m == "g": # get
398 if branchmerge:
398 if branchmerge:
399 repo.dirstate.otherparent(f)
399 repo.dirstate.otherparent(f)
400 else:
400 else:
401 repo.dirstate.normal(f)
401 repo.dirstate.normal(f)
402 elif m == "m": # merge
402 elif m == "m": # merge
403 f2, fd, flag, move = a[2:]
403 f2, fd, flag, move = a[2:]
404 if branchmerge:
404 if branchmerge:
405 # We've done a branch merge, mark this file as merged
405 # We've done a branch merge, mark this file as merged
406 # so that we properly record the merger later
406 # so that we properly record the merger later
407 repo.dirstate.merge(fd)
407 repo.dirstate.merge(fd)
408 if f != f2: # copy/rename
408 if f != f2: # copy/rename
409 if move:
409 if move:
410 repo.dirstate.remove(f)
410 repo.dirstate.remove(f)
411 if f != fd:
411 if f != fd:
412 repo.dirstate.copy(f, fd)
412 repo.dirstate.copy(f, fd)
413 else:
413 else:
414 repo.dirstate.copy(f2, fd)
414 repo.dirstate.copy(f2, fd)
415 else:
415 else:
416 # We've update-merged a locally modified file, so
416 # We've update-merged a locally modified file, so
417 # we set the dirstate to emulate a normal checkout
417 # we set the dirstate to emulate a normal checkout
418 # of that file some time in the past. Thus our
418 # of that file some time in the past. Thus our
419 # merge will appear as a normal local file
419 # merge will appear as a normal local file
420 # modification.
420 # modification.
421 if f2 == fd: # file not locally copied/moved
421 if f2 == fd: # file not locally copied/moved
422 repo.dirstate.normallookup(fd)
422 repo.dirstate.normallookup(fd)
423 if move:
423 if move:
424 repo.dirstate.forget(f)
424 repo.dirstate.forget(f)
425 elif m == "d": # directory rename
425 elif m == "d": # directory rename
426 f2, fd, flag = a[2:]
426 f2, fd, flag = a[2:]
427 if not f2 and f not in repo.dirstate:
427 if not f2 and f not in repo.dirstate:
428 # untracked file moved
428 # untracked file moved
429 continue
429 continue
430 if branchmerge:
430 if branchmerge:
431 repo.dirstate.add(fd)
431 repo.dirstate.add(fd)
432 if f:
432 if f:
433 repo.dirstate.remove(f)
433 repo.dirstate.remove(f)
434 repo.dirstate.copy(f, fd)
434 repo.dirstate.copy(f, fd)
435 if f2:
435 if f2:
436 repo.dirstate.copy(f2, fd)
436 repo.dirstate.copy(f2, fd)
437 else:
437 else:
438 repo.dirstate.normal(fd)
438 repo.dirstate.normal(fd)
439 if f:
439 if f:
440 repo.dirstate.forget(f)
440 repo.dirstate.forget(f)
441
441
442 def update(repo, node, branchmerge, force, partial, ancestor=None):
442 def update(repo, node, branchmerge, force, partial, ancestor=None):
443 """
443 """
444 Perform a merge between the working directory and the given node
444 Perform a merge between the working directory and the given node
445
445
446 node = the node to update to, or None if unspecified
446 node = the node to update to, or None if unspecified
447 branchmerge = whether to merge between branches
447 branchmerge = whether to merge between branches
448 force = whether to force branch merging or file overwriting
448 force = whether to force branch merging or file overwriting
449 partial = a function to filter file lists (dirstate not updated)
449 partial = a function to filter file lists (dirstate not updated)
450
450
451 The table below shows all the behaviors of the update command
451 The table below shows all the behaviors of the update command
452 given the -c and -C or no options, whether the working directory
452 given the -c and -C or no options, whether the working directory
453 is dirty, whether a revision is specified, and the relationship of
453 is dirty, whether a revision is specified, and the relationship of
454 the parent rev to the target rev (linear, on the same named
454 the parent rev to the target rev (linear, on the same named
455 branch, or on another named branch).
455 branch, or on another named branch).
456
456
457 This logic is tested by test-update-branches.t.
457 This logic is tested by test-update-branches.t.
458
458
459 -c -C dirty rev | linear same cross
459 -c -C dirty rev | linear same cross
460 n n n n | ok (1) x
460 n n n n | ok (1) x
461 n n n y | ok ok ok
461 n n n y | ok ok ok
462 n n y * | merge (2) (2)
462 n n y * | merge (2) (2)
463 n y * * | --- discard ---
463 n y * * | --- discard ---
464 y n y * | --- (3) ---
464 y n y * | --- (3) ---
465 y n n * | --- ok ---
465 y n n * | --- ok ---
466 y y * * | --- (4) ---
466 y y * * | --- (4) ---
467
467
468 x = can't happen
468 x = can't happen
469 * = don't-care
469 * = don't-care
470 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
470 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
471 2 = abort: crosses branches (use 'hg merge' to merge or
471 2 = abort: crosses branches (use 'hg merge' to merge or
472 use 'hg update -C' to discard changes)
472 use 'hg update -C' to discard changes)
473 3 = abort: uncommitted local changes
473 3 = abort: uncommitted local changes
474 4 = incompatible options (checked in commands.py)
474 4 = incompatible options (checked in commands.py)
475
475
476 Return the same tuple as applyupdates().
476 Return the same tuple as applyupdates().
477 """
477 """
478
478
479 onode = node
479 onode = node
480 wlock = repo.wlock()
480 wlock = repo.wlock()
481 try:
481 try:
482 wc = repo[None]
482 wc = repo[None]
483 if node is None:
483 if node is None:
484 # tip of current branch
484 # tip of current branch
485 try:
485 try:
486 node = repo.branchtags()[wc.branch()]
486 node = repo.branchtags()[wc.branch()]
487 except KeyError:
487 except KeyError:
488 if wc.branch() == "default": # no default branch!
488 if wc.branch() == "default": # no default branch!
489 node = repo.lookup("tip") # update to tip
489 node = repo.lookup("tip") # update to tip
490 else:
490 else:
491 raise util.Abort(_("branch %s not found") % wc.branch())
491 raise util.Abort(_("branch %s not found") % wc.branch())
492 overwrite = force and not branchmerge
492 overwrite = force and not branchmerge
493 pl = wc.parents()
493 pl = wc.parents()
494 p1, p2 = pl[0], repo[node]
494 p1, p2 = pl[0], repo[node]
495 if ancestor:
495 if ancestor:
496 pa = repo[ancestor]
496 pa = repo[ancestor]
497 else:
497 else:
498 pa = p1.ancestor(p2)
498 pa = p1.ancestor(p2)
499
499
500 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
500 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
501
501
502 ### check phase
502 ### check phase
503 if not overwrite and len(pl) > 1:
503 if not overwrite and len(pl) > 1:
504 raise util.Abort(_("outstanding uncommitted merges"))
504 raise util.Abort(_("outstanding uncommitted merges"))
505 if branchmerge:
505 if branchmerge:
506 if pa == p2:
506 if pa == p2:
507 raise util.Abort(_("merging with a working directory ancestor"
507 raise util.Abort(_("merging with a working directory ancestor"
508 " has no effect"))
508 " has no effect"))
509 elif pa == p1:
509 elif pa == p1:
510 if p1.branch() == p2.branch():
510 if p1.branch() == p2.branch():
511 raise util.Abort(_("nothing to merge (use 'hg update'"
511 raise util.Abort(_("nothing to merge (use 'hg update'"
512 " or check 'hg heads')"))
512 " or check 'hg heads')"))
513 if not force and (wc.files() or wc.deleted()):
513 if not force and (wc.files() or wc.deleted()):
514 raise util.Abort(_("outstanding uncommitted changes "
514 raise util.Abort(_("outstanding uncommitted changes "
515 "(use 'hg status' to list changes)"))
515 "(use 'hg status' to list changes)"))
516 for s in wc.substate:
516 for s in wc.substate:
517 if wc.sub(s).dirty():
517 if wc.sub(s).dirty():
518 raise util.Abort(_("outstanding uncommitted changes in "
518 raise util.Abort(_("outstanding uncommitted changes in "
519 "subrepository '%s'") % s)
519 "subrepository '%s'") % s)
520
520
521 elif not overwrite:
521 elif not overwrite:
522 if pa == p1 or pa == p2: # linear
522 if pa == p1 or pa == p2: # linear
523 pass # all good
523 pass # all good
524 elif wc.files() or wc.deleted():
524 elif wc.files() or wc.deleted():
525 raise util.Abort(_("crosses branches (merge branches or use"
525 raise util.Abort(_("crosses branches (merge branches or use"
526 " --clean to discard changes)"))
526 " --clean to discard changes)"))
527 elif onode is None:
527 elif onode is None:
528 raise util.Abort(_("crosses branches (merge branches or use"
528 raise util.Abort(_("crosses branches (merge branches or use"
529 " --check to force update)"))
529 " --check to force update)"))
530 else:
530 else:
531 # Allow jumping branches if clean and specific rev given
531 # Allow jumping branches if clean and specific rev given
532 overwrite = True
532 overwrite = True
533
533
534 ### calculate phase
534 ### calculate phase
535 action = []
535 action = []
536 wc.status(unknown=True) # prime cache
536 wc.status(unknown=True) # prime cache
537 if not force:
537 if not force:
538 _checkunknown(wc, p2)
538 _checkunknown(wc, p2)
539 if not util.checkcase(repo.path):
539 if not util.checkcase(repo.path):
540 _checkcollision(p2)
540 _checkcollision(p2)
541 action += _forgetremoved(wc, p2, branchmerge)
541 action += _forgetremoved(wc, p2, branchmerge)
542 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
542 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
543
543
544 ### apply phase
544 ### apply phase
545 if not branchmerge: # just jump to the new rev
545 if not branchmerge: # just jump to the new rev
546 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
546 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
547 if not partial:
547 if not partial:
548 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
548 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
549
549
550 stats = applyupdates(repo, action, wc, p2, pa, overwrite)
550 stats = applyupdates(repo, action, wc, p2, pa, overwrite)
551
551
552 if not partial:
552 if not partial:
553 repo.dirstate.setparents(fp1, fp2)
553 repo.dirstate.setparents(fp1, fp2)
554 recordupdates(repo, action, branchmerge)
554 recordupdates(repo, action, branchmerge)
555 if not branchmerge:
555 if not branchmerge:
556 repo.dirstate.setbranch(p2.branch())
556 repo.dirstate.setbranch(p2.branch())
557 finally:
557 finally:
558 wlock.release()
558 wlock.release()
559
559
560 if not partial:
560 if not partial:
561 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
561 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
562 return stats
562 return stats
@@ -1,170 +1,245 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import util, error
9 import util, error
10 import os, errno
10 import os, errno, stat
11
11
12 def checkportable(ui, f):
12 def checkportable(ui, f):
13 '''Check if filename f is portable and warn or abort depending on config'''
13 '''Check if filename f is portable and warn or abort depending on config'''
14 util.checkfilename(f)
14 util.checkfilename(f)
15 val = ui.config('ui', 'portablefilenames', 'warn')
15 val = ui.config('ui', 'portablefilenames', 'warn')
16 lval = val.lower()
16 lval = val.lower()
17 abort = os.name == 'nt' or lval == 'abort'
17 abort = os.name == 'nt' or lval == 'abort'
18 bval = util.parsebool(val)
18 bval = util.parsebool(val)
19 if abort or lval == 'warn' or bval:
19 if abort or lval == 'warn' or bval:
20 msg = util.checkwinfilename(f)
20 msg = util.checkwinfilename(f)
21 if msg:
21 if msg:
22 if abort:
22 if abort:
23 raise util.Abort("%s: %r" % (msg, f))
23 raise util.Abort("%s: %r" % (msg, f))
24 ui.warn(_("warning: %s: %r\n") % (msg, f))
24 ui.warn(_("warning: %s: %r\n") % (msg, f))
25 elif bval is None and lval != 'ignore':
25 elif bval is None and lval != 'ignore':
26 raise error.ConfigError(
26 raise error.ConfigError(
27 _("ui.portablefilenames value is invalid ('%s')") % val)
27 _("ui.portablefilenames value is invalid ('%s')") % val)
28
28
29 class path_auditor(object):
30 '''ensure that a filesystem path contains no banned components.
31 the following properties of a path are checked:
32
33 - ends with a directory separator
34 - under top-level .hg
35 - starts at the root of a windows drive
36 - contains ".."
37 - traverses a symlink (e.g. a/symlink_here/b)
38 - inside a nested repository (a callback can be used to approve
39 some nested repositories, e.g., subrepositories)
40 '''
41
42 def __init__(self, root, callback=None):
43 self.audited = set()
44 self.auditeddir = set()
45 self.root = root
46 self.callback = callback
47
48 def __call__(self, path):
49 '''Check the relative path.
50 path may contain a pattern (e.g. foodir/**.txt)'''
51
52 if path in self.audited:
53 return
54 # AIX ignores "/" at end of path, others raise EISDIR.
55 if util.endswithsep(path):
56 raise util.Abort(_("path ends in directory separator: %s") % path)
57 normpath = os.path.normcase(path)
58 parts = util.splitpath(normpath)
59 if (os.path.splitdrive(path)[0]
60 or parts[0].lower() in ('.hg', '.hg.', '')
61 or os.pardir in parts):
62 raise util.Abort(_("path contains illegal component: %s") % path)
63 if '.hg' in path.lower():
64 lparts = [p.lower() for p in parts]
65 for p in '.hg', '.hg.':
66 if p in lparts[1:]:
67 pos = lparts.index(p)
68 base = os.path.join(*parts[:pos])
69 raise util.Abort(_('path %r is inside nested repo %r')
70 % (path, base))
71
72 parts.pop()
73 prefixes = []
74 while parts:
75 prefix = os.sep.join(parts)
76 if prefix in self.auditeddir:
77 break
78 curpath = os.path.join(self.root, prefix)
79 try:
80 st = os.lstat(curpath)
81 except OSError, err:
82 # EINVAL can be raised as invalid path syntax under win32.
83 # They must be ignored for patterns can be checked too.
84 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
85 raise
86 else:
87 if stat.S_ISLNK(st.st_mode):
88 raise util.Abort(
89 _('path %r traverses symbolic link %r')
90 % (path, prefix))
91 elif (stat.S_ISDIR(st.st_mode) and
92 os.path.isdir(os.path.join(curpath, '.hg'))):
93 if not self.callback or not self.callback(curpath):
94 raise util.Abort(_('path %r is inside nested repo %r') %
95 (path, prefix))
96 prefixes.append(prefix)
97 parts.pop()
98
99 self.audited.add(path)
100 # only add prefixes to the cache after checking everything: we don't
101 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
102 self.auditeddir.update(prefixes)
103
29 class opener(object):
104 class opener(object):
30 '''Open files relative to a base directory
105 '''Open files relative to a base directory
31
106
32 This class is used to hide the details of COW semantics and
107 This class is used to hide the details of COW semantics and
33 remote file access from higher level code.
108 remote file access from higher level code.
34 '''
109 '''
35 def __init__(self, base, audit=True):
110 def __init__(self, base, audit=True):
36 self.base = base
111 self.base = base
37 if audit:
112 if audit:
38 self.auditor = util.path_auditor(base)
113 self.auditor = path_auditor(base)
39 else:
114 else:
40 self.auditor = util.always
115 self.auditor = util.always
41 self.createmode = None
116 self.createmode = None
42 self._trustnlink = None
117 self._trustnlink = None
43
118
44 @util.propertycache
119 @util.propertycache
45 def _can_symlink(self):
120 def _can_symlink(self):
46 return util.checklink(self.base)
121 return util.checklink(self.base)
47
122
48 def _fixfilemode(self, name):
123 def _fixfilemode(self, name):
49 if self.createmode is None:
124 if self.createmode is None:
50 return
125 return
51 os.chmod(name, self.createmode & 0666)
126 os.chmod(name, self.createmode & 0666)
52
127
53 def __call__(self, path, mode="r", text=False, atomictemp=False):
128 def __call__(self, path, mode="r", text=False, atomictemp=False):
54 r = util.checkosfilename(path)
129 r = util.checkosfilename(path)
55 if r:
130 if r:
56 raise Abort("%s: %r" % (r, path))
131 raise Abort("%s: %r" % (r, path))
57 self.auditor(path)
132 self.auditor(path)
58 f = os.path.join(self.base, path)
133 f = os.path.join(self.base, path)
59
134
60 if not text and "b" not in mode:
135 if not text and "b" not in mode:
61 mode += "b" # for that other OS
136 mode += "b" # for that other OS
62
137
63 nlink = -1
138 nlink = -1
64 dirname, basename = os.path.split(f)
139 dirname, basename = os.path.split(f)
65 # If basename is empty, then the path is malformed because it points
140 # If basename is empty, then the path is malformed because it points
66 # to a directory. Let the posixfile() call below raise IOError.
141 # to a directory. Let the posixfile() call below raise IOError.
67 if basename and mode not in ('r', 'rb'):
142 if basename and mode not in ('r', 'rb'):
68 if atomictemp:
143 if atomictemp:
69 if not os.path.isdir(dirname):
144 if not os.path.isdir(dirname):
70 util.makedirs(dirname, self.createmode)
145 util.makedirs(dirname, self.createmode)
71 return util.atomictempfile(f, mode, self.createmode)
146 return util.atomictempfile(f, mode, self.createmode)
72 try:
147 try:
73 if 'w' in mode:
148 if 'w' in mode:
74 util.unlink(f)
149 util.unlink(f)
75 nlink = 0
150 nlink = 0
76 else:
151 else:
77 # nlinks() may behave differently for files on Windows
152 # nlinks() may behave differently for files on Windows
78 # shares if the file is open.
153 # shares if the file is open.
79 fd = util.posixfile(f)
154 fd = util.posixfile(f)
80 nlink = util.nlinks(f)
155 nlink = util.nlinks(f)
81 if nlink < 1:
156 if nlink < 1:
82 nlink = 2 # force mktempcopy (issue1922)
157 nlink = 2 # force mktempcopy (issue1922)
83 fd.close()
158 fd.close()
84 except (OSError, IOError), e:
159 except (OSError, IOError), e:
85 if e.errno != errno.ENOENT:
160 if e.errno != errno.ENOENT:
86 raise
161 raise
87 nlink = 0
162 nlink = 0
88 if not os.path.isdir(dirname):
163 if not os.path.isdir(dirname):
89 util.makedirs(dirname, self.createmode)
164 util.makedirs(dirname, self.createmode)
90 if nlink > 0:
165 if nlink > 0:
91 if self._trustnlink is None:
166 if self._trustnlink is None:
92 self._trustnlink = nlink > 1 or util.checknlink(f)
167 self._trustnlink = nlink > 1 or util.checknlink(f)
93 if nlink > 1 or not self._trustnlink:
168 if nlink > 1 or not self._trustnlink:
94 util.rename(util.mktempcopy(f), f)
169 util.rename(util.mktempcopy(f), f)
95 fp = util.posixfile(f, mode)
170 fp = util.posixfile(f, mode)
96 if nlink == 0:
171 if nlink == 0:
97 self._fixfilemode(f)
172 self._fixfilemode(f)
98 return fp
173 return fp
99
174
100 def symlink(self, src, dst):
175 def symlink(self, src, dst):
101 self.auditor(dst)
176 self.auditor(dst)
102 linkname = os.path.join(self.base, dst)
177 linkname = os.path.join(self.base, dst)
103 try:
178 try:
104 os.unlink(linkname)
179 os.unlink(linkname)
105 except OSError:
180 except OSError:
106 pass
181 pass
107
182
108 dirname = os.path.dirname(linkname)
183 dirname = os.path.dirname(linkname)
109 if not os.path.exists(dirname):
184 if not os.path.exists(dirname):
110 util.makedirs(dirname, self.createmode)
185 util.makedirs(dirname, self.createmode)
111
186
112 if self._can_symlink:
187 if self._can_symlink:
113 try:
188 try:
114 os.symlink(src, linkname)
189 os.symlink(src, linkname)
115 except OSError, err:
190 except OSError, err:
116 raise OSError(err.errno, _('could not symlink to %r: %s') %
191 raise OSError(err.errno, _('could not symlink to %r: %s') %
117 (src, err.strerror), linkname)
192 (src, err.strerror), linkname)
118 else:
193 else:
119 f = self(dst, "w")
194 f = self(dst, "w")
120 f.write(src)
195 f.write(src)
121 f.close()
196 f.close()
122 self._fixfilemode(dst)
197 self._fixfilemode(dst)
123
198
124 def canonpath(root, cwd, myname, auditor=None):
199 def canonpath(root, cwd, myname, auditor=None):
125 '''return the canonical path of myname, given cwd and root'''
200 '''return the canonical path of myname, given cwd and root'''
126 if util.endswithsep(root):
201 if util.endswithsep(root):
127 rootsep = root
202 rootsep = root
128 else:
203 else:
129 rootsep = root + os.sep
204 rootsep = root + os.sep
130 name = myname
205 name = myname
131 if not os.path.isabs(name):
206 if not os.path.isabs(name):
132 name = os.path.join(root, cwd, name)
207 name = os.path.join(root, cwd, name)
133 name = os.path.normpath(name)
208 name = os.path.normpath(name)
134 if auditor is None:
209 if auditor is None:
135 auditor = util.path_auditor(root)
210 auditor = path_auditor(root)
136 if name != rootsep and name.startswith(rootsep):
211 if name != rootsep and name.startswith(rootsep):
137 name = name[len(rootsep):]
212 name = name[len(rootsep):]
138 auditor(name)
213 auditor(name)
139 return util.pconvert(name)
214 return util.pconvert(name)
140 elif name == root:
215 elif name == root:
141 return ''
216 return ''
142 else:
217 else:
143 # Determine whether `name' is in the hierarchy at or beneath `root',
218 # Determine whether `name' is in the hierarchy at or beneath `root',
144 # by iterating name=dirname(name) until that causes no change (can't
219 # by iterating name=dirname(name) until that causes no change (can't
145 # check name == '/', because that doesn't work on windows). For each
220 # check name == '/', because that doesn't work on windows). For each
146 # `name', compare dev/inode numbers. If they match, the list `rel'
221 # `name', compare dev/inode numbers. If they match, the list `rel'
147 # holds the reversed list of components making up the relative file
222 # holds the reversed list of components making up the relative file
148 # name we want.
223 # name we want.
149 root_st = os.stat(root)
224 root_st = os.stat(root)
150 rel = []
225 rel = []
151 while True:
226 while True:
152 try:
227 try:
153 name_st = os.stat(name)
228 name_st = os.stat(name)
154 except OSError:
229 except OSError:
155 break
230 break
156 if util.samestat(name_st, root_st):
231 if util.samestat(name_st, root_st):
157 if not rel:
232 if not rel:
158 # name was actually the same as root (maybe a symlink)
233 # name was actually the same as root (maybe a symlink)
159 return ''
234 return ''
160 rel.reverse()
235 rel.reverse()
161 name = os.path.join(*rel)
236 name = os.path.join(*rel)
162 auditor(name)
237 auditor(name)
163 return util.pconvert(name)
238 return util.pconvert(name)
164 dirname, basename = os.path.split(name)
239 dirname, basename = os.path.split(name)
165 rel.append(basename)
240 rel.append(basename)
166 if dirname == name:
241 if dirname == name:
167 break
242 break
168 name = dirname
243 name = dirname
169
244
170 raise util.Abort('%s not under root' % myname)
245 raise util.Abort('%s not under root' % myname)
@@ -1,1022 +1,1022 b''
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, xml.dom.minidom, shutil, posixpath
8 import errno, os, re, xml.dom.minidom, shutil, posixpath
9 import stat, subprocess, tarfile
9 import stat, subprocess, tarfile
10 from i18n import _
10 from i18n import _
11 import config, util, node, error, cmdutil, url, bookmarks
11 import config, scmutil, util, node, error, cmdutil, url, bookmarks
12 hg = None
12 hg = None
13
13
14 nullstate = ('', '', 'empty')
14 nullstate = ('', '', 'empty')
15
15
16 def state(ctx, ui):
16 def state(ctx, ui):
17 """return a state dict, mapping subrepo paths configured in .hgsub
17 """return a state dict, mapping subrepo paths configured in .hgsub
18 to tuple: (source from .hgsub, revision from .hgsubstate, kind
18 to tuple: (source from .hgsub, revision from .hgsubstate, kind
19 (key in types dict))
19 (key in types dict))
20 """
20 """
21 p = config.config()
21 p = config.config()
22 def read(f, sections=None, remap=None):
22 def read(f, sections=None, remap=None):
23 if f in ctx:
23 if f in ctx:
24 try:
24 try:
25 data = ctx[f].data()
25 data = ctx[f].data()
26 except IOError, err:
26 except IOError, err:
27 if err.errno != errno.ENOENT:
27 if err.errno != errno.ENOENT:
28 raise
28 raise
29 # handle missing subrepo spec files as removed
29 # handle missing subrepo spec files as removed
30 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
30 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
31 return
31 return
32 p.parse(f, data, sections, remap, read)
32 p.parse(f, data, sections, remap, read)
33 else:
33 else:
34 raise util.Abort(_("subrepo spec file %s not found") % f)
34 raise util.Abort(_("subrepo spec file %s not found") % f)
35
35
36 if '.hgsub' in ctx:
36 if '.hgsub' in ctx:
37 read('.hgsub')
37 read('.hgsub')
38
38
39 for path, src in ui.configitems('subpaths'):
39 for path, src in ui.configitems('subpaths'):
40 p.set('subpaths', path, src, ui.configsource('subpaths', path))
40 p.set('subpaths', path, src, ui.configsource('subpaths', path))
41
41
42 rev = {}
42 rev = {}
43 if '.hgsubstate' in ctx:
43 if '.hgsubstate' in ctx:
44 try:
44 try:
45 for l in ctx['.hgsubstate'].data().splitlines():
45 for l in ctx['.hgsubstate'].data().splitlines():
46 revision, path = l.split(" ", 1)
46 revision, path = l.split(" ", 1)
47 rev[path] = revision
47 rev[path] = revision
48 except IOError, err:
48 except IOError, err:
49 if err.errno != errno.ENOENT:
49 if err.errno != errno.ENOENT:
50 raise
50 raise
51
51
52 state = {}
52 state = {}
53 for path, src in p[''].items():
53 for path, src in p[''].items():
54 kind = 'hg'
54 kind = 'hg'
55 if src.startswith('['):
55 if src.startswith('['):
56 if ']' not in src:
56 if ']' not in src:
57 raise util.Abort(_('missing ] in subrepo source'))
57 raise util.Abort(_('missing ] in subrepo source'))
58 kind, src = src.split(']', 1)
58 kind, src = src.split(']', 1)
59 kind = kind[1:]
59 kind = kind[1:]
60
60
61 for pattern, repl in p.items('subpaths'):
61 for pattern, repl in p.items('subpaths'):
62 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
62 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
63 # does a string decode.
63 # does a string decode.
64 repl = repl.encode('string-escape')
64 repl = repl.encode('string-escape')
65 # However, we still want to allow back references to go
65 # However, we still want to allow back references to go
66 # through unharmed, so we turn r'\\1' into r'\1'. Again,
66 # through unharmed, so we turn r'\\1' into r'\1'. Again,
67 # extra escapes are needed because re.sub string decodes.
67 # extra escapes are needed because re.sub string decodes.
68 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
68 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
69 try:
69 try:
70 src = re.sub(pattern, repl, src, 1)
70 src = re.sub(pattern, repl, src, 1)
71 except re.error, e:
71 except re.error, e:
72 raise util.Abort(_("bad subrepository pattern in %s: %s")
72 raise util.Abort(_("bad subrepository pattern in %s: %s")
73 % (p.source('subpaths', pattern), e))
73 % (p.source('subpaths', pattern), e))
74
74
75 state[path] = (src.strip(), rev.get(path, ''), kind)
75 state[path] = (src.strip(), rev.get(path, ''), kind)
76
76
77 return state
77 return state
78
78
79 def writestate(repo, state):
79 def writestate(repo, state):
80 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
80 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
81 repo.wwrite('.hgsubstate',
81 repo.wwrite('.hgsubstate',
82 ''.join(['%s %s\n' % (state[s][1], s)
82 ''.join(['%s %s\n' % (state[s][1], s)
83 for s in sorted(state)]), '')
83 for s in sorted(state)]), '')
84
84
85 def submerge(repo, wctx, mctx, actx, overwrite):
85 def submerge(repo, wctx, mctx, actx, overwrite):
86 """delegated from merge.applyupdates: merging of .hgsubstate file
86 """delegated from merge.applyupdates: merging of .hgsubstate file
87 in working context, merging context and ancestor context"""
87 in working context, merging context and ancestor context"""
88 if mctx == actx: # backwards?
88 if mctx == actx: # backwards?
89 actx = wctx.p1()
89 actx = wctx.p1()
90 s1 = wctx.substate
90 s1 = wctx.substate
91 s2 = mctx.substate
91 s2 = mctx.substate
92 sa = actx.substate
92 sa = actx.substate
93 sm = {}
93 sm = {}
94
94
95 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
95 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
96
96
97 def debug(s, msg, r=""):
97 def debug(s, msg, r=""):
98 if r:
98 if r:
99 r = "%s:%s:%s" % r
99 r = "%s:%s:%s" % r
100 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
100 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
101
101
102 for s, l in s1.items():
102 for s, l in s1.items():
103 a = sa.get(s, nullstate)
103 a = sa.get(s, nullstate)
104 ld = l # local state with possible dirty flag for compares
104 ld = l # local state with possible dirty flag for compares
105 if wctx.sub(s).dirty():
105 if wctx.sub(s).dirty():
106 ld = (l[0], l[1] + "+")
106 ld = (l[0], l[1] + "+")
107 if wctx == actx: # overwrite
107 if wctx == actx: # overwrite
108 a = ld
108 a = ld
109
109
110 if s in s2:
110 if s in s2:
111 r = s2[s]
111 r = s2[s]
112 if ld == r or r == a: # no change or local is newer
112 if ld == r or r == a: # no change or local is newer
113 sm[s] = l
113 sm[s] = l
114 continue
114 continue
115 elif ld == a: # other side changed
115 elif ld == a: # other side changed
116 debug(s, "other changed, get", r)
116 debug(s, "other changed, get", r)
117 wctx.sub(s).get(r, overwrite)
117 wctx.sub(s).get(r, overwrite)
118 sm[s] = r
118 sm[s] = r
119 elif ld[0] != r[0]: # sources differ
119 elif ld[0] != r[0]: # sources differ
120 if repo.ui.promptchoice(
120 if repo.ui.promptchoice(
121 _(' subrepository sources for %s differ\n'
121 _(' subrepository sources for %s differ\n'
122 'use (l)ocal source (%s) or (r)emote source (%s)?')
122 'use (l)ocal source (%s) or (r)emote source (%s)?')
123 % (s, l[0], r[0]),
123 % (s, l[0], r[0]),
124 (_('&Local'), _('&Remote')), 0):
124 (_('&Local'), _('&Remote')), 0):
125 debug(s, "prompt changed, get", r)
125 debug(s, "prompt changed, get", r)
126 wctx.sub(s).get(r, overwrite)
126 wctx.sub(s).get(r, overwrite)
127 sm[s] = r
127 sm[s] = r
128 elif ld[1] == a[1]: # local side is unchanged
128 elif ld[1] == a[1]: # local side is unchanged
129 debug(s, "other side changed, get", r)
129 debug(s, "other side changed, get", r)
130 wctx.sub(s).get(r, overwrite)
130 wctx.sub(s).get(r, overwrite)
131 sm[s] = r
131 sm[s] = r
132 else:
132 else:
133 debug(s, "both sides changed, merge with", r)
133 debug(s, "both sides changed, merge with", r)
134 wctx.sub(s).merge(r)
134 wctx.sub(s).merge(r)
135 sm[s] = l
135 sm[s] = l
136 elif ld == a: # remote removed, local unchanged
136 elif ld == a: # remote removed, local unchanged
137 debug(s, "remote removed, remove")
137 debug(s, "remote removed, remove")
138 wctx.sub(s).remove()
138 wctx.sub(s).remove()
139 else:
139 else:
140 if repo.ui.promptchoice(
140 if repo.ui.promptchoice(
141 _(' local changed subrepository %s which remote removed\n'
141 _(' local changed subrepository %s which remote removed\n'
142 'use (c)hanged version or (d)elete?') % s,
142 'use (c)hanged version or (d)elete?') % s,
143 (_('&Changed'), _('&Delete')), 0):
143 (_('&Changed'), _('&Delete')), 0):
144 debug(s, "prompt remove")
144 debug(s, "prompt remove")
145 wctx.sub(s).remove()
145 wctx.sub(s).remove()
146
146
147 for s, r in sorted(s2.items()):
147 for s, r in sorted(s2.items()):
148 if s in s1:
148 if s in s1:
149 continue
149 continue
150 elif s not in sa:
150 elif s not in sa:
151 debug(s, "remote added, get", r)
151 debug(s, "remote added, get", r)
152 mctx.sub(s).get(r)
152 mctx.sub(s).get(r)
153 sm[s] = r
153 sm[s] = r
154 elif r != sa[s]:
154 elif r != sa[s]:
155 if repo.ui.promptchoice(
155 if repo.ui.promptchoice(
156 _(' remote changed subrepository %s which local removed\n'
156 _(' remote changed subrepository %s which local removed\n'
157 'use (c)hanged version or (d)elete?') % s,
157 'use (c)hanged version or (d)elete?') % s,
158 (_('&Changed'), _('&Delete')), 0) == 0:
158 (_('&Changed'), _('&Delete')), 0) == 0:
159 debug(s, "prompt recreate", r)
159 debug(s, "prompt recreate", r)
160 wctx.sub(s).get(r)
160 wctx.sub(s).get(r)
161 sm[s] = r
161 sm[s] = r
162
162
163 # record merged .hgsubstate
163 # record merged .hgsubstate
164 writestate(repo, sm)
164 writestate(repo, sm)
165
165
166 def _updateprompt(ui, sub, dirty, local, remote):
166 def _updateprompt(ui, sub, dirty, local, remote):
167 if dirty:
167 if dirty:
168 msg = (_(' subrepository sources for %s differ\n'
168 msg = (_(' subrepository sources for %s differ\n'
169 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
169 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
170 % (subrelpath(sub), local, remote))
170 % (subrelpath(sub), local, remote))
171 else:
171 else:
172 msg = (_(' subrepository sources for %s differ (in checked out version)\n'
172 msg = (_(' subrepository sources for %s differ (in checked out version)\n'
173 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
173 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
174 % (subrelpath(sub), local, remote))
174 % (subrelpath(sub), local, remote))
175 return ui.promptchoice(msg, (_('&Local'), _('&Remote')), 0)
175 return ui.promptchoice(msg, (_('&Local'), _('&Remote')), 0)
176
176
177 def reporelpath(repo):
177 def reporelpath(repo):
178 """return path to this (sub)repo as seen from outermost repo"""
178 """return path to this (sub)repo as seen from outermost repo"""
179 parent = repo
179 parent = repo
180 while hasattr(parent, '_subparent'):
180 while hasattr(parent, '_subparent'):
181 parent = parent._subparent
181 parent = parent._subparent
182 return repo.root[len(parent.root)+1:]
182 return repo.root[len(parent.root)+1:]
183
183
184 def subrelpath(sub):
184 def subrelpath(sub):
185 """return path to this subrepo as seen from outermost repo"""
185 """return path to this subrepo as seen from outermost repo"""
186 if hasattr(sub, '_relpath'):
186 if hasattr(sub, '_relpath'):
187 return sub._relpath
187 return sub._relpath
188 if not hasattr(sub, '_repo'):
188 if not hasattr(sub, '_repo'):
189 return sub._path
189 return sub._path
190 return reporelpath(sub._repo)
190 return reporelpath(sub._repo)
191
191
192 def _abssource(repo, push=False, abort=True):
192 def _abssource(repo, push=False, abort=True):
193 """return pull/push path of repo - either based on parent repo .hgsub info
193 """return pull/push path of repo - either based on parent repo .hgsub info
194 or on the top repo config. Abort or return None if no source found."""
194 or on the top repo config. Abort or return None if no source found."""
195 if hasattr(repo, '_subparent'):
195 if hasattr(repo, '_subparent'):
196 source = url.url(repo._subsource)
196 source = url.url(repo._subsource)
197 source.path = posixpath.normpath(source.path)
197 source.path = posixpath.normpath(source.path)
198 if posixpath.isabs(source.path) or source.scheme:
198 if posixpath.isabs(source.path) or source.scheme:
199 return str(source)
199 return str(source)
200 parent = _abssource(repo._subparent, push, abort=False)
200 parent = _abssource(repo._subparent, push, abort=False)
201 if parent:
201 if parent:
202 parent = url.url(parent)
202 parent = url.url(parent)
203 parent.path = posixpath.join(parent.path, source.path)
203 parent.path = posixpath.join(parent.path, source.path)
204 parent.path = posixpath.normpath(parent.path)
204 parent.path = posixpath.normpath(parent.path)
205 return str(parent)
205 return str(parent)
206 else: # recursion reached top repo
206 else: # recursion reached top repo
207 if hasattr(repo, '_subtoppath'):
207 if hasattr(repo, '_subtoppath'):
208 return repo._subtoppath
208 return repo._subtoppath
209 if push and repo.ui.config('paths', 'default-push'):
209 if push and repo.ui.config('paths', 'default-push'):
210 return repo.ui.config('paths', 'default-push')
210 return repo.ui.config('paths', 'default-push')
211 if repo.ui.config('paths', 'default'):
211 if repo.ui.config('paths', 'default'):
212 return repo.ui.config('paths', 'default')
212 return repo.ui.config('paths', 'default')
213 if abort:
213 if abort:
214 raise util.Abort(_("default path for subrepository %s not found") %
214 raise util.Abort(_("default path for subrepository %s not found") %
215 reporelpath(repo))
215 reporelpath(repo))
216
216
217 def itersubrepos(ctx1, ctx2):
217 def itersubrepos(ctx1, ctx2):
218 """find subrepos in ctx1 or ctx2"""
218 """find subrepos in ctx1 or ctx2"""
219 # Create a (subpath, ctx) mapping where we prefer subpaths from
219 # Create a (subpath, ctx) mapping where we prefer subpaths from
220 # ctx1. The subpaths from ctx2 are important when the .hgsub file
220 # ctx1. The subpaths from ctx2 are important when the .hgsub file
221 # has been modified (in ctx2) but not yet committed (in ctx1).
221 # has been modified (in ctx2) but not yet committed (in ctx1).
222 subpaths = dict.fromkeys(ctx2.substate, ctx2)
222 subpaths = dict.fromkeys(ctx2.substate, ctx2)
223 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
223 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
224 for subpath, ctx in sorted(subpaths.iteritems()):
224 for subpath, ctx in sorted(subpaths.iteritems()):
225 yield subpath, ctx.sub(subpath)
225 yield subpath, ctx.sub(subpath)
226
226
227 def subrepo(ctx, path):
227 def subrepo(ctx, path):
228 """return instance of the right subrepo class for subrepo in path"""
228 """return instance of the right subrepo class for subrepo in path"""
229 # subrepo inherently violates our import layering rules
229 # subrepo inherently violates our import layering rules
230 # because it wants to make repo objects from deep inside the stack
230 # because it wants to make repo objects from deep inside the stack
231 # so we manually delay the circular imports to not break
231 # so we manually delay the circular imports to not break
232 # scripts that don't use our demand-loading
232 # scripts that don't use our demand-loading
233 global hg
233 global hg
234 import hg as h
234 import hg as h
235 hg = h
235 hg = h
236
236
237 util.path_auditor(ctx._repo.root)(path)
237 scmutil.path_auditor(ctx._repo.root)(path)
238 state = ctx.substate.get(path, nullstate)
238 state = ctx.substate.get(path, nullstate)
239 if state[2] not in types:
239 if state[2] not in types:
240 raise util.Abort(_('unknown subrepo type %s') % state[2])
240 raise util.Abort(_('unknown subrepo type %s') % state[2])
241 return types[state[2]](ctx, path, state[:2])
241 return types[state[2]](ctx, path, state[:2])
242
242
243 # subrepo classes need to implement the following abstract class:
243 # subrepo classes need to implement the following abstract class:
244
244
245 class abstractsubrepo(object):
245 class abstractsubrepo(object):
246
246
247 def dirty(self, ignoreupdate=False):
247 def dirty(self, ignoreupdate=False):
248 """returns true if the dirstate of the subrepo is dirty or does not
248 """returns true if the dirstate of the subrepo is dirty or does not
249 match current stored state. If ignoreupdate is true, only check
249 match current stored state. If ignoreupdate is true, only check
250 whether the subrepo has uncommitted changes in its dirstate.
250 whether the subrepo has uncommitted changes in its dirstate.
251 """
251 """
252 raise NotImplementedError
252 raise NotImplementedError
253
253
254 def checknested(self, path):
254 def checknested(self, path):
255 """check if path is a subrepository within this repository"""
255 """check if path is a subrepository within this repository"""
256 return False
256 return False
257
257
258 def commit(self, text, user, date):
258 def commit(self, text, user, date):
259 """commit the current changes to the subrepo with the given
259 """commit the current changes to the subrepo with the given
260 log message. Use given user and date if possible. Return the
260 log message. Use given user and date if possible. Return the
261 new state of the subrepo.
261 new state of the subrepo.
262 """
262 """
263 raise NotImplementedError
263 raise NotImplementedError
264
264
265 def remove(self):
265 def remove(self):
266 """remove the subrepo
266 """remove the subrepo
267
267
268 (should verify the dirstate is not dirty first)
268 (should verify the dirstate is not dirty first)
269 """
269 """
270 raise NotImplementedError
270 raise NotImplementedError
271
271
272 def get(self, state, overwrite=False):
272 def get(self, state, overwrite=False):
273 """run whatever commands are needed to put the subrepo into
273 """run whatever commands are needed to put the subrepo into
274 this state
274 this state
275 """
275 """
276 raise NotImplementedError
276 raise NotImplementedError
277
277
278 def merge(self, state):
278 def merge(self, state):
279 """merge currently-saved state with the new state."""
279 """merge currently-saved state with the new state."""
280 raise NotImplementedError
280 raise NotImplementedError
281
281
282 def push(self, force):
282 def push(self, force):
283 """perform whatever action is analogous to 'hg push'
283 """perform whatever action is analogous to 'hg push'
284
284
285 This may be a no-op on some systems.
285 This may be a no-op on some systems.
286 """
286 """
287 raise NotImplementedError
287 raise NotImplementedError
288
288
289 def add(self, ui, match, dryrun, prefix):
289 def add(self, ui, match, dryrun, prefix):
290 return []
290 return []
291
291
292 def status(self, rev2, **opts):
292 def status(self, rev2, **opts):
293 return [], [], [], [], [], [], []
293 return [], [], [], [], [], [], []
294
294
295 def diff(self, diffopts, node2, match, prefix, **opts):
295 def diff(self, diffopts, node2, match, prefix, **opts):
296 pass
296 pass
297
297
298 def outgoing(self, ui, dest, opts):
298 def outgoing(self, ui, dest, opts):
299 return 1
299 return 1
300
300
301 def incoming(self, ui, source, opts):
301 def incoming(self, ui, source, opts):
302 return 1
302 return 1
303
303
304 def files(self):
304 def files(self):
305 """return filename iterator"""
305 """return filename iterator"""
306 raise NotImplementedError
306 raise NotImplementedError
307
307
308 def filedata(self, name):
308 def filedata(self, name):
309 """return file data"""
309 """return file data"""
310 raise NotImplementedError
310 raise NotImplementedError
311
311
312 def fileflags(self, name):
312 def fileflags(self, name):
313 """return file flags"""
313 """return file flags"""
314 return ''
314 return ''
315
315
316 def archive(self, ui, archiver, prefix):
316 def archive(self, ui, archiver, prefix):
317 files = self.files()
317 files = self.files()
318 total = len(files)
318 total = len(files)
319 relpath = subrelpath(self)
319 relpath = subrelpath(self)
320 ui.progress(_('archiving (%s)') % relpath, 0,
320 ui.progress(_('archiving (%s)') % relpath, 0,
321 unit=_('files'), total=total)
321 unit=_('files'), total=total)
322 for i, name in enumerate(files):
322 for i, name in enumerate(files):
323 flags = self.fileflags(name)
323 flags = self.fileflags(name)
324 mode = 'x' in flags and 0755 or 0644
324 mode = 'x' in flags and 0755 or 0644
325 symlink = 'l' in flags
325 symlink = 'l' in flags
326 archiver.addfile(os.path.join(prefix, self._path, name),
326 archiver.addfile(os.path.join(prefix, self._path, name),
327 mode, symlink, self.filedata(name))
327 mode, symlink, self.filedata(name))
328 ui.progress(_('archiving (%s)') % relpath, i + 1,
328 ui.progress(_('archiving (%s)') % relpath, i + 1,
329 unit=_('files'), total=total)
329 unit=_('files'), total=total)
330 ui.progress(_('archiving (%s)') % relpath, None)
330 ui.progress(_('archiving (%s)') % relpath, None)
331
331
332
332
333 class hgsubrepo(abstractsubrepo):
333 class hgsubrepo(abstractsubrepo):
334 def __init__(self, ctx, path, state):
334 def __init__(self, ctx, path, state):
335 self._path = path
335 self._path = path
336 self._state = state
336 self._state = state
337 r = ctx._repo
337 r = ctx._repo
338 root = r.wjoin(path)
338 root = r.wjoin(path)
339 create = False
339 create = False
340 if not os.path.exists(os.path.join(root, '.hg')):
340 if not os.path.exists(os.path.join(root, '.hg')):
341 create = True
341 create = True
342 util.makedirs(root)
342 util.makedirs(root)
343 self._repo = hg.repository(r.ui, root, create=create)
343 self._repo = hg.repository(r.ui, root, create=create)
344 self._repo._subparent = r
344 self._repo._subparent = r
345 self._repo._subsource = state[0]
345 self._repo._subsource = state[0]
346
346
347 if create:
347 if create:
348 fp = self._repo.opener("hgrc", "w", text=True)
348 fp = self._repo.opener("hgrc", "w", text=True)
349 fp.write('[paths]\n')
349 fp.write('[paths]\n')
350
350
351 def addpathconfig(key, value):
351 def addpathconfig(key, value):
352 if value:
352 if value:
353 fp.write('%s = %s\n' % (key, value))
353 fp.write('%s = %s\n' % (key, value))
354 self._repo.ui.setconfig('paths', key, value)
354 self._repo.ui.setconfig('paths', key, value)
355
355
356 defpath = _abssource(self._repo, abort=False)
356 defpath = _abssource(self._repo, abort=False)
357 defpushpath = _abssource(self._repo, True, abort=False)
357 defpushpath = _abssource(self._repo, True, abort=False)
358 addpathconfig('default', defpath)
358 addpathconfig('default', defpath)
359 if defpath != defpushpath:
359 if defpath != defpushpath:
360 addpathconfig('default-push', defpushpath)
360 addpathconfig('default-push', defpushpath)
361 fp.close()
361 fp.close()
362
362
363 def add(self, ui, match, dryrun, prefix):
363 def add(self, ui, match, dryrun, prefix):
364 return cmdutil.add(ui, self._repo, match, dryrun, True,
364 return cmdutil.add(ui, self._repo, match, dryrun, True,
365 os.path.join(prefix, self._path))
365 os.path.join(prefix, self._path))
366
366
367 def status(self, rev2, **opts):
367 def status(self, rev2, **opts):
368 try:
368 try:
369 rev1 = self._state[1]
369 rev1 = self._state[1]
370 ctx1 = self._repo[rev1]
370 ctx1 = self._repo[rev1]
371 ctx2 = self._repo[rev2]
371 ctx2 = self._repo[rev2]
372 return self._repo.status(ctx1, ctx2, **opts)
372 return self._repo.status(ctx1, ctx2, **opts)
373 except error.RepoLookupError, inst:
373 except error.RepoLookupError, inst:
374 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
374 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
375 % (inst, subrelpath(self)))
375 % (inst, subrelpath(self)))
376 return [], [], [], [], [], [], []
376 return [], [], [], [], [], [], []
377
377
378 def diff(self, diffopts, node2, match, prefix, **opts):
378 def diff(self, diffopts, node2, match, prefix, **opts):
379 try:
379 try:
380 node1 = node.bin(self._state[1])
380 node1 = node.bin(self._state[1])
381 # We currently expect node2 to come from substate and be
381 # We currently expect node2 to come from substate and be
382 # in hex format
382 # in hex format
383 if node2 is not None:
383 if node2 is not None:
384 node2 = node.bin(node2)
384 node2 = node.bin(node2)
385 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
385 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
386 node1, node2, match,
386 node1, node2, match,
387 prefix=os.path.join(prefix, self._path),
387 prefix=os.path.join(prefix, self._path),
388 listsubrepos=True, **opts)
388 listsubrepos=True, **opts)
389 except error.RepoLookupError, inst:
389 except error.RepoLookupError, inst:
390 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
390 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
391 % (inst, subrelpath(self)))
391 % (inst, subrelpath(self)))
392
392
393 def archive(self, ui, archiver, prefix):
393 def archive(self, ui, archiver, prefix):
394 abstractsubrepo.archive(self, ui, archiver, prefix)
394 abstractsubrepo.archive(self, ui, archiver, prefix)
395
395
396 rev = self._state[1]
396 rev = self._state[1]
397 ctx = self._repo[rev]
397 ctx = self._repo[rev]
398 for subpath in ctx.substate:
398 for subpath in ctx.substate:
399 s = subrepo(ctx, subpath)
399 s = subrepo(ctx, subpath)
400 s.archive(ui, archiver, os.path.join(prefix, self._path))
400 s.archive(ui, archiver, os.path.join(prefix, self._path))
401
401
402 def dirty(self, ignoreupdate=False):
402 def dirty(self, ignoreupdate=False):
403 r = self._state[1]
403 r = self._state[1]
404 if r == '' and not ignoreupdate: # no state recorded
404 if r == '' and not ignoreupdate: # no state recorded
405 return True
405 return True
406 w = self._repo[None]
406 w = self._repo[None]
407 if w.p1() != self._repo[r] and not ignoreupdate:
407 if w.p1() != self._repo[r] and not ignoreupdate:
408 # different version checked out
408 # different version checked out
409 return True
409 return True
410 return w.dirty() # working directory changed
410 return w.dirty() # working directory changed
411
411
412 def checknested(self, path):
412 def checknested(self, path):
413 return self._repo._checknested(self._repo.wjoin(path))
413 return self._repo._checknested(self._repo.wjoin(path))
414
414
415 def commit(self, text, user, date):
415 def commit(self, text, user, date):
416 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
416 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
417 n = self._repo.commit(text, user, date)
417 n = self._repo.commit(text, user, date)
418 if not n:
418 if not n:
419 return self._repo['.'].hex() # different version checked out
419 return self._repo['.'].hex() # different version checked out
420 return node.hex(n)
420 return node.hex(n)
421
421
422 def remove(self):
422 def remove(self):
423 # we can't fully delete the repository as it may contain
423 # we can't fully delete the repository as it may contain
424 # local-only history
424 # local-only history
425 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
425 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
426 hg.clean(self._repo, node.nullid, False)
426 hg.clean(self._repo, node.nullid, False)
427
427
428 def _get(self, state):
428 def _get(self, state):
429 source, revision, kind = state
429 source, revision, kind = state
430 if revision not in self._repo:
430 if revision not in self._repo:
431 self._repo._subsource = source
431 self._repo._subsource = source
432 srcurl = _abssource(self._repo)
432 srcurl = _abssource(self._repo)
433 self._repo.ui.status(_('pulling subrepo %s from %s\n')
433 self._repo.ui.status(_('pulling subrepo %s from %s\n')
434 % (subrelpath(self), srcurl))
434 % (subrelpath(self), srcurl))
435 other = hg.repository(self._repo.ui, srcurl)
435 other = hg.repository(self._repo.ui, srcurl)
436 self._repo.pull(other)
436 self._repo.pull(other)
437 bookmarks.updatefromremote(self._repo.ui, self._repo, other)
437 bookmarks.updatefromremote(self._repo.ui, self._repo, other)
438
438
439 def get(self, state, overwrite=False):
439 def get(self, state, overwrite=False):
440 self._get(state)
440 self._get(state)
441 source, revision, kind = state
441 source, revision, kind = state
442 self._repo.ui.debug("getting subrepo %s\n" % self._path)
442 self._repo.ui.debug("getting subrepo %s\n" % self._path)
443 hg.clean(self._repo, revision, False)
443 hg.clean(self._repo, revision, False)
444
444
445 def merge(self, state):
445 def merge(self, state):
446 self._get(state)
446 self._get(state)
447 cur = self._repo['.']
447 cur = self._repo['.']
448 dst = self._repo[state[1]]
448 dst = self._repo[state[1]]
449 anc = dst.ancestor(cur)
449 anc = dst.ancestor(cur)
450
450
451 def mergefunc():
451 def mergefunc():
452 if anc == cur:
452 if anc == cur:
453 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
453 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
454 hg.update(self._repo, state[1])
454 hg.update(self._repo, state[1])
455 elif anc == dst:
455 elif anc == dst:
456 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
456 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
457 else:
457 else:
458 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
458 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
459 hg.merge(self._repo, state[1], remind=False)
459 hg.merge(self._repo, state[1], remind=False)
460
460
461 wctx = self._repo[None]
461 wctx = self._repo[None]
462 if self.dirty():
462 if self.dirty():
463 if anc != dst:
463 if anc != dst:
464 if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
464 if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
465 mergefunc()
465 mergefunc()
466 else:
466 else:
467 mergefunc()
467 mergefunc()
468 else:
468 else:
469 mergefunc()
469 mergefunc()
470
470
471 def push(self, force):
471 def push(self, force):
472 # push subrepos depth-first for coherent ordering
472 # push subrepos depth-first for coherent ordering
473 c = self._repo['']
473 c = self._repo['']
474 subs = c.substate # only repos that are committed
474 subs = c.substate # only repos that are committed
475 for s in sorted(subs):
475 for s in sorted(subs):
476 if not c.sub(s).push(force):
476 if not c.sub(s).push(force):
477 return False
477 return False
478
478
479 dsturl = _abssource(self._repo, True)
479 dsturl = _abssource(self._repo, True)
480 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
480 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
481 (subrelpath(self), dsturl))
481 (subrelpath(self), dsturl))
482 other = hg.repository(self._repo.ui, dsturl)
482 other = hg.repository(self._repo.ui, dsturl)
483 return self._repo.push(other, force)
483 return self._repo.push(other, force)
484
484
485 def outgoing(self, ui, dest, opts):
485 def outgoing(self, ui, dest, opts):
486 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
486 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
487
487
488 def incoming(self, ui, source, opts):
488 def incoming(self, ui, source, opts):
489 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
489 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
490
490
491 def files(self):
491 def files(self):
492 rev = self._state[1]
492 rev = self._state[1]
493 ctx = self._repo[rev]
493 ctx = self._repo[rev]
494 return ctx.manifest()
494 return ctx.manifest()
495
495
496 def filedata(self, name):
496 def filedata(self, name):
497 rev = self._state[1]
497 rev = self._state[1]
498 return self._repo[rev][name].data()
498 return self._repo[rev][name].data()
499
499
500 def fileflags(self, name):
500 def fileflags(self, name):
501 rev = self._state[1]
501 rev = self._state[1]
502 ctx = self._repo[rev]
502 ctx = self._repo[rev]
503 return ctx.flags(name)
503 return ctx.flags(name)
504
504
505
505
506 class svnsubrepo(abstractsubrepo):
506 class svnsubrepo(abstractsubrepo):
507 def __init__(self, ctx, path, state):
507 def __init__(self, ctx, path, state):
508 self._path = path
508 self._path = path
509 self._state = state
509 self._state = state
510 self._ctx = ctx
510 self._ctx = ctx
511 self._ui = ctx._repo.ui
511 self._ui = ctx._repo.ui
512
512
513 def _svncommand(self, commands, filename=''):
513 def _svncommand(self, commands, filename=''):
514 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
514 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
515 cmd = ['svn'] + commands + [path]
515 cmd = ['svn'] + commands + [path]
516 env = dict(os.environ)
516 env = dict(os.environ)
517 # Avoid localized output, preserve current locale for everything else.
517 # Avoid localized output, preserve current locale for everything else.
518 env['LC_MESSAGES'] = 'C'
518 env['LC_MESSAGES'] = 'C'
519 p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
519 p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
520 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
520 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
521 universal_newlines=True, env=env)
521 universal_newlines=True, env=env)
522 stdout, stderr = p.communicate()
522 stdout, stderr = p.communicate()
523 stderr = stderr.strip()
523 stderr = stderr.strip()
524 if stderr:
524 if stderr:
525 raise util.Abort(stderr)
525 raise util.Abort(stderr)
526 return stdout
526 return stdout
527
527
528 def _wcrevs(self):
528 def _wcrevs(self):
529 # Get the working directory revision as well as the last
529 # Get the working directory revision as well as the last
530 # commit revision so we can compare the subrepo state with
530 # commit revision so we can compare the subrepo state with
531 # both. We used to store the working directory one.
531 # both. We used to store the working directory one.
532 output = self._svncommand(['info', '--xml'])
532 output = self._svncommand(['info', '--xml'])
533 doc = xml.dom.minidom.parseString(output)
533 doc = xml.dom.minidom.parseString(output)
534 entries = doc.getElementsByTagName('entry')
534 entries = doc.getElementsByTagName('entry')
535 lastrev, rev = '0', '0'
535 lastrev, rev = '0', '0'
536 if entries:
536 if entries:
537 rev = str(entries[0].getAttribute('revision')) or '0'
537 rev = str(entries[0].getAttribute('revision')) or '0'
538 commits = entries[0].getElementsByTagName('commit')
538 commits = entries[0].getElementsByTagName('commit')
539 if commits:
539 if commits:
540 lastrev = str(commits[0].getAttribute('revision')) or '0'
540 lastrev = str(commits[0].getAttribute('revision')) or '0'
541 return (lastrev, rev)
541 return (lastrev, rev)
542
542
543 def _wcrev(self):
543 def _wcrev(self):
544 return self._wcrevs()[0]
544 return self._wcrevs()[0]
545
545
546 def _wcchanged(self):
546 def _wcchanged(self):
547 """Return (changes, extchanges) where changes is True
547 """Return (changes, extchanges) where changes is True
548 if the working directory was changed, and extchanges is
548 if the working directory was changed, and extchanges is
549 True if any of these changes concern an external entry.
549 True if any of these changes concern an external entry.
550 """
550 """
551 output = self._svncommand(['status', '--xml'])
551 output = self._svncommand(['status', '--xml'])
552 externals, changes = [], []
552 externals, changes = [], []
553 doc = xml.dom.minidom.parseString(output)
553 doc = xml.dom.minidom.parseString(output)
554 for e in doc.getElementsByTagName('entry'):
554 for e in doc.getElementsByTagName('entry'):
555 s = e.getElementsByTagName('wc-status')
555 s = e.getElementsByTagName('wc-status')
556 if not s:
556 if not s:
557 continue
557 continue
558 item = s[0].getAttribute('item')
558 item = s[0].getAttribute('item')
559 props = s[0].getAttribute('props')
559 props = s[0].getAttribute('props')
560 path = e.getAttribute('path')
560 path = e.getAttribute('path')
561 if item == 'external':
561 if item == 'external':
562 externals.append(path)
562 externals.append(path)
563 if (item not in ('', 'normal', 'unversioned', 'external')
563 if (item not in ('', 'normal', 'unversioned', 'external')
564 or props not in ('', 'none')):
564 or props not in ('', 'none')):
565 changes.append(path)
565 changes.append(path)
566 for path in changes:
566 for path in changes:
567 for ext in externals:
567 for ext in externals:
568 if path == ext or path.startswith(ext + os.sep):
568 if path == ext or path.startswith(ext + os.sep):
569 return True, True
569 return True, True
570 return bool(changes), False
570 return bool(changes), False
571
571
572 def dirty(self, ignoreupdate=False):
572 def dirty(self, ignoreupdate=False):
573 if not self._wcchanged()[0]:
573 if not self._wcchanged()[0]:
574 if self._state[1] in self._wcrevs() or ignoreupdate:
574 if self._state[1] in self._wcrevs() or ignoreupdate:
575 return False
575 return False
576 return True
576 return True
577
577
578 def commit(self, text, user, date):
578 def commit(self, text, user, date):
579 # user and date are out of our hands since svn is centralized
579 # user and date are out of our hands since svn is centralized
580 changed, extchanged = self._wcchanged()
580 changed, extchanged = self._wcchanged()
581 if not changed:
581 if not changed:
582 return self._wcrev()
582 return self._wcrev()
583 if extchanged:
583 if extchanged:
584 # Do not try to commit externals
584 # Do not try to commit externals
585 raise util.Abort(_('cannot commit svn externals'))
585 raise util.Abort(_('cannot commit svn externals'))
586 commitinfo = self._svncommand(['commit', '-m', text])
586 commitinfo = self._svncommand(['commit', '-m', text])
587 self._ui.status(commitinfo)
587 self._ui.status(commitinfo)
588 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
588 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
589 if not newrev:
589 if not newrev:
590 raise util.Abort(commitinfo.splitlines()[-1])
590 raise util.Abort(commitinfo.splitlines()[-1])
591 newrev = newrev.groups()[0]
591 newrev = newrev.groups()[0]
592 self._ui.status(self._svncommand(['update', '-r', newrev]))
592 self._ui.status(self._svncommand(['update', '-r', newrev]))
593 return newrev
593 return newrev
594
594
595 def remove(self):
595 def remove(self):
596 if self.dirty():
596 if self.dirty():
597 self._ui.warn(_('not removing repo %s because '
597 self._ui.warn(_('not removing repo %s because '
598 'it has changes.\n' % self._path))
598 'it has changes.\n' % self._path))
599 return
599 return
600 self._ui.note(_('removing subrepo %s\n') % self._path)
600 self._ui.note(_('removing subrepo %s\n') % self._path)
601
601
602 def onerror(function, path, excinfo):
602 def onerror(function, path, excinfo):
603 if function is not os.remove:
603 if function is not os.remove:
604 raise
604 raise
605 # read-only files cannot be unlinked under Windows
605 # read-only files cannot be unlinked under Windows
606 s = os.stat(path)
606 s = os.stat(path)
607 if (s.st_mode & stat.S_IWRITE) != 0:
607 if (s.st_mode & stat.S_IWRITE) != 0:
608 raise
608 raise
609 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
609 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
610 os.remove(path)
610 os.remove(path)
611
611
612 path = self._ctx._repo.wjoin(self._path)
612 path = self._ctx._repo.wjoin(self._path)
613 shutil.rmtree(path, onerror=onerror)
613 shutil.rmtree(path, onerror=onerror)
614 try:
614 try:
615 os.removedirs(os.path.dirname(path))
615 os.removedirs(os.path.dirname(path))
616 except OSError:
616 except OSError:
617 pass
617 pass
618
618
619 def get(self, state, overwrite=False):
619 def get(self, state, overwrite=False):
620 if overwrite:
620 if overwrite:
621 self._svncommand(['revert', '--recursive'])
621 self._svncommand(['revert', '--recursive'])
622 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
622 status = self._svncommand(['checkout', state[0], '--revision', state[1]])
623 if not re.search('Checked out revision [0-9]+.', status):
623 if not re.search('Checked out revision [0-9]+.', status):
624 raise util.Abort(status.splitlines()[-1])
624 raise util.Abort(status.splitlines()[-1])
625 self._ui.status(status)
625 self._ui.status(status)
626
626
627 def merge(self, state):
627 def merge(self, state):
628 old = self._state[1]
628 old = self._state[1]
629 new = state[1]
629 new = state[1]
630 if new != self._wcrev():
630 if new != self._wcrev():
631 dirty = old == self._wcrev() or self._wcchanged()[0]
631 dirty = old == self._wcrev() or self._wcchanged()[0]
632 if _updateprompt(self._ui, self, dirty, self._wcrev(), new):
632 if _updateprompt(self._ui, self, dirty, self._wcrev(), new):
633 self.get(state, False)
633 self.get(state, False)
634
634
635 def push(self, force):
635 def push(self, force):
636 # push is a no-op for SVN
636 # push is a no-op for SVN
637 return True
637 return True
638
638
639 def files(self):
639 def files(self):
640 output = self._svncommand(['list'])
640 output = self._svncommand(['list'])
641 # This works because svn forbids \n in filenames.
641 # This works because svn forbids \n in filenames.
642 return output.splitlines()
642 return output.splitlines()
643
643
644 def filedata(self, name):
644 def filedata(self, name):
645 return self._svncommand(['cat'], name)
645 return self._svncommand(['cat'], name)
646
646
647
647
648 class gitsubrepo(abstractsubrepo):
648 class gitsubrepo(abstractsubrepo):
649 def __init__(self, ctx, path, state):
649 def __init__(self, ctx, path, state):
650 # TODO add git version check.
650 # TODO add git version check.
651 self._state = state
651 self._state = state
652 self._ctx = ctx
652 self._ctx = ctx
653 self._path = path
653 self._path = path
654 self._relpath = os.path.join(reporelpath(ctx._repo), path)
654 self._relpath = os.path.join(reporelpath(ctx._repo), path)
655 self._abspath = ctx._repo.wjoin(path)
655 self._abspath = ctx._repo.wjoin(path)
656 self._subparent = ctx._repo
656 self._subparent = ctx._repo
657 self._ui = ctx._repo.ui
657 self._ui = ctx._repo.ui
658
658
659 def _gitcommand(self, commands, env=None, stream=False):
659 def _gitcommand(self, commands, env=None, stream=False):
660 return self._gitdir(commands, env=env, stream=stream)[0]
660 return self._gitdir(commands, env=env, stream=stream)[0]
661
661
662 def _gitdir(self, commands, env=None, stream=False):
662 def _gitdir(self, commands, env=None, stream=False):
663 return self._gitnodir(commands, env=env, stream=stream,
663 return self._gitnodir(commands, env=env, stream=stream,
664 cwd=self._abspath)
664 cwd=self._abspath)
665
665
666 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
666 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
667 """Calls the git command
667 """Calls the git command
668
668
669 The methods tries to call the git command. versions previor to 1.6.0
669 The methods tries to call the git command. versions previor to 1.6.0
670 are not supported and very probably fail.
670 are not supported and very probably fail.
671 """
671 """
672 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
672 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
673 # unless ui.quiet is set, print git's stderr,
673 # unless ui.quiet is set, print git's stderr,
674 # which is mostly progress and useful info
674 # which is mostly progress and useful info
675 errpipe = None
675 errpipe = None
676 if self._ui.quiet:
676 if self._ui.quiet:
677 errpipe = open(os.devnull, 'w')
677 errpipe = open(os.devnull, 'w')
678 p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
678 p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
679 close_fds=util.closefds,
679 close_fds=util.closefds,
680 stdout=subprocess.PIPE, stderr=errpipe)
680 stdout=subprocess.PIPE, stderr=errpipe)
681 if stream:
681 if stream:
682 return p.stdout, None
682 return p.stdout, None
683
683
684 retdata = p.stdout.read().strip()
684 retdata = p.stdout.read().strip()
685 # wait for the child to exit to avoid race condition.
685 # wait for the child to exit to avoid race condition.
686 p.wait()
686 p.wait()
687
687
688 if p.returncode != 0 and p.returncode != 1:
688 if p.returncode != 0 and p.returncode != 1:
689 # there are certain error codes that are ok
689 # there are certain error codes that are ok
690 command = commands[0]
690 command = commands[0]
691 if command in ('cat-file', 'symbolic-ref'):
691 if command in ('cat-file', 'symbolic-ref'):
692 return retdata, p.returncode
692 return retdata, p.returncode
693 # for all others, abort
693 # for all others, abort
694 raise util.Abort('git %s error %d in %s' %
694 raise util.Abort('git %s error %d in %s' %
695 (command, p.returncode, self._relpath))
695 (command, p.returncode, self._relpath))
696
696
697 return retdata, p.returncode
697 return retdata, p.returncode
698
698
699 def _gitmissing(self):
699 def _gitmissing(self):
700 return not os.path.exists(os.path.join(self._abspath, '.git'))
700 return not os.path.exists(os.path.join(self._abspath, '.git'))
701
701
702 def _gitstate(self):
702 def _gitstate(self):
703 return self._gitcommand(['rev-parse', 'HEAD'])
703 return self._gitcommand(['rev-parse', 'HEAD'])
704
704
705 def _gitcurrentbranch(self):
705 def _gitcurrentbranch(self):
706 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
706 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
707 if err:
707 if err:
708 current = None
708 current = None
709 return current
709 return current
710
710
711 def _gitremote(self, remote):
711 def _gitremote(self, remote):
712 out = self._gitcommand(['remote', 'show', '-n', remote])
712 out = self._gitcommand(['remote', 'show', '-n', remote])
713 line = out.split('\n')[1]
713 line = out.split('\n')[1]
714 i = line.index('URL: ') + len('URL: ')
714 i = line.index('URL: ') + len('URL: ')
715 return line[i:]
715 return line[i:]
716
716
717 def _githavelocally(self, revision):
717 def _githavelocally(self, revision):
718 out, code = self._gitdir(['cat-file', '-e', revision])
718 out, code = self._gitdir(['cat-file', '-e', revision])
719 return code == 0
719 return code == 0
720
720
721 def _gitisancestor(self, r1, r2):
721 def _gitisancestor(self, r1, r2):
722 base = self._gitcommand(['merge-base', r1, r2])
722 base = self._gitcommand(['merge-base', r1, r2])
723 return base == r1
723 return base == r1
724
724
725 def _gitbranchmap(self):
725 def _gitbranchmap(self):
726 '''returns 2 things:
726 '''returns 2 things:
727 a map from git branch to revision
727 a map from git branch to revision
728 a map from revision to branches'''
728 a map from revision to branches'''
729 branch2rev = {}
729 branch2rev = {}
730 rev2branch = {}
730 rev2branch = {}
731
731
732 out = self._gitcommand(['for-each-ref', '--format',
732 out = self._gitcommand(['for-each-ref', '--format',
733 '%(objectname) %(refname)'])
733 '%(objectname) %(refname)'])
734 for line in out.split('\n'):
734 for line in out.split('\n'):
735 revision, ref = line.split(' ')
735 revision, ref = line.split(' ')
736 if (not ref.startswith('refs/heads/') and
736 if (not ref.startswith('refs/heads/') and
737 not ref.startswith('refs/remotes/')):
737 not ref.startswith('refs/remotes/')):
738 continue
738 continue
739 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
739 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
740 continue # ignore remote/HEAD redirects
740 continue # ignore remote/HEAD redirects
741 branch2rev[ref] = revision
741 branch2rev[ref] = revision
742 rev2branch.setdefault(revision, []).append(ref)
742 rev2branch.setdefault(revision, []).append(ref)
743 return branch2rev, rev2branch
743 return branch2rev, rev2branch
744
744
745 def _gittracking(self, branches):
745 def _gittracking(self, branches):
746 'return map of remote branch to local tracking branch'
746 'return map of remote branch to local tracking branch'
747 # assumes no more than one local tracking branch for each remote
747 # assumes no more than one local tracking branch for each remote
748 tracking = {}
748 tracking = {}
749 for b in branches:
749 for b in branches:
750 if b.startswith('refs/remotes/'):
750 if b.startswith('refs/remotes/'):
751 continue
751 continue
752 remote = self._gitcommand(['config', 'branch.%s.remote' % b])
752 remote = self._gitcommand(['config', 'branch.%s.remote' % b])
753 if remote:
753 if remote:
754 ref = self._gitcommand(['config', 'branch.%s.merge' % b])
754 ref = self._gitcommand(['config', 'branch.%s.merge' % b])
755 tracking['refs/remotes/%s/%s' %
755 tracking['refs/remotes/%s/%s' %
756 (remote, ref.split('/', 2)[2])] = b
756 (remote, ref.split('/', 2)[2])] = b
757 return tracking
757 return tracking
758
758
759 def _abssource(self, source):
759 def _abssource(self, source):
760 if '://' not in source:
760 if '://' not in source:
761 # recognize the scp syntax as an absolute source
761 # recognize the scp syntax as an absolute source
762 colon = source.find(':')
762 colon = source.find(':')
763 if colon != -1 and '/' not in source[:colon]:
763 if colon != -1 and '/' not in source[:colon]:
764 return source
764 return source
765 self._subsource = source
765 self._subsource = source
766 return _abssource(self)
766 return _abssource(self)
767
767
768 def _fetch(self, source, revision):
768 def _fetch(self, source, revision):
769 if self._gitmissing():
769 if self._gitmissing():
770 source = self._abssource(source)
770 source = self._abssource(source)
771 self._ui.status(_('cloning subrepo %s from %s\n') %
771 self._ui.status(_('cloning subrepo %s from %s\n') %
772 (self._relpath, source))
772 (self._relpath, source))
773 self._gitnodir(['clone', source, self._abspath])
773 self._gitnodir(['clone', source, self._abspath])
774 if self._githavelocally(revision):
774 if self._githavelocally(revision):
775 return
775 return
776 self._ui.status(_('pulling subrepo %s from %s\n') %
776 self._ui.status(_('pulling subrepo %s from %s\n') %
777 (self._relpath, self._gitremote('origin')))
777 (self._relpath, self._gitremote('origin')))
778 # try only origin: the originally cloned repo
778 # try only origin: the originally cloned repo
779 self._gitcommand(['fetch'])
779 self._gitcommand(['fetch'])
780 if not self._githavelocally(revision):
780 if not self._githavelocally(revision):
781 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
781 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
782 (revision, self._relpath))
782 (revision, self._relpath))
783
783
784 def dirty(self, ignoreupdate=False):
784 def dirty(self, ignoreupdate=False):
785 if self._gitmissing():
785 if self._gitmissing():
786 return True
786 return True
787 if not ignoreupdate and self._state[1] != self._gitstate():
787 if not ignoreupdate and self._state[1] != self._gitstate():
788 # different version checked out
788 # different version checked out
789 return True
789 return True
790 # check for staged changes or modified files; ignore untracked files
790 # check for staged changes or modified files; ignore untracked files
791 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
791 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
792 return code == 1
792 return code == 1
793
793
794 def get(self, state, overwrite=False):
794 def get(self, state, overwrite=False):
795 source, revision, kind = state
795 source, revision, kind = state
796 self._fetch(source, revision)
796 self._fetch(source, revision)
797 # if the repo was set to be bare, unbare it
797 # if the repo was set to be bare, unbare it
798 if self._gitcommand(['config', '--bool', 'core.bare']) == 'true':
798 if self._gitcommand(['config', '--bool', 'core.bare']) == 'true':
799 self._gitcommand(['config', 'core.bare', 'false'])
799 self._gitcommand(['config', 'core.bare', 'false'])
800 if self._gitstate() == revision:
800 if self._gitstate() == revision:
801 self._gitcommand(['reset', '--hard', 'HEAD'])
801 self._gitcommand(['reset', '--hard', 'HEAD'])
802 return
802 return
803 elif self._gitstate() == revision:
803 elif self._gitstate() == revision:
804 if overwrite:
804 if overwrite:
805 # first reset the index to unmark new files for commit, because
805 # first reset the index to unmark new files for commit, because
806 # reset --hard will otherwise throw away files added for commit,
806 # reset --hard will otherwise throw away files added for commit,
807 # not just unmark them.
807 # not just unmark them.
808 self._gitcommand(['reset', 'HEAD'])
808 self._gitcommand(['reset', 'HEAD'])
809 self._gitcommand(['reset', '--hard', 'HEAD'])
809 self._gitcommand(['reset', '--hard', 'HEAD'])
810 return
810 return
811 branch2rev, rev2branch = self._gitbranchmap()
811 branch2rev, rev2branch = self._gitbranchmap()
812
812
813 def checkout(args):
813 def checkout(args):
814 cmd = ['checkout']
814 cmd = ['checkout']
815 if overwrite:
815 if overwrite:
816 # first reset the index to unmark new files for commit, because
816 # first reset the index to unmark new files for commit, because
817 # the -f option will otherwise throw away files added for
817 # the -f option will otherwise throw away files added for
818 # commit, not just unmark them.
818 # commit, not just unmark them.
819 self._gitcommand(['reset', 'HEAD'])
819 self._gitcommand(['reset', 'HEAD'])
820 cmd.append('-f')
820 cmd.append('-f')
821 self._gitcommand(cmd + args)
821 self._gitcommand(cmd + args)
822
822
823 def rawcheckout():
823 def rawcheckout():
824 # no branch to checkout, check it out with no branch
824 # no branch to checkout, check it out with no branch
825 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
825 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
826 self._relpath)
826 self._relpath)
827 self._ui.warn(_('check out a git branch if you intend '
827 self._ui.warn(_('check out a git branch if you intend '
828 'to make changes\n'))
828 'to make changes\n'))
829 checkout(['-q', revision])
829 checkout(['-q', revision])
830
830
831 if revision not in rev2branch:
831 if revision not in rev2branch:
832 rawcheckout()
832 rawcheckout()
833 return
833 return
834 branches = rev2branch[revision]
834 branches = rev2branch[revision]
835 firstlocalbranch = None
835 firstlocalbranch = None
836 for b in branches:
836 for b in branches:
837 if b == 'refs/heads/master':
837 if b == 'refs/heads/master':
838 # master trumps all other branches
838 # master trumps all other branches
839 checkout(['refs/heads/master'])
839 checkout(['refs/heads/master'])
840 return
840 return
841 if not firstlocalbranch and not b.startswith('refs/remotes/'):
841 if not firstlocalbranch and not b.startswith('refs/remotes/'):
842 firstlocalbranch = b
842 firstlocalbranch = b
843 if firstlocalbranch:
843 if firstlocalbranch:
844 checkout([firstlocalbranch])
844 checkout([firstlocalbranch])
845 return
845 return
846
846
847 tracking = self._gittracking(branch2rev.keys())
847 tracking = self._gittracking(branch2rev.keys())
848 # choose a remote branch already tracked if possible
848 # choose a remote branch already tracked if possible
849 remote = branches[0]
849 remote = branches[0]
850 if remote not in tracking:
850 if remote not in tracking:
851 for b in branches:
851 for b in branches:
852 if b in tracking:
852 if b in tracking:
853 remote = b
853 remote = b
854 break
854 break
855
855
856 if remote not in tracking:
856 if remote not in tracking:
857 # create a new local tracking branch
857 # create a new local tracking branch
858 local = remote.split('/', 2)[2]
858 local = remote.split('/', 2)[2]
859 checkout(['-b', local, remote])
859 checkout(['-b', local, remote])
860 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
860 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
861 # When updating to a tracked remote branch,
861 # When updating to a tracked remote branch,
862 # if the local tracking branch is downstream of it,
862 # if the local tracking branch is downstream of it,
863 # a normal `git pull` would have performed a "fast-forward merge"
863 # a normal `git pull` would have performed a "fast-forward merge"
864 # which is equivalent to updating the local branch to the remote.
864 # which is equivalent to updating the local branch to the remote.
865 # Since we are only looking at branching at update, we need to
865 # Since we are only looking at branching at update, we need to
866 # detect this situation and perform this action lazily.
866 # detect this situation and perform this action lazily.
867 if tracking[remote] != self._gitcurrentbranch():
867 if tracking[remote] != self._gitcurrentbranch():
868 checkout([tracking[remote]])
868 checkout([tracking[remote]])
869 self._gitcommand(['merge', '--ff', remote])
869 self._gitcommand(['merge', '--ff', remote])
870 else:
870 else:
871 # a real merge would be required, just checkout the revision
871 # a real merge would be required, just checkout the revision
872 rawcheckout()
872 rawcheckout()
873
873
874 def commit(self, text, user, date):
874 def commit(self, text, user, date):
875 if self._gitmissing():
875 if self._gitmissing():
876 raise util.Abort(_("subrepo %s is missing") % self._relpath)
876 raise util.Abort(_("subrepo %s is missing") % self._relpath)
877 cmd = ['commit', '-a', '-m', text]
877 cmd = ['commit', '-a', '-m', text]
878 env = os.environ.copy()
878 env = os.environ.copy()
879 if user:
879 if user:
880 cmd += ['--author', user]
880 cmd += ['--author', user]
881 if date:
881 if date:
882 # git's date parser silently ignores when seconds < 1e9
882 # git's date parser silently ignores when seconds < 1e9
883 # convert to ISO8601
883 # convert to ISO8601
884 env['GIT_AUTHOR_DATE'] = util.datestr(date,
884 env['GIT_AUTHOR_DATE'] = util.datestr(date,
885 '%Y-%m-%dT%H:%M:%S %1%2')
885 '%Y-%m-%dT%H:%M:%S %1%2')
886 self._gitcommand(cmd, env=env)
886 self._gitcommand(cmd, env=env)
887 # make sure commit works otherwise HEAD might not exist under certain
887 # make sure commit works otherwise HEAD might not exist under certain
888 # circumstances
888 # circumstances
889 return self._gitstate()
889 return self._gitstate()
890
890
891 def merge(self, state):
891 def merge(self, state):
892 source, revision, kind = state
892 source, revision, kind = state
893 self._fetch(source, revision)
893 self._fetch(source, revision)
894 base = self._gitcommand(['merge-base', revision, self._state[1]])
894 base = self._gitcommand(['merge-base', revision, self._state[1]])
895 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
895 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
896
896
897 def mergefunc():
897 def mergefunc():
898 if base == revision:
898 if base == revision:
899 self.get(state) # fast forward merge
899 self.get(state) # fast forward merge
900 elif base != self._state[1]:
900 elif base != self._state[1]:
901 self._gitcommand(['merge', '--no-commit', revision])
901 self._gitcommand(['merge', '--no-commit', revision])
902
902
903 if self.dirty():
903 if self.dirty():
904 if self._gitstate() != revision:
904 if self._gitstate() != revision:
905 dirty = self._gitstate() == self._state[1] or code != 0
905 dirty = self._gitstate() == self._state[1] or code != 0
906 if _updateprompt(self._ui, self, dirty,
906 if _updateprompt(self._ui, self, dirty,
907 self._state[1][:7], revision[:7]):
907 self._state[1][:7], revision[:7]):
908 mergefunc()
908 mergefunc()
909 else:
909 else:
910 mergefunc()
910 mergefunc()
911
911
912 def push(self, force):
912 def push(self, force):
913 if self._gitmissing():
913 if self._gitmissing():
914 raise util.Abort(_("subrepo %s is missing") % self._relpath)
914 raise util.Abort(_("subrepo %s is missing") % self._relpath)
915 # if a branch in origin contains the revision, nothing to do
915 # if a branch in origin contains the revision, nothing to do
916 branch2rev, rev2branch = self._gitbranchmap()
916 branch2rev, rev2branch = self._gitbranchmap()
917 if self._state[1] in rev2branch:
917 if self._state[1] in rev2branch:
918 for b in rev2branch[self._state[1]]:
918 for b in rev2branch[self._state[1]]:
919 if b.startswith('refs/remotes/origin/'):
919 if b.startswith('refs/remotes/origin/'):
920 return True
920 return True
921 for b, revision in branch2rev.iteritems():
921 for b, revision in branch2rev.iteritems():
922 if b.startswith('refs/remotes/origin/'):
922 if b.startswith('refs/remotes/origin/'):
923 if self._gitisancestor(self._state[1], revision):
923 if self._gitisancestor(self._state[1], revision):
924 return True
924 return True
925 # otherwise, try to push the currently checked out branch
925 # otherwise, try to push the currently checked out branch
926 cmd = ['push']
926 cmd = ['push']
927 if force:
927 if force:
928 cmd.append('--force')
928 cmd.append('--force')
929
929
930 current = self._gitcurrentbranch()
930 current = self._gitcurrentbranch()
931 if current:
931 if current:
932 # determine if the current branch is even useful
932 # determine if the current branch is even useful
933 if not self._gitisancestor(self._state[1], current):
933 if not self._gitisancestor(self._state[1], current):
934 self._ui.warn(_('unrelated git branch checked out '
934 self._ui.warn(_('unrelated git branch checked out '
935 'in subrepo %s\n') % self._relpath)
935 'in subrepo %s\n') % self._relpath)
936 return False
936 return False
937 self._ui.status(_('pushing branch %s of subrepo %s\n') %
937 self._ui.status(_('pushing branch %s of subrepo %s\n') %
938 (current.split('/', 2)[2], self._relpath))
938 (current.split('/', 2)[2], self._relpath))
939 self._gitcommand(cmd + ['origin', current])
939 self._gitcommand(cmd + ['origin', current])
940 return True
940 return True
941 else:
941 else:
942 self._ui.warn(_('no branch checked out in subrepo %s\n'
942 self._ui.warn(_('no branch checked out in subrepo %s\n'
943 'cannot push revision %s') %
943 'cannot push revision %s') %
944 (self._relpath, self._state[1]))
944 (self._relpath, self._state[1]))
945 return False
945 return False
946
946
947 def remove(self):
947 def remove(self):
948 if self._gitmissing():
948 if self._gitmissing():
949 return
949 return
950 if self.dirty():
950 if self.dirty():
951 self._ui.warn(_('not removing repo %s because '
951 self._ui.warn(_('not removing repo %s because '
952 'it has changes.\n') % self._relpath)
952 'it has changes.\n') % self._relpath)
953 return
953 return
954 # we can't fully delete the repository as it may contain
954 # we can't fully delete the repository as it may contain
955 # local-only history
955 # local-only history
956 self._ui.note(_('removing subrepo %s\n') % self._relpath)
956 self._ui.note(_('removing subrepo %s\n') % self._relpath)
957 self._gitcommand(['config', 'core.bare', 'true'])
957 self._gitcommand(['config', 'core.bare', 'true'])
958 for f in os.listdir(self._abspath):
958 for f in os.listdir(self._abspath):
959 if f == '.git':
959 if f == '.git':
960 continue
960 continue
961 path = os.path.join(self._abspath, f)
961 path = os.path.join(self._abspath, f)
962 if os.path.isdir(path) and not os.path.islink(path):
962 if os.path.isdir(path) and not os.path.islink(path):
963 shutil.rmtree(path)
963 shutil.rmtree(path)
964 else:
964 else:
965 os.remove(path)
965 os.remove(path)
966
966
967 def archive(self, ui, archiver, prefix):
967 def archive(self, ui, archiver, prefix):
968 source, revision = self._state
968 source, revision = self._state
969 self._fetch(source, revision)
969 self._fetch(source, revision)
970
970
971 # Parse git's native archive command.
971 # Parse git's native archive command.
972 # This should be much faster than manually traversing the trees
972 # This should be much faster than manually traversing the trees
973 # and objects with many subprocess calls.
973 # and objects with many subprocess calls.
974 tarstream = self._gitcommand(['archive', revision], stream=True)
974 tarstream = self._gitcommand(['archive', revision], stream=True)
975 tar = tarfile.open(fileobj=tarstream, mode='r|')
975 tar = tarfile.open(fileobj=tarstream, mode='r|')
976 relpath = subrelpath(self)
976 relpath = subrelpath(self)
977 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
977 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
978 for i, info in enumerate(tar):
978 for i, info in enumerate(tar):
979 if info.isdir():
979 if info.isdir():
980 continue
980 continue
981 if info.issym():
981 if info.issym():
982 data = info.linkname
982 data = info.linkname
983 else:
983 else:
984 data = tar.extractfile(info).read()
984 data = tar.extractfile(info).read()
985 archiver.addfile(os.path.join(prefix, self._path, info.name),
985 archiver.addfile(os.path.join(prefix, self._path, info.name),
986 info.mode, info.issym(), data)
986 info.mode, info.issym(), data)
987 ui.progress(_('archiving (%s)') % relpath, i + 1,
987 ui.progress(_('archiving (%s)') % relpath, i + 1,
988 unit=_('files'))
988 unit=_('files'))
989 ui.progress(_('archiving (%s)') % relpath, None)
989 ui.progress(_('archiving (%s)') % relpath, None)
990
990
991
991
992 def status(self, rev2, **opts):
992 def status(self, rev2, **opts):
993 if self._gitmissing():
993 if self._gitmissing():
994 # if the repo is missing, return no results
994 # if the repo is missing, return no results
995 return [], [], [], [], [], [], []
995 return [], [], [], [], [], [], []
996 rev1 = self._state[1]
996 rev1 = self._state[1]
997 modified, added, removed = [], [], []
997 modified, added, removed = [], [], []
998 if rev2:
998 if rev2:
999 command = ['diff-tree', rev1, rev2]
999 command = ['diff-tree', rev1, rev2]
1000 else:
1000 else:
1001 command = ['diff-index', rev1]
1001 command = ['diff-index', rev1]
1002 out = self._gitcommand(command)
1002 out = self._gitcommand(command)
1003 for line in out.split('\n'):
1003 for line in out.split('\n'):
1004 tab = line.find('\t')
1004 tab = line.find('\t')
1005 if tab == -1:
1005 if tab == -1:
1006 continue
1006 continue
1007 status, f = line[tab - 1], line[tab + 1:]
1007 status, f = line[tab - 1], line[tab + 1:]
1008 if status == 'M':
1008 if status == 'M':
1009 modified.append(f)
1009 modified.append(f)
1010 elif status == 'A':
1010 elif status == 'A':
1011 added.append(f)
1011 added.append(f)
1012 elif status == 'D':
1012 elif status == 'D':
1013 removed.append(f)
1013 removed.append(f)
1014
1014
1015 deleted = unknown = ignored = clean = []
1015 deleted = unknown = ignored = clean = []
1016 return modified, added, removed, deleted, unknown, ignored, clean
1016 return modified, added, removed, deleted, unknown, ignored, clean
1017
1017
1018 types = {
1018 types = {
1019 'hg': hgsubrepo,
1019 'hg': hgsubrepo,
1020 'svn': svnsubrepo,
1020 'svn': svnsubrepo,
1021 'git': gitsubrepo,
1021 'git': gitsubrepo,
1022 }
1022 }
@@ -1,1442 +1,1368 b''
1 # util.py - Mercurial utility functions and platform specfic implementations
1 # util.py - Mercurial utility functions and platform specfic implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, stat, time, calendar, textwrap, unicodedata, signal
19 import os, time, calendar, textwrap, unicodedata, signal
20 import imp, socket
20 import imp, socket
21
21
22 # Python compatibility
22 # Python compatibility
23
23
24 def sha1(s):
24 def sha1(s):
25 return _fastsha1(s)
25 return _fastsha1(s)
26
26
27 def _fastsha1(s):
27 def _fastsha1(s):
28 # This function will import sha1 from hashlib or sha (whichever is
28 # This function will import sha1 from hashlib or sha (whichever is
29 # available) and overwrite itself with it on the first call.
29 # available) and overwrite itself with it on the first call.
30 # Subsequent calls will go directly to the imported function.
30 # Subsequent calls will go directly to the imported function.
31 if sys.version_info >= (2, 5):
31 if sys.version_info >= (2, 5):
32 from hashlib import sha1 as _sha1
32 from hashlib import sha1 as _sha1
33 else:
33 else:
34 from sha import sha as _sha1
34 from sha import sha as _sha1
35 global _fastsha1, sha1
35 global _fastsha1, sha1
36 _fastsha1 = sha1 = _sha1
36 _fastsha1 = sha1 = _sha1
37 return _sha1(s)
37 return _sha1(s)
38
38
import __builtin__

if sys.version_info[0] >= 3:
    def fakebuffer(sliceable, offset=0):
        # memoryview is the Python 3 replacement for buffer()
        return memoryview(sliceable)[offset:]
else:
    def fakebuffer(sliceable, offset=0):
        return sliceable[offset:]
try:
    buffer
except NameError:
    # no builtin buffer(): install our stand-in
    __builtin__.buffer = fakebuffer
51
51
import subprocess
closefds = os.name == 'posix'

def popen2(cmd, env=None, newlines=False):
    """Run cmd through a shell and return its (stdin, stdout) pipes."""
    proc = _shellpopen(cmd, env, newlines, stderr=None)
    return proc.stdin, proc.stdout

def popen3(cmd, env=None, newlines=False):
    """Run cmd through a shell and return (stdin, stdout, stderr) pipes."""
    proc = _shellpopen(cmd, env, newlines, stderr=subprocess.PIPE)
    return proc.stdin, proc.stdout, proc.stderr

def _shellpopen(cmd, env, newlines, stderr):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    return subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=stderr,
                            universal_newlines=newlines,
                            env=env)
74
74
def version():
    """Return version information if available, 'unknown' otherwise."""
    # __version__ is generated at build time; a source checkout lacks it
    try:
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
82
82
83 # used by parsedate
83 # used by parsedate
84 defaultdateformats = (
84 defaultdateformats = (
85 '%Y-%m-%d %H:%M:%S',
85 '%Y-%m-%d %H:%M:%S',
86 '%Y-%m-%d %I:%M:%S%p',
86 '%Y-%m-%d %I:%M:%S%p',
87 '%Y-%m-%d %H:%M',
87 '%Y-%m-%d %H:%M',
88 '%Y-%m-%d %I:%M%p',
88 '%Y-%m-%d %I:%M%p',
89 '%Y-%m-%d',
89 '%Y-%m-%d',
90 '%m-%d',
90 '%m-%d',
91 '%m/%d',
91 '%m/%d',
92 '%m/%d/%y',
92 '%m/%d/%y',
93 '%m/%d/%Y',
93 '%m/%d/%Y',
94 '%a %b %d %H:%M:%S %Y',
94 '%a %b %d %H:%M:%S %Y',
95 '%a %b %d %I:%M:%S%p %Y',
95 '%a %b %d %I:%M:%S%p %Y',
96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
97 '%b %d %H:%M:%S %Y',
97 '%b %d %H:%M:%S %Y',
98 '%b %d %I:%M:%S%p %Y',
98 '%b %d %I:%M:%S%p %Y',
99 '%b %d %H:%M:%S',
99 '%b %d %H:%M:%S',
100 '%b %d %I:%M:%S%p',
100 '%b %d %I:%M:%S%p',
101 '%b %d %H:%M',
101 '%b %d %H:%M',
102 '%b %d %I:%M%p',
102 '%b %d %I:%M%p',
103 '%b %d %Y',
103 '%b %d %Y',
104 '%b %d',
104 '%b %d',
105 '%H:%M:%S',
105 '%H:%M:%S',
106 '%I:%M:%S%p',
106 '%I:%M:%S%p',
107 '%H:%M',
107 '%H:%M',
108 '%I:%M%p',
108 '%I:%M%p',
109 )
109 )
110
110
111 extendeddateformats = defaultdateformats + (
111 extendeddateformats = defaultdateformats + (
112 "%Y",
112 "%Y",
113 "%Y-%m",
113 "%Y-%m",
114 "%b",
114 "%b",
115 "%b %Y",
115 "%b %Y",
116 )
116 )
117
117
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX keyword arguments are not handled
    memo = {}
    if func.func_code.co_argcount == 1:
        # single-argument fast path: use the argument itself as the key,
        # saving the tuple pack/unpack on every call
        def cached(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def cached(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return cached

def lrucachefunc(func):
    '''cache most recent results of function calls'''
    memo = {}
    order = []  # keys, least recently used first; capped at ~20 entries
    if func.func_code.co_argcount == 1:
        def cached(arg):
            if arg in memo:
                order.remove(arg)
            else:
                if len(memo) > 20:
                    # evict the least recently used entry
                    del memo[order.pop(0)]
                memo[arg] = func(arg)
            order.append(arg)
            return memo[arg]
    else:
        def cached(*args):
            if args in memo:
                order.remove(args)
            else:
                if len(memo) > 20:
                    del memo[order.pop(0)]
                memo[args] = func(*args)
            order.append(args)
            return memo[args]

    return cached
163
163
class propertycache(object):
    """Descriptor computing a value once and caching it on the instance.

    The first attribute access runs the wrapped function and stores the
    result under the same name in the instance __dict__, which shadows
    this descriptor, so subsequent reads never call __get__ again.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        setattr(obj, self.name, value)
        return value
172
172
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    infile = outfile = None
    try:
        fd, infile = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(fd, 'wb')
        fp.write(s)
        fp.close()
        fd, outfile = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(fd)
        cmd = cmd.replace('INFILE', infile)
        cmd = cmd.replace('OUTFILE', outfile)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # VMS success statuses have the low bit set
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        fp = open(outfile, 'rb')
        data = fp.read()
        fp.close()
        return data
    finally:
        # best-effort cleanup of both temporary files
        for name in (infile, outfile):
            try:
                if name:
                    os.unlink(name)
            except:
                pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # a known prefix selects the filtering strategy; default is a pipe
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
228
228
def binary(s):
    """return true if a string is binary data"""
    # a NUL byte is taken as the marker of binary content
    return bool(s) and '\0' in s
232
232
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size < min:
            continue
        if min < max:
            # grow the threshold: at least double it, and jump straight
            # to the power of two below what we actually accumulated if
            # that is larger; never exceed max
            min = min << 1
            nmin = 1 << log2(size)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        size = 0
    if pending:
        # flush whatever is left, even if below the threshold
        yield ''.join(pending)
263
263
264 Abort = error.Abort
264 Abort = error.Abort
265
265
def always(fn):
    """Predicate that accepts any argument."""
    return True

def never(fn):
    """Predicate that rejects any argument."""
    return False
271
271
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path is possible, anchor at root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    src = splitpath(n1)
    dst = n2.split('/')
    src.reverse()
    dst.reverse()
    # strip the common leading components (compared via the reversed tails)
    while src and dst and src[-1] == dst[-1]:
        src.pop()
        dst.pop()
    dst.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join(['..'] * len(src) + dst) or '.'
297
297
298 _hgexecutable = None
298 _hgexecutable = None
299
299
def main_is_frozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if hasattr(sys, "frozen"):       # new py2exe
        return True
    if hasattr(sys, "importers"):    # old py2exe
        return True
    return imp.is_frozen("__main__") # tools/freeze
309
309
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        # resolve lazily and cache via set_hgexecutable
        envhg = os.environ.get('HG')
        if envhg:
            set_hgexecutable(envhg)
        elif main_is_frozen():
            set_hgexecutable(sys.executable)
        else:
            set_hgexecutable(find_exe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable

def set_hgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
330
330
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    # flush our own stdout first so the child's output does not interleave
    # ahead of anything we have already buffered
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    # child environment: our environment plus the caller's overrides,
    # with values coerced to shell-friendly strings
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    if out is None:
        # no redirection requested: let the child inherit our streams
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                            env=env, cwd=cwd)
    else:
        # redirect both stdout and stderr of the child into 'out'
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # VMS success statuses have the low bit set
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explain_exit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        # a ui-like object gets a warning; anything else (an exception
        # class) is raised with the message
        try:
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc
380
380
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback exactly one frame deep means the TypeError came
            # from the call itself (bad argument count), not from within
            # func's body
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return checked
392
392
def makedir(path, notindexed):
    # default implementation; notindexed (a Windows search-indexing flag)
    # is accepted for interface compatibility but has no effect here
    os.mkdir(path)
395
395
def unlinkpath(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # prune parent directories that became empty; stop silently at the
    # first one that is non-empty (or otherwise cannot be removed)
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass
404
404
405 def copyfile(src, dest):
405 def copyfile(src, dest):
406 "copy a file, preserving mode and atime/mtime"
406 "copy a file, preserving mode and atime/mtime"
407 if os.path.islink(src):
407 if os.path.islink(src):
408 try:
408 try:
409 os.unlink(dest)
409 os.unlink(dest)
410 except:
410 except:
411 pass
411 pass
412 os.symlink(os.readlink(src), dest)
412 os.symlink(os.readlink(src), dest)
413 else:
413 else:
414 try:
414 try:
415 shutil.copyfile(src, dest)
415 shutil.copyfile(src, dest)
416 shutil.copymode(src, dest)
416 shutil.copymode(src, dest)
417 except shutil.Error, inst:
417 except shutil.Error, inst:
418 raise Abort(str(inst))
418 raise Abort(str(inst))
419
419
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""
    if hardlink is None:
        # default: hardlink only when source and destination parent are
        # on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    copied = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            hardlink, n = copyfiles(os.path.join(src, name),
                                    os.path.join(dst, name), hardlink)
            copied += n
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. unsupported filesystem): fall back
                # to plain copies for this and all remaining files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        copied += 1

    return hardlink, copied
447
447
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # newline characters would corrupt the dirstate and manifest formats
    for banned in ('\r', '\n'):
        if banned in f:
            raise Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
452
452
# names and characters that Windows refuses in path components
_windows_reserved_filenames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_windows_reserved_chars = ':*?"<>|'
def checkwinfilename(path):
    '''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    '''
    # examine every path component regardless of slash direction
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for c in part:
            if c in _windows_reserved_chars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        stem = part.split('.')[0]
        if stem and stem.lower() in _windows_reserved_filenames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        last = part[-1]
        if last in '. ':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
494
494
class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # paths already validated by a previous call
        self.audited = set()
        # directory prefixes already proven safe (no symlink/nested repo)
        self.auditeddir = set()
        self.root = root
        # optional hook that may approve an otherwise-rejected nested repo
        self.callback = callback

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        if path in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if endswithsep(path):
            raise Abort(_("path ends in directory separator: %s") % path)
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        # reject absolute/drive paths, a leading .hg component, and ".."
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            # a non-leading .hg component means the path reaches into
            # another repository's metadata
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside nested repo %r')
                                % (path, base))

        # walk ancestor directories from deepest to shallowest, stopping
        # at the first prefix already known to be safe
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    # a directory containing .hg is a nested repository;
                    # the callback may still approve it (subrepos)
                    if not self.callback or not self.callback(curpath):
                        raise Abort(_('path %r is inside nested repo %r') %
                                    (path, prefix))
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
568
def lookup_reg(key, name=None, scope=None):
    """Default registry lookup stub: always returns None.

    Replaced by a real implementation when the Windows platform module
    is imported below.
    """
    return None
571
497
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    return None
579
505
580 if os.name == 'nt':
506 if os.name == 'nt':
581 checkosfilename = checkwinfilename
507 checkosfilename = checkwinfilename
582 from windows import *
508 from windows import *
583 else:
509 else:
584 from posix import *
510 from posix import *
585
511
def makelock(info, pathname):
    # preferred form: a symlink whose target encodes the lock info;
    # creation is atomic and readable without opening a file
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
        # any other symlink failure falls through to the file fallback
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusively create a regular file holding the info
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    # mirror of makelock: try the symlink form first
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
        # EINVAL/ENOSYS: not a symlink (or unsupported); use the file form
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
611
537
612 def fstat(fp):
538 def fstat(fp):
613 '''stat file object that may not have fileno method.'''
539 '''stat file object that may not have fileno method.'''
614 try:
540 try:
615 return os.fstat(fp.fileno())
541 return os.fstat(fp.fileno())
616 except AttributeError:
542 except AttributeError:
617 return os.stat(fp.name)
543 return os.stat(fp.name)
618
544
619 # File system features
545 # File system features
620
546
621 def checkcase(path):
547 def checkcase(path):
622 """
548 """
623 Check whether the given path is on a case-sensitive filesystem
549 Check whether the given path is on a case-sensitive filesystem
624
550
625 Requires a path (like /foo/.hg) ending with a foldable final
551 Requires a path (like /foo/.hg) ending with a foldable final
626 directory component.
552 directory component.
627 """
553 """
628 s1 = os.stat(path)
554 s1 = os.stat(path)
629 d, b = os.path.split(path)
555 d, b = os.path.split(path)
630 p2 = os.path.join(d, b.upper())
556 p2 = os.path.join(d, b.upper())
631 if path == p2:
557 if path == p2:
632 p2 = os.path.join(d, b.lower())
558 p2 = os.path.join(d, b.lower())
633 try:
559 try:
634 s2 = os.stat(p2)
560 s2 = os.stat(p2)
635 if s2 == s1:
561 if s2 == s1:
636 return False
562 return False
637 return True
563 return True
638 except:
564 except:
639 return True
565 return True
640
566
641 _fspathcache = {}
567 _fspathcache = {}
642 def fspath(name, root):
568 def fspath(name, root):
643 '''Get name in the case stored in the filesystem
569 '''Get name in the case stored in the filesystem
644
570
645 The name is either relative to root, or it is an absolute path starting
571 The name is either relative to root, or it is an absolute path starting
646 with root. Note that this function is unnecessary, and should not be
572 with root. Note that this function is unnecessary, and should not be
647 called, for case-sensitive filesystems (simply because it's expensive).
573 called, for case-sensitive filesystems (simply because it's expensive).
648 '''
574 '''
649 # If name is absolute, make it relative
575 # If name is absolute, make it relative
650 if name.lower().startswith(root.lower()):
576 if name.lower().startswith(root.lower()):
651 l = len(root)
577 l = len(root)
652 if name[l] == os.sep or name[l] == os.altsep:
578 if name[l] == os.sep or name[l] == os.altsep:
653 l = l + 1
579 l = l + 1
654 name = name[l:]
580 name = name[l:]
655
581
656 if not os.path.lexists(os.path.join(root, name)):
582 if not os.path.lexists(os.path.join(root, name)):
657 return None
583 return None
658
584
659 seps = os.sep
585 seps = os.sep
660 if os.altsep:
586 if os.altsep:
661 seps = seps + os.altsep
587 seps = seps + os.altsep
662 # Protect backslashes. This gets silly very quickly.
588 # Protect backslashes. This gets silly very quickly.
663 seps.replace('\\','\\\\')
589 seps.replace('\\','\\\\')
664 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
590 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
665 dir = os.path.normcase(os.path.normpath(root))
591 dir = os.path.normcase(os.path.normpath(root))
666 result = []
592 result = []
667 for part, sep in pattern.findall(name):
593 for part, sep in pattern.findall(name):
668 if sep:
594 if sep:
669 result.append(sep)
595 result.append(sep)
670 continue
596 continue
671
597
672 if dir not in _fspathcache:
598 if dir not in _fspathcache:
673 _fspathcache[dir] = os.listdir(dir)
599 _fspathcache[dir] = os.listdir(dir)
674 contents = _fspathcache[dir]
600 contents = _fspathcache[dir]
675
601
676 lpart = part.lower()
602 lpart = part.lower()
677 lenp = len(part)
603 lenp = len(part)
678 for n in contents:
604 for n in contents:
679 if lenp == len(n) and n.lower() == lpart:
605 if lenp == len(n) and n.lower() == lpart:
680 result.append(n)
606 result.append(n)
681 break
607 break
682 else:
608 else:
683 # Cannot happen, as the file exists!
609 # Cannot happen, as the file exists!
684 result.append(part)
610 result.append(part)
685 dir = os.path.join(dir, lpart)
611 dir = os.path.join(dir, lpart)
686
612
687 return ''.join(result)
613 return ''.join(result)
688
614
689 def checknlink(testfile):
615 def checknlink(testfile):
690 '''check whether hardlink count reporting works properly'''
616 '''check whether hardlink count reporting works properly'''
691
617
692 # testfile may be open, so we need a separate file for checking to
618 # testfile may be open, so we need a separate file for checking to
693 # work around issue2543 (or testfile may get lost on Samba shares)
619 # work around issue2543 (or testfile may get lost on Samba shares)
694 f1 = testfile + ".hgtmp1"
620 f1 = testfile + ".hgtmp1"
695 if os.path.lexists(f1):
621 if os.path.lexists(f1):
696 return False
622 return False
697 try:
623 try:
698 posixfile(f1, 'w').close()
624 posixfile(f1, 'w').close()
699 except IOError:
625 except IOError:
700 return False
626 return False
701
627
702 f2 = testfile + ".hgtmp2"
628 f2 = testfile + ".hgtmp2"
703 fd = None
629 fd = None
704 try:
630 try:
705 try:
631 try:
706 os_link(f1, f2)
632 os_link(f1, f2)
707 except OSError:
633 except OSError:
708 return False
634 return False
709
635
710 # nlinks() may behave differently for files on Windows shares if
636 # nlinks() may behave differently for files on Windows shares if
711 # the file is open.
637 # the file is open.
712 fd = posixfile(f2)
638 fd = posixfile(f2)
713 return nlinks(f2) > 1
639 return nlinks(f2) > 1
714 finally:
640 finally:
715 if fd is not None:
641 if fd is not None:
716 fd.close()
642 fd.close()
717 for f in (f1, f2):
643 for f in (f1, f2):
718 try:
644 try:
719 os.unlink(f)
645 os.unlink(f)
720 except OSError:
646 except OSError:
721 pass
647 pass
722
648
723 return False
649 return False
724
650
725 def endswithsep(path):
651 def endswithsep(path):
726 '''Check path ends with os.sep or os.altsep.'''
652 '''Check path ends with os.sep or os.altsep.'''
727 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
653 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
728
654
729 def splitpath(path):
655 def splitpath(path):
730 '''Split path by os.sep.
656 '''Split path by os.sep.
731 Note that this function does not use os.altsep because this is
657 Note that this function does not use os.altsep because this is
732 an alternative of simple "xxx.split(os.sep)".
658 an alternative of simple "xxx.split(os.sep)".
733 It is recommended to use os.path.normpath() before using this
659 It is recommended to use os.path.normpath() before using this
734 function if need.'''
660 function if need.'''
735 return path.split(os.sep)
661 return path.split(os.sep)
736
662
737 def gui():
663 def gui():
738 '''Are we running in a GUI?'''
664 '''Are we running in a GUI?'''
739 if sys.platform == 'darwin':
665 if sys.platform == 'darwin':
740 if 'SSH_CONNECTION' in os.environ:
666 if 'SSH_CONNECTION' in os.environ:
741 # handle SSH access to a box where the user is logged in
667 # handle SSH access to a box where the user is logged in
742 return False
668 return False
743 elif getattr(osutil, 'isgui', None):
669 elif getattr(osutil, 'isgui', None):
744 # check if a CoreGraphics session is available
670 # check if a CoreGraphics session is available
745 return osutil.isgui()
671 return osutil.isgui()
746 else:
672 else:
747 # pure build; use a safe default
673 # pure build; use a safe default
748 return True
674 return True
749 else:
675 else:
750 return os.name == "nt" or os.environ.get("DISPLAY")
676 return os.name == "nt" or os.environ.get("DISPLAY")
751
677
752 def mktempcopy(name, emptyok=False, createmode=None):
678 def mktempcopy(name, emptyok=False, createmode=None):
753 """Create a temporary file with the same contents from name
679 """Create a temporary file with the same contents from name
754
680
755 The permission bits are copied from the original file.
681 The permission bits are copied from the original file.
756
682
757 If the temporary file is going to be truncated immediately, you
683 If the temporary file is going to be truncated immediately, you
758 can use emptyok=True as an optimization.
684 can use emptyok=True as an optimization.
759
685
760 Returns the name of the temporary file.
686 Returns the name of the temporary file.
761 """
687 """
762 d, fn = os.path.split(name)
688 d, fn = os.path.split(name)
763 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
689 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
764 os.close(fd)
690 os.close(fd)
765 # Temporary files are created with mode 0600, which is usually not
691 # Temporary files are created with mode 0600, which is usually not
766 # what we want. If the original file already exists, just copy
692 # what we want. If the original file already exists, just copy
767 # its mode. Otherwise, manually obey umask.
693 # its mode. Otherwise, manually obey umask.
768 try:
694 try:
769 st_mode = os.lstat(name).st_mode & 0777
695 st_mode = os.lstat(name).st_mode & 0777
770 except OSError, inst:
696 except OSError, inst:
771 if inst.errno != errno.ENOENT:
697 if inst.errno != errno.ENOENT:
772 raise
698 raise
773 st_mode = createmode
699 st_mode = createmode
774 if st_mode is None:
700 if st_mode is None:
775 st_mode = ~umask
701 st_mode = ~umask
776 st_mode &= 0666
702 st_mode &= 0666
777 os.chmod(temp, st_mode)
703 os.chmod(temp, st_mode)
778 if emptyok:
704 if emptyok:
779 return temp
705 return temp
780 try:
706 try:
781 try:
707 try:
782 ifp = posixfile(name, "rb")
708 ifp = posixfile(name, "rb")
783 except IOError, inst:
709 except IOError, inst:
784 if inst.errno == errno.ENOENT:
710 if inst.errno == errno.ENOENT:
785 return temp
711 return temp
786 if not getattr(inst, 'filename', None):
712 if not getattr(inst, 'filename', None):
787 inst.filename = name
713 inst.filename = name
788 raise
714 raise
789 ofp = posixfile(temp, "wb")
715 ofp = posixfile(temp, "wb")
790 for chunk in filechunkiter(ifp):
716 for chunk in filechunkiter(ifp):
791 ofp.write(chunk)
717 ofp.write(chunk)
792 ifp.close()
718 ifp.close()
793 ofp.close()
719 ofp.close()
794 except:
720 except:
795 try: os.unlink(temp)
721 try: os.unlink(temp)
796 except: pass
722 except: pass
797 raise
723 raise
798 return temp
724 return temp
799
725
800 class atomictempfile(object):
726 class atomictempfile(object):
801 """file-like object that atomically updates a file
727 """file-like object that atomically updates a file
802
728
803 All writes will be redirected to a temporary copy of the original
729 All writes will be redirected to a temporary copy of the original
804 file. When rename is called, the copy is renamed to the original
730 file. When rename is called, the copy is renamed to the original
805 name, making the changes visible.
731 name, making the changes visible.
806 """
732 """
807 def __init__(self, name, mode='w+b', createmode=None):
733 def __init__(self, name, mode='w+b', createmode=None):
808 self.__name = name
734 self.__name = name
809 self._fp = None
735 self._fp = None
810 self.temp = mktempcopy(name, emptyok=('w' in mode),
736 self.temp = mktempcopy(name, emptyok=('w' in mode),
811 createmode=createmode)
737 createmode=createmode)
812 self._fp = posixfile(self.temp, mode)
738 self._fp = posixfile(self.temp, mode)
813
739
814 def __getattr__(self, name):
740 def __getattr__(self, name):
815 return getattr(self._fp, name)
741 return getattr(self._fp, name)
816
742
817 def rename(self):
743 def rename(self):
818 if not self._fp.closed:
744 if not self._fp.closed:
819 self._fp.close()
745 self._fp.close()
820 rename(self.temp, localpath(self.__name))
746 rename(self.temp, localpath(self.__name))
821
747
822 def close(self):
748 def close(self):
823 if not self._fp:
749 if not self._fp:
824 return
750 return
825 if not self._fp.closed:
751 if not self._fp.closed:
826 try:
752 try:
827 os.unlink(self.temp)
753 os.unlink(self.temp)
828 except: pass
754 except: pass
829 self._fp.close()
755 self._fp.close()
830
756
831 def __del__(self):
757 def __del__(self):
832 self.close()
758 self.close()
833
759
834 def makedirs(name, mode=None):
760 def makedirs(name, mode=None):
835 """recursive directory creation with parent mode inheritance"""
761 """recursive directory creation with parent mode inheritance"""
836 parent = os.path.abspath(os.path.dirname(name))
762 parent = os.path.abspath(os.path.dirname(name))
837 try:
763 try:
838 os.mkdir(name)
764 os.mkdir(name)
839 if mode is not None:
765 if mode is not None:
840 os.chmod(name, mode)
766 os.chmod(name, mode)
841 return
767 return
842 except OSError, err:
768 except OSError, err:
843 if err.errno == errno.EEXIST:
769 if err.errno == errno.EEXIST:
844 return
770 return
845 if not name or parent == name or err.errno != errno.ENOENT:
771 if not name or parent == name or err.errno != errno.ENOENT:
846 raise
772 raise
847 makedirs(parent, mode)
773 makedirs(parent, mode)
848 makedirs(name, mode)
774 makedirs(name, mode)
849
775
850 class chunkbuffer(object):
776 class chunkbuffer(object):
851 """Allow arbitrary sized chunks of data to be efficiently read from an
777 """Allow arbitrary sized chunks of data to be efficiently read from an
852 iterator over chunks of arbitrary size."""
778 iterator over chunks of arbitrary size."""
853
779
854 def __init__(self, in_iter):
780 def __init__(self, in_iter):
855 """in_iter is the iterator that's iterating over the input chunks.
781 """in_iter is the iterator that's iterating over the input chunks.
856 targetsize is how big a buffer to try to maintain."""
782 targetsize is how big a buffer to try to maintain."""
857 def splitbig(chunks):
783 def splitbig(chunks):
858 for chunk in chunks:
784 for chunk in chunks:
859 if len(chunk) > 2**20:
785 if len(chunk) > 2**20:
860 pos = 0
786 pos = 0
861 while pos < len(chunk):
787 while pos < len(chunk):
862 end = pos + 2 ** 18
788 end = pos + 2 ** 18
863 yield chunk[pos:end]
789 yield chunk[pos:end]
864 pos = end
790 pos = end
865 else:
791 else:
866 yield chunk
792 yield chunk
867 self.iter = splitbig(in_iter)
793 self.iter = splitbig(in_iter)
868 self._queue = []
794 self._queue = []
869
795
870 def read(self, l):
796 def read(self, l):
871 """Read L bytes of data from the iterator of chunks of data.
797 """Read L bytes of data from the iterator of chunks of data.
872 Returns less than L bytes if the iterator runs dry."""
798 Returns less than L bytes if the iterator runs dry."""
873 left = l
799 left = l
874 buf = ''
800 buf = ''
875 queue = self._queue
801 queue = self._queue
876 while left > 0:
802 while left > 0:
877 # refill the queue
803 # refill the queue
878 if not queue:
804 if not queue:
879 target = 2**18
805 target = 2**18
880 for chunk in self.iter:
806 for chunk in self.iter:
881 queue.append(chunk)
807 queue.append(chunk)
882 target -= len(chunk)
808 target -= len(chunk)
883 if target <= 0:
809 if target <= 0:
884 break
810 break
885 if not queue:
811 if not queue:
886 break
812 break
887
813
888 chunk = queue.pop(0)
814 chunk = queue.pop(0)
889 left -= len(chunk)
815 left -= len(chunk)
890 if left < 0:
816 if left < 0:
891 queue.insert(0, chunk[left:])
817 queue.insert(0, chunk[left:])
892 buf += chunk[:left]
818 buf += chunk[:left]
893 else:
819 else:
894 buf += chunk
820 buf += chunk
895
821
896 return buf
822 return buf
897
823
898 def filechunkiter(f, size=65536, limit=None):
824 def filechunkiter(f, size=65536, limit=None):
899 """Create a generator that produces the data in the file size
825 """Create a generator that produces the data in the file size
900 (default 65536) bytes at a time, up to optional limit (default is
826 (default 65536) bytes at a time, up to optional limit (default is
901 to read all data). Chunks may be less than size bytes if the
827 to read all data). Chunks may be less than size bytes if the
902 chunk is the last chunk in the file, or the file is a socket or
828 chunk is the last chunk in the file, or the file is a socket or
903 some other type of file that sometimes reads less data than is
829 some other type of file that sometimes reads less data than is
904 requested."""
830 requested."""
905 assert size >= 0
831 assert size >= 0
906 assert limit is None or limit >= 0
832 assert limit is None or limit >= 0
907 while True:
833 while True:
908 if limit is None:
834 if limit is None:
909 nbytes = size
835 nbytes = size
910 else:
836 else:
911 nbytes = min(limit, size)
837 nbytes = min(limit, size)
912 s = nbytes and f.read(nbytes)
838 s = nbytes and f.read(nbytes)
913 if not s:
839 if not s:
914 break
840 break
915 if limit:
841 if limit:
916 limit -= len(s)
842 limit -= len(s)
917 yield s
843 yield s
918
844
919 def makedate():
845 def makedate():
920 lt = time.localtime()
846 lt = time.localtime()
921 if lt[8] == 1 and time.daylight:
847 if lt[8] == 1 and time.daylight:
922 tz = time.altzone
848 tz = time.altzone
923 else:
849 else:
924 tz = time.timezone
850 tz = time.timezone
925 t = time.mktime(lt)
851 t = time.mktime(lt)
926 if t < 0:
852 if t < 0:
927 hint = _("check your clock")
853 hint = _("check your clock")
928 raise Abort(_("negative timestamp: %d") % t, hint=hint)
854 raise Abort(_("negative timestamp: %d") % t, hint=hint)
929 return t, tz
855 return t, tz
930
856
931 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
857 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
932 """represent a (unixtime, offset) tuple as a localized time.
858 """represent a (unixtime, offset) tuple as a localized time.
933 unixtime is seconds since the epoch, and offset is the time zone's
859 unixtime is seconds since the epoch, and offset is the time zone's
934 number of seconds away from UTC. if timezone is false, do not
860 number of seconds away from UTC. if timezone is false, do not
935 append time zone to string."""
861 append time zone to string."""
936 t, tz = date or makedate()
862 t, tz = date or makedate()
937 if t < 0:
863 if t < 0:
938 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
864 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
939 tz = 0
865 tz = 0
940 if "%1" in format or "%2" in format:
866 if "%1" in format or "%2" in format:
941 sign = (tz > 0) and "-" or "+"
867 sign = (tz > 0) and "-" or "+"
942 minutes = abs(tz) // 60
868 minutes = abs(tz) // 60
943 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
869 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
944 format = format.replace("%2", "%02d" % (minutes % 60))
870 format = format.replace("%2", "%02d" % (minutes % 60))
945 s = time.strftime(format, time.gmtime(float(t) - tz))
871 s = time.strftime(format, time.gmtime(float(t) - tz))
946 return s
872 return s
947
873
948 def shortdate(date=None):
874 def shortdate(date=None):
949 """turn (timestamp, tzoff) tuple into iso 8631 date."""
875 """turn (timestamp, tzoff) tuple into iso 8631 date."""
950 return datestr(date, format='%Y-%m-%d')
876 return datestr(date, format='%Y-%m-%d')
951
877
952 def strdate(string, format, defaults=[]):
878 def strdate(string, format, defaults=[]):
953 """parse a localized time string and return a (unixtime, offset) tuple.
879 """parse a localized time string and return a (unixtime, offset) tuple.
954 if the string cannot be parsed, ValueError is raised."""
880 if the string cannot be parsed, ValueError is raised."""
955 def timezone(string):
881 def timezone(string):
956 tz = string.split()[-1]
882 tz = string.split()[-1]
957 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
883 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
958 sign = (tz[0] == "+") and 1 or -1
884 sign = (tz[0] == "+") and 1 or -1
959 hours = int(tz[1:3])
885 hours = int(tz[1:3])
960 minutes = int(tz[3:5])
886 minutes = int(tz[3:5])
961 return -sign * (hours * 60 + minutes) * 60
887 return -sign * (hours * 60 + minutes) * 60
962 if tz == "GMT" or tz == "UTC":
888 if tz == "GMT" or tz == "UTC":
963 return 0
889 return 0
964 return None
890 return None
965
891
966 # NOTE: unixtime = localunixtime + offset
892 # NOTE: unixtime = localunixtime + offset
967 offset, date = timezone(string), string
893 offset, date = timezone(string), string
968 if offset is not None:
894 if offset is not None:
969 date = " ".join(string.split()[:-1])
895 date = " ".join(string.split()[:-1])
970
896
971 # add missing elements from defaults
897 # add missing elements from defaults
972 usenow = False # default to using biased defaults
898 usenow = False # default to using biased defaults
973 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
899 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
974 found = [True for p in part if ("%"+p) in format]
900 found = [True for p in part if ("%"+p) in format]
975 if not found:
901 if not found:
976 date += "@" + defaults[part][usenow]
902 date += "@" + defaults[part][usenow]
977 format += "@%" + part[0]
903 format += "@%" + part[0]
978 else:
904 else:
979 # We've found a specific time element, less specific time
905 # We've found a specific time element, less specific time
980 # elements are relative to today
906 # elements are relative to today
981 usenow = True
907 usenow = True
982
908
983 timetuple = time.strptime(date, format)
909 timetuple = time.strptime(date, format)
984 localunixtime = int(calendar.timegm(timetuple))
910 localunixtime = int(calendar.timegm(timetuple))
985 if offset is None:
911 if offset is None:
986 # local timezone
912 # local timezone
987 unixtime = int(time.mktime(timetuple))
913 unixtime = int(time.mktime(timetuple))
988 offset = unixtime - localunixtime
914 offset = unixtime - localunixtime
989 else:
915 else:
990 unixtime = localunixtime + offset
916 unixtime = localunixtime + offset
991 return unixtime, offset
917 return unixtime, offset
992
918
993 def parsedate(date, formats=None, bias={}):
919 def parsedate(date, formats=None, bias={}):
994 """parse a localized date/time and return a (unixtime, offset) tuple.
920 """parse a localized date/time and return a (unixtime, offset) tuple.
995
921
996 The date may be a "unixtime offset" string or in one of the specified
922 The date may be a "unixtime offset" string or in one of the specified
997 formats. If the date already is a (unixtime, offset) tuple, it is returned.
923 formats. If the date already is a (unixtime, offset) tuple, it is returned.
998 """
924 """
999 if not date:
925 if not date:
1000 return 0, 0
926 return 0, 0
1001 if isinstance(date, tuple) and len(date) == 2:
927 if isinstance(date, tuple) and len(date) == 2:
1002 return date
928 return date
1003 if not formats:
929 if not formats:
1004 formats = defaultdateformats
930 formats = defaultdateformats
1005 date = date.strip()
931 date = date.strip()
1006 try:
932 try:
1007 when, offset = map(int, date.split(' '))
933 when, offset = map(int, date.split(' '))
1008 except ValueError:
934 except ValueError:
1009 # fill out defaults
935 # fill out defaults
1010 now = makedate()
936 now = makedate()
1011 defaults = {}
937 defaults = {}
1012 nowmap = {}
938 nowmap = {}
1013 for part in ("d", "mb", "yY", "HI", "M", "S"):
939 for part in ("d", "mb", "yY", "HI", "M", "S"):
1014 # this piece is for rounding the specific end of unknowns
940 # this piece is for rounding the specific end of unknowns
1015 b = bias.get(part)
941 b = bias.get(part)
1016 if b is None:
942 if b is None:
1017 if part[0] in "HMS":
943 if part[0] in "HMS":
1018 b = "00"
944 b = "00"
1019 else:
945 else:
1020 b = "0"
946 b = "0"
1021
947
1022 # this piece is for matching the generic end to today's date
948 # this piece is for matching the generic end to today's date
1023 n = datestr(now, "%" + part[0])
949 n = datestr(now, "%" + part[0])
1024
950
1025 defaults[part] = (b, n)
951 defaults[part] = (b, n)
1026
952
1027 for format in formats:
953 for format in formats:
1028 try:
954 try:
1029 when, offset = strdate(date, format, defaults)
955 when, offset = strdate(date, format, defaults)
1030 except (ValueError, OverflowError):
956 except (ValueError, OverflowError):
1031 pass
957 pass
1032 else:
958 else:
1033 break
959 break
1034 else:
960 else:
1035 raise Abort(_('invalid date: %r') % date)
961 raise Abort(_('invalid date: %r') % date)
1036 # validate explicit (probably user-specified) date and
962 # validate explicit (probably user-specified) date and
1037 # time zone offset. values must fit in signed 32 bits for
963 # time zone offset. values must fit in signed 32 bits for
1038 # current 32-bit linux runtimes. timezones go from UTC-12
964 # current 32-bit linux runtimes. timezones go from UTC-12
1039 # to UTC+14
965 # to UTC+14
1040 if abs(when) > 0x7fffffff:
966 if abs(when) > 0x7fffffff:
1041 raise Abort(_('date exceeds 32 bits: %d') % when)
967 raise Abort(_('date exceeds 32 bits: %d') % when)
1042 if when < 0:
968 if when < 0:
1043 raise Abort(_('negative date value: %d') % when)
969 raise Abort(_('negative date value: %d') % when)
1044 if offset < -50400 or offset > 43200:
970 if offset < -50400 or offset > 43200:
1045 raise Abort(_('impossible time zone offset: %d') % offset)
971 raise Abort(_('impossible time zone offset: %d') % offset)
1046 return when, offset
972 return when, offset
1047
973
1048 def matchdate(date):
974 def matchdate(date):
1049 """Return a function that matches a given date match specifier
975 """Return a function that matches a given date match specifier
1050
976
1051 Formats include:
977 Formats include:
1052
978
1053 '{date}' match a given date to the accuracy provided
979 '{date}' match a given date to the accuracy provided
1054
980
1055 '<{date}' on or before a given date
981 '<{date}' on or before a given date
1056
982
1057 '>{date}' on or after a given date
983 '>{date}' on or after a given date
1058
984
1059 >>> p1 = parsedate("10:29:59")
985 >>> p1 = parsedate("10:29:59")
1060 >>> p2 = parsedate("10:30:00")
986 >>> p2 = parsedate("10:30:00")
1061 >>> p3 = parsedate("10:30:59")
987 >>> p3 = parsedate("10:30:59")
1062 >>> p4 = parsedate("10:31:00")
988 >>> p4 = parsedate("10:31:00")
1063 >>> p5 = parsedate("Sep 15 10:30:00 1999")
989 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1064 >>> f = matchdate("10:30")
990 >>> f = matchdate("10:30")
1065 >>> f(p1[0])
991 >>> f(p1[0])
1066 False
992 False
1067 >>> f(p2[0])
993 >>> f(p2[0])
1068 True
994 True
1069 >>> f(p3[0])
995 >>> f(p3[0])
1070 True
996 True
1071 >>> f(p4[0])
997 >>> f(p4[0])
1072 False
998 False
1073 >>> f(p5[0])
999 >>> f(p5[0])
1074 False
1000 False
1075 """
1001 """
1076
1002
1077 def lower(date):
1003 def lower(date):
1078 d = dict(mb="1", d="1")
1004 d = dict(mb="1", d="1")
1079 return parsedate(date, extendeddateformats, d)[0]
1005 return parsedate(date, extendeddateformats, d)[0]
1080
1006
1081 def upper(date):
1007 def upper(date):
1082 d = dict(mb="12", HI="23", M="59", S="59")
1008 d = dict(mb="12", HI="23", M="59", S="59")
1083 for days in ("31", "30", "29"):
1009 for days in ("31", "30", "29"):
1084 try:
1010 try:
1085 d["d"] = days
1011 d["d"] = days
1086 return parsedate(date, extendeddateformats, d)[0]
1012 return parsedate(date, extendeddateformats, d)[0]
1087 except:
1013 except:
1088 pass
1014 pass
1089 d["d"] = "28"
1015 d["d"] = "28"
1090 return parsedate(date, extendeddateformats, d)[0]
1016 return parsedate(date, extendeddateformats, d)[0]
1091
1017
1092 date = date.strip()
1018 date = date.strip()
1093
1019
1094 if not date:
1020 if not date:
1095 raise Abort(_("dates cannot consist entirely of whitespace"))
1021 raise Abort(_("dates cannot consist entirely of whitespace"))
1096 elif date[0] == "<":
1022 elif date[0] == "<":
1097 if not date[1:]:
1023 if not date[1:]:
1098 raise Abort(_("invalid day spec, use '<DATE'"))
1024 raise Abort(_("invalid day spec, use '<DATE'"))
1099 when = upper(date[1:])
1025 when = upper(date[1:])
1100 return lambda x: x <= when
1026 return lambda x: x <= when
1101 elif date[0] == ">":
1027 elif date[0] == ">":
1102 if not date[1:]:
1028 if not date[1:]:
1103 raise Abort(_("invalid day spec, use '>DATE'"))
1029 raise Abort(_("invalid day spec, use '>DATE'"))
1104 when = lower(date[1:])
1030 when = lower(date[1:])
1105 return lambda x: x >= when
1031 return lambda x: x >= when
1106 elif date[0] == "-":
1032 elif date[0] == "-":
1107 try:
1033 try:
1108 days = int(date[1:])
1034 days = int(date[1:])
1109 except ValueError:
1035 except ValueError:
1110 raise Abort(_("invalid day spec: %s") % date[1:])
1036 raise Abort(_("invalid day spec: %s") % date[1:])
1111 if days < 0:
1037 if days < 0:
1112 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1038 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1113 % date[1:])
1039 % date[1:])
1114 when = makedate()[0] - days * 3600 * 24
1040 when = makedate()[0] - days * 3600 * 24
1115 return lambda x: x >= when
1041 return lambda x: x >= when
1116 elif " to " in date:
1042 elif " to " in date:
1117 a, b = date.split(" to ")
1043 a, b = date.split(" to ")
1118 start, stop = lower(a), upper(b)
1044 start, stop = lower(a), upper(b)
1119 return lambda x: x >= start and x <= stop
1045 return lambda x: x >= start and x <= stop
1120 else:
1046 else:
1121 start, stop = lower(date), upper(date)
1047 start, stop = lower(date), upper(date)
1122 return lambda x: x >= start and x <= stop
1048 return lambda x: x >= start and x <= stop
1123
1049
1124 def shortuser(user):
1050 def shortuser(user):
1125 """Return a short representation of a user name or email address."""
1051 """Return a short representation of a user name or email address."""
1126 f = user.find('@')
1052 f = user.find('@')
1127 if f >= 0:
1053 if f >= 0:
1128 user = user[:f]
1054 user = user[:f]
1129 f = user.find('<')
1055 f = user.find('<')
1130 if f >= 0:
1056 if f >= 0:
1131 user = user[f + 1:]
1057 user = user[f + 1:]
1132 f = user.find(' ')
1058 f = user.find(' ')
1133 if f >= 0:
1059 if f >= 0:
1134 user = user[:f]
1060 user = user[:f]
1135 f = user.find('.')
1061 f = user.find('.')
1136 if f >= 0:
1062 if f >= 0:
1137 user = user[:f]
1063 user = user[:f]
1138 return user
1064 return user
1139
1065
1140 def email(author):
1066 def email(author):
1141 '''get email of author.'''
1067 '''get email of author.'''
1142 r = author.find('>')
1068 r = author.find('>')
1143 if r == -1:
1069 if r == -1:
1144 r = None
1070 r = None
1145 return author[author.find('<') + 1:r]
1071 return author[author.find('<') + 1:r]
1146
1072
1147 def _ellipsis(text, maxlength):
1073 def _ellipsis(text, maxlength):
1148 if len(text) <= maxlength:
1074 if len(text) <= maxlength:
1149 return text, False
1075 return text, False
1150 else:
1076 else:
1151 return "%s..." % (text[:maxlength - 3]), True
1077 return "%s..." % (text[:maxlength - 3]), True
1152
1078
1153 def ellipsis(text, maxlength=400):
1079 def ellipsis(text, maxlength=400):
1154 """Trim string to at most maxlength (default: 400) characters."""
1080 """Trim string to at most maxlength (default: 400) characters."""
1155 try:
1081 try:
1156 # use unicode not to split at intermediate multi-byte sequence
1082 # use unicode not to split at intermediate multi-byte sequence
1157 utext, truncated = _ellipsis(text.decode(encoding.encoding),
1083 utext, truncated = _ellipsis(text.decode(encoding.encoding),
1158 maxlength)
1084 maxlength)
1159 if not truncated:
1085 if not truncated:
1160 return text
1086 return text
1161 return utext.encode(encoding.encoding)
1087 return utext.encode(encoding.encoding)
1162 except (UnicodeDecodeError, UnicodeEncodeError):
1088 except (UnicodeDecodeError, UnicodeEncodeError):
1163 return _ellipsis(text, maxlength)[0]
1089 return _ellipsis(text, maxlength)[0]
1164
1090
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # abort the walk only for errors on the root path itself;
        # unreadable subdirectories are silently skipped by os.walk
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            # record dirname's stat in dirlst unless an entry with the
            # same inode is already there; return True if newly added
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat() symlink cycles cannot be detected, so
        # following symlinks would risk infinite recursion: disable it
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            # os.walk does not follow symlinked directories itself, so
            # recurse manually into unseen links; pruning dirs in place
            # controls what topdown os.walk visits next
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
1211
1137
# cache for rcpath(); stays None until the first rcpath() call fills it
_rcpath = None
1213
1139
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # system-wide entries first, then per-user ones
    paths = system_rcpath()
    paths.extend(user_rcpath())
    return [os.path.normpath(p) for p in paths]
1220
1146
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        # computed on a previous call
        return _rcpath
    env = os.environ.get('HGRCPATH')
    if env is None:
        _rcpath = os_rcpath()
        return _rcpath
    _rcpath = []
    for entry in env.split(os.pathsep):
        if not entry:
            continue
        entry = expandpath(entry)
        if not os.path.isdir(entry):
            _rcpath.append(entry)
            continue
        # directories contribute every *.rc file they contain
        for f, kind in osutil.listdir(entry):
            if f.endswith('.rc'):
                _rcpath.append(os.path.join(entry, f))
    return _rcpath
1244
1170
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # checked largest-to-smallest; each row is
    # (threshold multiplier, divisor, format string)
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
    )

    for threshold, divisor, fmt in units:
        if nbytes >= divisor * threshold:
            return fmt % (nbytes / float(divisor))
    # nothing matched (nbytes is 0 or negative): plain byte count
    return units[-1][2] % nbytes
1265
1191
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    text = repr(s)
    return text.replace('\\\\', '\\')
1269
1195
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for double-width characters.

        Some Asian characters use two terminal columns instead of one.
        A good example of this behavior can be seen with u'\u65e5\u672c',
        the two Japanese characters for "Japan":
        len() returns 2, but when printed to a terminal, they eat 4 columns.

        (Note that this has nothing to do whatsoever with unicode
        representation, or encoding of the underlying string)
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

        def _cutdown(self, str, space_left):
            # split str so the first part occupies at most space_left
            # terminal columns; l accumulates column width so far
            l = 0
            ucstr = unicode(str, encoding.encoding)
            colwidth = unicodedata.east_asian_width
            for i in xrange(len(ucstr)):
                # 'W'ide, 'F'ullwidth and 'A'mbiguous count as 2 columns
                l += colwidth(ucstr[i]) in 'WFA' and 2 or 1
                if space_left < l:
                    return (ucstr[:i].encode(encoding.encoding),
                            ucstr[i:].encode(encoding.encoding))
            return str, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # like the base implementation, but cut by terminal columns
            # (via _cutdown) rather than by character count
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

    # rebind the module-level name to the class itself so later calls
    # construct instances directly without redefining the class
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1312
1238
def wrap(line, width, initindent='', hangindent=''):
    """Fill line to the given width, indenting the first line with
    initindent and all following lines with hangindent."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    wrapper = MBTextWrapper(
        width=width,
        initial_indent=initindent,
        subsequent_indent=hangindent,
    )
    return wrapper.fill(line)
1322
1248
def iterlines(iterator):
    '''yield each line of every text chunk produced by iterator'''
    for block in iterator:
        for text_line in block.splitlines():
            yield text_line
1327
1253
def expandpath(path):
    '''expand environment variables and ~user constructs in path'''
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1330
1256
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # frozen builds (py2exe etc.) are re-invoked via the executable itself
    if main_is_frozen():
        return [sys.executable]
    return gethgcmd()
1341
1267
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    if hasattr(signal, 'SIGCHLD'):
        prevhandler = signal.signal(signal.SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): terminated holds (pid, status) tuples, so
            # 'pid in terminated' never matches a bare pid; liveness
            # detection appears to rely on testpid(pid) — confirm
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                # child died before the condition was satisfied
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1375
1301
try:
    # any() and all() are builtins from Python 2.5 on; referencing them
    # raises NameError on older interpreters, triggering the fallbacks
    any, all = any, all
except NameError:
    # pure-python equivalents for Python 2.4
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True
1390
1316
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    replacer = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # let a doubled prefix stand for one literal prefix character
        patterns += '|' + prefix
        prefix_char = prefix
        if len(prefix) > 1:
            # drop the leading regex-escape backslash
            prefix_char = prefix[1:]
        mapping[prefix_char] = prefix_char
    pat = re.compile(r'%s(%s)' % (prefix, patterns))
    return pat.sub(lambda m: replacer(mapping[m.group()[1:]]), s)
1415
1341
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a plain number: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1432
1358
# recognized spellings of true/false; lookup is case-insensitive
_booleans = {
    '1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
    '0': False, 'no': False, 'false': False, 'off': False, 'never': False,
}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
General Comments 0
You need to be logged in to leave comments. Login now